1
0
mirror of https://git.FreeBSD.org/src.git synced 2025-01-05 12:56:08 +00:00

Handle the driver KPI change from r292373. Ensure that managed device

pagers' fault routines always return with a result page, be it the
proper and valid result page, or initially passed freshly allocated
placeholder.  Do not free the passed in page until we are able to
provide the replacement, and do not assign NULL to *mres.

Reported and tested by:	dumbbell
Reviewed by:	royger (who also verified that Xen code is safe)
Sponsored by:	The FreeBSD Foundation
This commit is contained in:
Konstantin Belousov 2016-03-24 09:56:53 +00:00
parent 61c8fde5d6
commit 58a236093c
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=297231
2 changed files with 23 additions and 28 deletions

View File

@ -1481,7 +1481,7 @@ i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
vm_page_t page, oldpage;
vm_page_t page;
int ret = 0;
#ifdef FREEBSD_WIP
bool write = (prot & VM_PROT_WRITE) != 0;
@ -1504,13 +1504,10 @@ i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
* progress.
*/
if (*mres != NULL) {
oldpage = *mres;
vm_page_lock(oldpage);
vm_page_remove(oldpage);
vm_page_unlock(oldpage);
*mres = NULL;
} else
oldpage = NULL;
vm_page_lock(*mres);
vm_page_remove(*mres);
vm_page_unlock(*mres);
}
VM_OBJECT_WUNLOCK(vm_obj);
retry:
ret = 0;
@ -1590,7 +1587,6 @@ i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
}
page->valid = VM_PAGE_BITS_ALL;
have_page:
*mres = page;
vm_page_xbusy(page);
CTR4(KTR_DRM, "fault %p %jx %x phys %x", gem_obj, offset, prot,
@ -1603,11 +1599,13 @@ i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
i915_gem_object_unpin(obj);
}
DRM_UNLOCK(dev);
if (oldpage != NULL) {
vm_page_lock(oldpage);
vm_page_free(oldpage);
vm_page_unlock(oldpage);
if (*mres != NULL) {
KASSERT(*mres != page, ("loosing %p %p", *mres, page));
vm_page_lock(*mres);
vm_page_free(*mres);
vm_page_unlock(*mres);
}
*mres = page;
vm_object_pip_wakeup(vm_obj);
return (VM_PAGER_OK);

View File

@ -106,21 +106,18 @@ ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
struct ttm_buffer_object *bo = vm_obj->handle;
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_tt *ttm = NULL;
vm_page_t m, m1, oldm;
vm_page_t m, m1;
int ret;
int retval = VM_PAGER_OK;
struct ttm_mem_type_manager *man =
&bdev->man[bo->mem.mem_type];
vm_object_pip_add(vm_obj, 1);
oldm = *mres;
if (oldm != NULL) {
vm_page_lock(oldm);
vm_page_remove(oldm);
vm_page_unlock(oldm);
*mres = NULL;
} else
oldm = NULL;
if (*mres != NULL) {
vm_page_lock(*mres);
vm_page_remove(*mres);
vm_page_unlock(*mres);
}
retry:
VM_OBJECT_WUNLOCK(vm_obj);
m = NULL;
@ -261,14 +258,14 @@ ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
bo, m, m1, (uintmax_t)offset));
}
m->valid = VM_PAGE_BITS_ALL;
*mres = m;
vm_page_xbusy(m);
if (oldm != NULL) {
vm_page_lock(oldm);
vm_page_free(oldm);
vm_page_unlock(oldm);
if (*mres != NULL) {
KASSERT(*mres != m, ("loosing %p %p", *mres, m));
vm_page_lock(*mres);
vm_page_free(*mres);
vm_page_unlock(*mres);
}
*mres = m;
out_io_unlock1:
ttm_mem_io_unlock(man);