Avoid an exclusive acquisition of the object lock on the expected execution
path through the NFS clients' getpages functions.

Introduce vm_pager_free_nonreq().  This function can be used to eliminate
code that is duplicated in many getpages functions.  Also, in contrast to
the code that currently appears in those getpages functions,
vm_pager_free_nonreq() avoids acquiring an exclusive object lock in one
case.

Reviewed by:	kib
MFC after:	6 weeks
Sponsored by:	EMC / Isilon Storage Division
Author: Alan Cox, 2014-09-14 18:07:55 +00:00
parent 4a396d7a67
commit 396b3e34b4
5 changed files with 49 additions and 56 deletions
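The pattern inside vm_pager_free_nonreq() deserves a note: the exclusive
object lock is acquired lazily, only when the first non-requested page is
encountered, so a call in which the requested page is the only page
(npages == 1) never takes the lock. That is the one case of lock avoidance
the message above refers to. What follows is a minimal, standalone userspace
sketch of the same idiom, assuming a POSIX rwlock in place of the VM object
lock; the names free_nonreq_sketch, object_lock, and report_free are
illustrative, not kernel APIs.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t object_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for vm_page_free(); just reports the index freed. */
static void
report_free(int i)
{
        printf("freeing non-requested page %d\n", i);
}

/* Acquire the exclusive lock lazily: only once a non-requested page exists. */
static void
free_nonreq_sketch(int reqpage, int npages)
{
        bool locked = false;
        int i;

        for (i = 0; i < npages; i++) {
                if (i != reqpage) {
                        if (!locked) {
                                pthread_rwlock_wrlock(&object_lock);
                                locked = true;
                        }
                        report_free(i);
                }
        }
        if (locked)
                pthread_rwlock_unlock(&object_lock);
}

int
main(void)
{
        free_nonreq_sketch(0, 1);       /* common case: never locks */
        free_nonreq_sketch(1, 4);       /* locks once, frees pages 0, 2, 3 */
        return (0);
}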

sys/fs/nfsclient/nfs_clbio.c

@@ -128,24 +128,21 @@ ncl_getpages(struct vop_getpages_args *ap)
         npages = btoc(count);
 
+        /*
+         * Since the caller has busied the requested page, that page's valid
+         * field will not be changed by other threads.
+         */
+        vm_page_assert_xbusied(pages[ap->a_reqpage]);
+
         /*
          * If the requested page is partially valid, just return it and
          * allow the pager to zero-out the blanks.  Partially valid pages
          * can only occur at the file EOF.
          */
-        VM_OBJECT_WLOCK(object);
         if (pages[ap->a_reqpage]->valid != 0) {
-                for (i = 0; i < npages; ++i) {
-                        if (i != ap->a_reqpage) {
-                                vm_page_lock(pages[i]);
-                                vm_page_free(pages[i]);
-                                vm_page_unlock(pages[i]);
-                        }
-                }
-                VM_OBJECT_WUNLOCK(object);
-                return (0);
+                vm_pager_free_nonreq(object, pages, ap->a_reqpage, npages);
+                return (VM_PAGER_OK);
         }
-        VM_OBJECT_WUNLOCK(object);
 
         /*
          * We use only the kva address for the buffer, but this is extremely
@@ -175,15 +172,7 @@ ncl_getpages(struct vop_getpages_args *ap)
         if (error && (uio.uio_resid == count)) {
                 ncl_printf("nfs_getpages: error %d\n", error);
-                VM_OBJECT_WLOCK(object);
-                for (i = 0; i < npages; ++i) {
-                        if (i != ap->a_reqpage) {
-                                vm_page_lock(pages[i]);
-                                vm_page_free(pages[i]);
-                                vm_page_unlock(pages[i]);
-                        }
-                }
-                VM_OBJECT_WUNLOCK(object);
+                vm_pager_free_nonreq(object, pages, ap->a_reqpage, npages);
                 return (VM_PAGER_ERROR);
         }

sys/nfsclient/nfs_bio.c

@@ -122,24 +122,21 @@ nfs_getpages(struct vop_getpages_args *ap)
         npages = btoc(count);
 
+        /*
+         * Since the caller has busied the requested page, that page's valid
+         * field will not be changed by other threads.
+         */
+        vm_page_assert_xbusied(pages[ap->a_reqpage]);
+
         /*
          * If the requested page is partially valid, just return it and
          * allow the pager to zero-out the blanks.  Partially valid pages
          * can only occur at the file EOF.
          */
-        VM_OBJECT_WLOCK(object);
         if (pages[ap->a_reqpage]->valid != 0) {
-                for (i = 0; i < npages; ++i) {
-                        if (i != ap->a_reqpage) {
-                                vm_page_lock(pages[i]);
-                                vm_page_free(pages[i]);
-                                vm_page_unlock(pages[i]);
-                        }
-                }
-                VM_OBJECT_WUNLOCK(object);
-                return (0);
+                vm_pager_free_nonreq(object, pages, ap->a_reqpage, npages);
+                return (VM_PAGER_OK);
         }
-        VM_OBJECT_WUNLOCK(object);
 
         /*
          * We use only the kva address for the buffer, but this is extremely
@@ -169,15 +166,7 @@ nfs_getpages(struct vop_getpages_args *ap)
         if (error && (uio.uio_resid == count)) {
                 nfs_printf("nfs_getpages: error %d\n", error);
-                VM_OBJECT_WLOCK(object);
-                for (i = 0; i < npages; ++i) {
-                        if (i != ap->a_reqpage) {
-                                vm_page_lock(pages[i]);
-                                vm_page_free(pages[i]);
-                                vm_page_unlock(pages[i]);
-                        }
-                }
-                VM_OBJECT_WUNLOCK(object);
+                vm_pager_free_nonreq(object, pages, ap->a_reqpage, npages);
                 return (VM_PAGER_ERROR);
         }

sys/vm/vm_pager.c

@@ -281,6 +281,33 @@ vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
         return (object);
 }
 
+/*
+ * Free the non-requested pages from the given array.
+ */
+void
+vm_pager_free_nonreq(vm_object_t object, vm_page_t ma[], int reqpage,
+    int npages)
+{
+        int i;
+        boolean_t object_locked;
+
+        VM_OBJECT_ASSERT_UNLOCKED(object);
+        object_locked = FALSE;
+        for (i = 0; i < npages; ++i) {
+                if (i != reqpage) {
+                        if (!object_locked) {
+                                VM_OBJECT_WLOCK(object);
+                                object_locked = TRUE;
+                        }
+                        vm_page_lock(ma[i]);
+                        vm_page_free(ma[i]);
+                        vm_page_unlock(ma[i]);
+                }
+        }
+        if (object_locked)
+                VM_OBJECT_WUNLOCK(object);
+}
+
 /*
  * initialize a physical buffer
  */
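The VM_OBJECT_ASSERT_UNLOCKED() at the top of the new function fixes its
calling convention: the object lock must not be held on entry. That is why
every conversion in this commit removes the caller's lock/unlock pair around
the freeing loop rather than keeping it. A hypothetical caller that still
held the lock would have to drop it first, along these lines (a sketch, not
part of the commit):

        /* Hypothetical error path; the object lock must not be held here. */
        VM_OBJECT_WUNLOCK(object);
        vm_pager_free_nonreq(object, pages, reqpage, npages);
        return (VM_PAGER_ERROR);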

sys/vm/vm_pager.h

@@ -106,6 +106,8 @@ static __inline int vm_pager_get_pages(vm_object_t, vm_page_t *, int, int);
 static __inline boolean_t vm_pager_has_page(vm_object_t, vm_pindex_t, int *, int *);
 void vm_pager_init(void);
 vm_object_t vm_pager_object_lookup(struct pagerlst *, void *);
+void vm_pager_free_nonreq(vm_object_t object, vm_page_t ma[], int reqpage,
+    int npages);
 
 /*
  * vm_page_get_pages:

sys/vm/vnode_pager.c

@@ -722,14 +722,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int bytecount,
                 VM_OBJECT_WUNLOCK(object);
                 return (error);
         } else if (error != 0) {
-                VM_OBJECT_WLOCK(object);
-                for (i = 0; i < count; i++)
-                        if (i != reqpage) {
-                                vm_page_lock(m[i]);
-                                vm_page_free(m[i]);
-                                vm_page_unlock(m[i]);
-                        }
-                VM_OBJECT_WUNLOCK(object);
+                vm_pager_free_nonreq(object, m, reqpage, count);
                 return (VM_PAGER_ERROR);
 
         /*
@@ -739,14 +732,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int bytecount,
          */
         } else if ((PAGE_SIZE / bsize) > 1 &&
             (vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
-                VM_OBJECT_WLOCK(object);
-                for (i = 0; i < count; i++)
-                        if (i != reqpage) {
-                                vm_page_lock(m[i]);
-                                vm_page_free(m[i]);
-                                vm_page_unlock(m[i]);
-                        }
-                VM_OBJECT_WUNLOCK(object);
+                vm_pager_free_nonreq(object, m, reqpage, count);
                 PCPU_INC(cnt.v_vnodein);
                 PCPU_INC(cnt.v_vnodepgsin);
                 return vnode_pager_input_smlfs(object, m[reqpage]);