1
0
mirror of https://git.FreeBSD.org/src.git synced 2024-12-18 10:35:55 +00:00

Release the page lock early in vm_pageout_clean(). There is no reason to
hold this lock until the end of the function.

With the aforementioned change to vm_pageout_clean(), page locks don't need
to support recursive (MTX_RECURSE) or duplicate (MTX_DUPOK) acquisitions.

Reviewed by:	kib
This commit is contained in:
Alan Cox 2011-01-03 00:41:56 +00:00
parent fa5ecdd3b9
commit 17f6a17bf7
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=216899
2 changed files with 4 additions and 5 deletions

View File

@@ -332,8 +332,7 @@ vm_page_startup(vm_offset_t vaddr)
/* Setup page locks. */
for (i = 0; i < PA_LOCK_COUNT; i++)
mtx_init(&pa_lock[i].data, "page lock", NULL,
MTX_DEF | MTX_RECURSE | MTX_DUPOK);
mtx_init(&pa_lock[i].data, "page lock", NULL, MTX_DEF);
/*
* Initialize the queue headers for the hold queue, the active queue,

View File

@@ -326,7 +326,8 @@ vm_pageout_clean(vm_page_t m)
vm_pindex_t pindex = m->pindex;
vm_page_lock_assert(m, MA_OWNED);
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
object = m->object;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
/*
* It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
@@ -343,6 +344,7 @@ vm_pageout_clean(vm_page_t m)
KASSERT(m->busy == 0 && (m->oflags & VPO_BUSY) == 0,
("vm_pageout_clean: page %p is busy", m));
KASSERT(m->hold_count == 0, ("vm_pageout_clean: page %p is held", m));
vm_page_unlock(m);
mc[vm_pageout_page_count] = pb = ps = m;
pageout_count = 1;
@@ -369,7 +371,6 @@ vm_pageout_clean(vm_page_t m)
* first and attempt to align our cluster, then do a
* forward scan if room remains.
*/
object = m->object;
more:
while (ib && pageout_count < vm_pageout_page_count) {
vm_page_t p;
@@ -434,7 +435,6 @@ vm_pageout_clean(vm_page_t m)
if (ib && pageout_count < vm_pageout_page_count)
goto more;
vm_page_unlock(m);
/*
* we allow reads during pageouts...
*/