Mirror of https://git.FreeBSD.org/src.git
Replace PG_BUSY with VPO_BUSY.  In other words, changes to the page's
busy flag, i.e., VPO_BUSY, are now synchronized by the per-vm object
lock instead of the global page queues lock.
parent 43200cd3ed
commit 9af80719db

Notes:
    svn2git
    2020-12-20 02:59:44 +00:00
    svn path=/head/; revision=163604
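In practical terms, the commit moves the page's busy bit from m->flags (PG_BUSY, covered by the global page queues lock) to m->oflags (VPO_BUSY, covered by the lock of the object containing the page). A minimal sketch of the resulting discipline, distilled from the vm_page.c hunks below; the helper name is illustrative, not part of the commit:

/*
 * Illustrative sketch only: mirrors vm_page_busy()/vm_page_wakeup() as
 * changed by this commit.  VPO_BUSY lives in m->oflags and is
 * synchronized by the owning object's lock; the page queues lock is
 * not involved.
 */
static void
example_busy_cycle(vm_page_t m)
{
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); /* object lock, not queues */
	m->oflags |= VPO_BUSY;		/* mark the page in transit */
	/* ... perform I/O or initialization on the page ... */
	m->oflags &= ~VPO_BUSY;		/* clear the busy bit */
	vm_page_flash(m);		/* wake any VPO_WANTED waiters */
}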
@@ -827,7 +827,7 @@ exec_map_first_page(imgp)
 			if ((ma[i] = vm_page_lookup(object, i)) != NULL) {
 				if (ma[i]->valid)
 					break;
-				if ((ma[i]->flags & PG_BUSY) || ma[i]->busy)
+				if ((ma[i]->oflags & VPO_BUSY) || ma[i]->busy)
 					break;
 				vm_page_lock_queues();
 				vm_page_busy(ma[i]);
@@ -2100,7 +2100,7 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
 		 * If not and it is not valid, then free it.
 		 */
 		if (pg->wire_count == 0 && pg->valid == 0 &&
-		    pg->busy == 0 && !(pg->flags & PG_BUSY) &&
+		    pg->busy == 0 && !(pg->oflags & VPO_BUSY) &&
 		    pg->hold_count == 0) {
 			vm_page_free(pg);
 		}
@@ -1519,7 +1519,7 @@ vfs_vmio_release(struct buf *bp)
 		 * the responsibility of the process that
 		 * busied the pages to deal with them.
 		 */
-		if ((m->flags & PG_BUSY) || (m->busy != 0))
+		if ((m->oflags & VPO_BUSY) || (m->busy != 0))
 			continue;

 		if (m->wire_count == 0) {
@@ -2879,7 +2879,7 @@ allocbuf(struct buf *bp, int size)
 			 * retry because it might have gotten freed out
 			 * from under us.
 			 *
-			 * We can only test PG_BUSY here.  Blocking on
+			 * We can only test VPO_BUSY here.  Blocking on
 			 * m->busy might lead to a deadlock:
 			 *
 			 *  vm_fault->getpages->cluster_read->allocbuf
@@ -3369,7 +3369,7 @@ vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
-* almost as being PG_BUSY.  Also the object paging_in_progress
+* almost as being VPO_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistant.
 *
@@ -911,7 +911,7 @@ cluster_wbuild(vp, size, start_lbn, len)
 		if (i != 0) { /* if not first buffer */
 			for (j = 0; j < tbp->b_npages; j += 1) {
 				m = tbp->b_pages[j];
-				if (m->flags & PG_BUSY) {
+				if (m->oflags & VPO_BUSY) {
 					VM_OBJECT_UNLOCK(
 					    tbp->b_object);
 					bqrelse(tbp);
@@ -295,8 +295,8 @@ dev_pager_getfake(paddr)

 	m = uma_zalloc(fakepg_zone, M_WAITOK);

-	m->flags = PG_BUSY | PG_FICTITIOUS;
-	m->oflags = 0;
+	m->flags = PG_FICTITIOUS;
+	m->oflags = VPO_BUSY;
 	m->valid = VM_PAGE_BITS_ALL;
 	m->dirty = 0;
 	m->busy = 0;
@@ -158,7 +158,7 @@ phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
 		m[i]->dirty = 0;
 		/* The requested page must remain busy, the others not. */
 		if (reqpage != i) {
-			vm_page_flag_clear(m[i], PG_BUSY);
+			m[i]->oflags &= ~VPO_BUSY;
 			m[i]->busy = 0;
 		}
 	}
@@ -274,7 +274,7 @@ contigmalloc1(
 			start++;
 			goto again0;
 		}
-		if ((m->flags & PG_BUSY) || m->busy != 0) {
+		if ((m->oflags & VPO_BUSY) || m->busy != 0) {
 			VM_OBJECT_UNLOCK(object);
 			start++;
 			goto again0;
@@ -504,7 +504,7 @@ vm_page_alloc_contig(vm_pindex_t npages, vm_paddr_t low, vm_paddr_t high,
 			object = m->object;
 			if (!VM_OBJECT_TRYLOCK(object))
 				goto cleanup_freed;
-			if ((m->flags & PG_BUSY) || m->busy != 0) {
+			if ((m->oflags & VPO_BUSY) || m->busy != 0) {
 				VM_OBJECT_UNLOCK(object);
 				goto cleanup_freed;
 			}
@@ -351,7 +351,7 @@ RetryFault:;

 			/*
 			 * Wait/Retry if the page is busy.  We have to do this
-			 * if the page is busy via either PG_BUSY or
+			 * if the page is busy via either VPO_BUSY or
 			 * vm_page_t->busy because the vm_pager may be using
 			 * vm_page_t->busy for pageouts ( and even pageins if
 			 * it is the vnode pager ), and we could end up trying
@@ -365,7 +365,7 @@ RetryFault:;
 			 * around with a vm_page_t->busy page except, perhaps,
 			 * to pmap it.
 			 */
-			if ((fs.m->flags & PG_BUSY) || fs.m->busy) {
+			if ((fs.m->oflags & VPO_BUSY) || fs.m->busy) {
 				vm_page_unlock_queues();
 				VM_OBJECT_UNLOCK(fs.object);
 				if (fs.object != fs.first_object) {
@@ -510,7 +510,8 @@ RetryFault:;
 				if (mt == NULL || (mt->valid != VM_PAGE_BITS_ALL))
 					break;
 				if (mt->busy ||
-				    (mt->flags & (PG_BUSY | PG_FICTITIOUS | PG_UNMANAGED)) ||
+				    (mt->oflags & VPO_BUSY) ||
+				    (mt->flags & (PG_FICTITIOUS | PG_UNMANAGED)) ||
 				    mt->hold_count ||
 				    mt->wire_count)
 					continue;
@@ -539,7 +540,7 @@ RetryFault:;
 			 * return value is the index into the marray for the
 			 * vm_page_t passed to the routine.
 			 *
-			 * fs.m plus the additional pages are PG_BUSY'd.
+			 * fs.m plus the additional pages are VPO_BUSY'd.
 			 *
 			 * XXX vm_fault_additional_pages() can block
 			 * without releasing the map lock.
@@ -559,7 +560,7 @@ RetryFault:;
 			/*
 			 * Call the pager to retrieve the data, if any, after
 			 * releasing the lock on the map.  We hold a ref on
-			 * fs.object and the pages are PG_BUSY'd.
+			 * fs.object and the pages are VPO_BUSY'd.
 			 */
 			unlock_map(&fs);

@@ -684,7 +685,7 @@ RetryFault:;
 		}
 	}

-	KASSERT((fs.m->flags & PG_BUSY) != 0,
+	KASSERT((fs.m->oflags & VPO_BUSY) != 0,
 	    ("vm_fault: not busy after main loop"));

 	/*
@@ -875,7 +876,7 @@ RetryFault:;
 	/*
 	 * Page had better still be busy
 	 */
-	KASSERT(fs.m->flags & PG_BUSY,
+	KASSERT(fs.m->oflags & VPO_BUSY,
 	    ("vm_fault: page %p not busy!", fs.m));
 	/*
 	 * Sanity check: page must be completely valid or it is not fit to
@@ -650,7 +650,7 @@ vm_object_terminate(vm_object_t object)
 	 */
 	vm_page_lock_queues();
 	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
-		KASSERT(!p->busy && (p->flags & PG_BUSY) == 0,
+		KASSERT(!p->busy && (p->oflags & VPO_BUSY) == 0,
 		    ("vm_object_terminate: freeing busy page %p "
 		    "p->busy = %d, p->flags %x\n", p, p->busy, p->flags));
 		if (p->wire_count == 0) {
@@ -909,7 +909,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
 			vm_page_t tp;

 			if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
-				if ((tp->flags & PG_BUSY) ||
+				if ((tp->oflags & VPO_BUSY) ||
 				    ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
 				    (tp->flags & PG_CLEANCHK) == 0) ||
 				    (tp->busy != 0))
@@ -937,7 +937,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
 			vm_page_t tp;

 			if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
-				if ((tp->flags & PG_BUSY) ||
+				if ((tp->oflags & VPO_BUSY) ||
 				    ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
 				    (tp->flags & PG_CLEANCHK) == 0) ||
 				    (tp->busy != 0))
@@ -1151,7 +1151,7 @@ vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
 			vm_page_unlock_queues();
 			goto unlock_tobject;
 		}
-		if ((m->flags & PG_BUSY) || m->busy) {
+		if ((m->oflags & VPO_BUSY) || m->busy) {
 			vm_page_flag_set(m, PG_REFERENCED);
 			vm_page_unlock_queues();
 			if (object != tobject)
@@ -1340,7 +1340,7 @@ vm_object_split(vm_map_entry_t entry)
 		 * We do not have to VM_PROT_NONE the page as mappings should
 		 * not be changed by this operation.
 		 */
-		if ((m->flags & PG_BUSY) || m->busy) {
+		if ((m->oflags & VPO_BUSY) || m->busy) {
 			vm_page_flag_set(m, PG_REFERENCED);
 			vm_page_unlock_queues();
 			VM_OBJECT_UNLOCK(new_object);
@@ -1468,14 +1468,14 @@ vm_object_backing_scan(vm_object_t object, int op)
 			vm_page_t pp;

 			if (op & OBSC_COLLAPSE_NOWAIT) {
-				if ((p->flags & PG_BUSY) ||
+				if ((p->oflags & VPO_BUSY) ||
 				    !p->valid ||
 				    p->busy) {
 					p = next;
 					continue;
 				}
 			} else if (op & OBSC_COLLAPSE_WAIT) {
-				if ((p->flags & PG_BUSY) || p->busy) {
+				if ((p->oflags & VPO_BUSY) || p->busy) {
 					vm_page_lock_queues();
 					vm_page_flag_set(p, PG_REFERENCED);
 					vm_page_unlock_queues();
@@ -372,9 +372,9 @@ vm_page_busy(vm_page_t m)
 {

 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
-	KASSERT((m->flags & PG_BUSY) == 0,
+	KASSERT((m->oflags & VPO_BUSY) == 0,
 	    ("vm_page_busy: page already busy!!!"));
-	vm_page_flag_set(m, PG_BUSY);
+	m->oflags |= VPO_BUSY;
 }

 /*
@@ -396,7 +396,7 @@ vm_page_flash(vm_page_t m)
 /*
 *	vm_page_wakeup:
 *
-*	clear the PG_BUSY flag and wakeup anyone waiting for the
+*	clear the VPO_BUSY flag and wakeup anyone waiting for the
 *	page.
 *
 */
@@ -405,8 +405,8 @@ vm_page_wakeup(vm_page_t m)
 {

 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
-	KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
-	vm_page_flag_clear(m, PG_BUSY);
+	KASSERT(m->oflags & VPO_BUSY, ("vm_page_wakeup: page not busy!!!"));
+	m->oflags &= ~VPO_BUSY;
 	vm_page_flash(m);
 }

@@ -678,8 +678,8 @@ vm_page_remove(vm_page_t m)
 	if ((object = m->object) == NULL)
 		return;
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
-	if (m->flags & PG_BUSY) {
-		vm_page_flag_clear(m, PG_BUSY);
+	if (m->oflags & VPO_BUSY) {
+		m->oflags &= ~VPO_BUSY;
 		vm_page_flash(m);
 	}

@@ -794,7 +794,7 @@ vm_page_select_cache(int color)
 		if (m->hold_count == 0 && (object = m->object,
 		    (was_trylocked = VM_OBJECT_TRYLOCK(object)) ||
 		    VM_OBJECT_LOCKED(object))) {
-			KASSERT((m->flags & PG_BUSY) == 0 && m->busy == 0,
+			KASSERT((m->oflags & VPO_BUSY) == 0 && m->busy == 0,
 			    ("Found busy cache page %p", m));
 			vm_page_free(m);
 			if (was_trylocked)
@@ -918,16 +918,17 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 	/*
 	 * Initialize structure.  Only the PG_ZERO flag is inherited.
 	 */
-	flags = PG_BUSY;
+	flags = 0;
 	if (m->flags & PG_ZERO) {
 		vm_page_zero_count--;
 		if (req & VM_ALLOC_ZERO)
-			flags = PG_ZERO | PG_BUSY;
+			flags = PG_ZERO;
 	}
-	if (req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ))
-		flags &= ~PG_BUSY;
 	m->flags = flags;
-	m->oflags = 0;
+	if (req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ))
+		m->oflags = 0;
+	else
+		m->oflags = VPO_BUSY;
 	if (req & VM_ALLOC_WIRED) {
 		atomic_add_int(&cnt.v_wire_count, 1);
 		m->wire_count = 1;
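The hunk above changes the allocator's contract: the returned page now carries VPO_BUSY in m->oflags unless the caller passed VM_ALLOC_NOBUSY or VM_ALLOC_NOOBJ. A hypothetical caller sketch, not part of the commit (variable names are assumed):

	/* The object lock covers both the allocation and the later unbusy. */
	VM_OBJECT_LOCK(object);
	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
	if (m != NULL) {
		/* ... initialize or zero-fill the new page ... */
		vm_page_wakeup(m);	/* clears VPO_BUSY, wakes waiters */
	}
	VM_OBJECT_UNLOCK(object);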
@@ -1090,8 +1091,8 @@ vm_page_free_toq(vm_page_t m)

 	if (m->busy || VM_PAGE_INQUEUE1(m, PQ_FREE)) {
 		printf(
-		"vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
-		    (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
+		"vm_page_free: pindex(%lu), busy(%d), VPO_BUSY(%d), hold(%d)\n",
+		    (u_long)m->pindex, m->busy, (m->oflags & VPO_BUSY) ? 1 : 0,
 		    m->hold_count);
 		if (VM_PAGE_INQUEUE1(m, PQ_FREE))
 			panic("vm_page_free: freeing free page");
@@ -1319,7 +1320,7 @@ vm_page_try_to_cache(vm_page_t m)
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
-	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
+	    (m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED)) {
 		return (0);
 	}
 	pmap_remove_all(m);
@@ -1343,7 +1344,7 @@ vm_page_try_to_free(vm_page_t m)
 	if (m->object != NULL)
 		VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
-	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
+	    (m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED)) {
 		return (0);
 	}
 	pmap_remove_all(m);
@@ -1366,7 +1367,7 @@ vm_page_cache(vm_page_t m)

 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
-	if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
+	if ((m->flags & PG_UNMANAGED) || (m->oflags & VPO_BUSY) || m->busy ||
 	    m->hold_count || m->wire_count) {
 		printf("vm_page_cache: attempting to cache busy page\n");
 		return;
@@ -142,6 +142,7 @@ struct vm_page {
 * Access to these page flags is synchronized by the lock on the object
 * containing the page (O).
 */
+#define	VPO_BUSY	0x0001	/* page is in transit */
 #define	VPO_WANTED	0x0002	/* someone is waiting for page */
 #define	VPO_SWAPINPROG	0x0200	/* swap I/O in progress on page */
 #define	VPO_NOSYNC	0x0400	/* do not collect for syncer */
@@ -220,7 +221,6 @@ extern struct pq_coloring page_queue_coloring;
 * pte mappings, nor can they be removed from their objects via
 * the object, and such pages are also not on any PQ queue.
 */
-#define	PG_BUSY		0x0001	/* page is in transit (O) */
 #define	PG_WINATCFLS	0x0004	/* flush dirty page on inactive q */
 #define	PG_FICTITIOUS	0x0008	/* physical page doesn't exist (O) */
 #define	PG_WRITEABLE	0x0010	/* page is mapped writeable */
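Taken together, the two vm_page.h hunks split the busy state out of the PG_* namespace. A summary sketch, paraphrasing the header comments above:

/*
 * After this commit the page flag space is split by synchronization:
 *
 *   m->oflags - VPO_* bits (VPO_BUSY, VPO_WANTED, VPO_SWAPINPROG,
 *               VPO_NOSYNC), synchronized by the lock on the object
 *               containing the page.
 *
 *   m->flags  - the remaining PG_* bits (PG_WINATCFLS, PG_FICTITIOUS,
 *               PG_WRITEABLE, ...); PG_BUSY 0x0001 is retired, and
 *               VPO_BUSY takes the 0x0001 value in the new namespace.
 */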
@@ -362,7 +362,7 @@ void vm_page_cowclear (vm_page_t);
 /*
 *	vm_page_sleep_if_busy:
 *
-*	Sleep and release the page queues lock if PG_BUSY is set or,
+*	Sleep and release the page queues lock if VPO_BUSY is set or,
 *	if also_m_busy is TRUE, busy is non-zero.  Returns TRUE if the
 *	thread slept and the page queues lock was released.
 *	Otherwise, retains the page queues lock and returns FALSE.
|
||||
vm_page_sleep_if_busy(vm_page_t m, int also_m_busy, const char *msg)
|
||||
{
|
||||
|
||||
if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
|
||||
if ((m->oflags & VPO_BUSY) || (also_m_busy && m->busy)) {
|
||||
vm_page_sleep(m, msg);
|
||||
return (TRUE);
|
||||
}
|
||||
|
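The comment hunk above spells out the contract: a TRUE return means the thread slept and the page queues lock was released. A hypothetical retry-loop caller, not part of the commit (the wmesg string and names are assumed, and the object lock is assumed held throughout):

retry:
	vm_page_lock_queues();
	m = vm_page_lookup(object, pindex);
	if (m != NULL && vm_page_sleep_if_busy(m, TRUE, "pgbusy"))
		goto retry;	/* slept: the queues lock was dropped */
	/* queues lock still held here; m, if non-NULL, is not busy */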
@@ -236,7 +236,8 @@ vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
 	 * Initialize our marker
 	 */
 	bzero(&marker, sizeof(marker));
-	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
+	marker.flags = PG_FICTITIOUS | PG_MARKER;
+	marker.oflags = VPO_BUSY;
 	marker.queue = m->queue;
 	marker.wire_count = 1;

@@ -294,7 +295,8 @@ vm_pageout_clean(m)
 	 * Don't mess with the page if it's busy, held, or special
 	 */
 	if ((m->hold_count != 0) ||
-	    ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
+	    ((m->busy != 0) || (m->oflags & VPO_BUSY) ||
+	    (m->flags & PG_UNMANAGED))) {
 		return 0;
 	}

@@ -338,7 +340,8 @@ vm_pageout_clean(m)
 			break;
 		}
 		if (VM_PAGE_INQUEUE1(p, PQ_CACHE) ||
-		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
+		    (p->oflags & VPO_BUSY) || p->busy ||
+		    (p->flags & PG_UNMANAGED)) {
 			ib = 0;
 			break;
 		}
@@ -368,7 +371,8 @@ vm_pageout_clean(m)
 		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
 			break;
 		if (VM_PAGE_INQUEUE1(p, PQ_CACHE) ||
-		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
+		    (p->oflags & VPO_BUSY) || p->busy ||
+		    (p->flags & PG_UNMANAGED)) {
 			break;
 		}
 		vm_page_test_dirty(p);
@@ -538,7 +542,8 @@ vm_pageout_object_deactivate_pages(pmap, first_object, desired)
 			if (p->wire_count != 0 ||
 			    p->hold_count != 0 ||
 			    p->busy != 0 ||
-			    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
+			    (p->oflags & VPO_BUSY) ||
+			    (p->flags & PG_UNMANAGED) ||
 			    !pmap_page_exists_quick(pmap, p)) {
 				p = next;
 				continue;
@@ -706,7 +711,8 @@ vm_pageout_scan(int pass)
 	 * Initialize our marker
 	 */
 	bzero(&marker, sizeof(marker));
-	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
+	marker.flags = PG_FICTITIOUS | PG_MARKER;
+	marker.oflags = VPO_BUSY;
 	marker.queue = PQ_INACTIVE;
 	marker.wire_count = 1;

@@ -773,7 +779,7 @@ vm_pageout_scan(int pass)
 			addl_page_shortage++;
 			continue;
 		}
-		if (m->busy || (m->flags & PG_BUSY)) {
+		if (m->busy || (m->oflags & VPO_BUSY)) {
 			VM_OBJECT_UNLOCK(object);
 			addl_page_shortage++;
 			continue;
@@ -987,7 +993,7 @@ vm_pageout_scan(int pass)
 			 * page back onto the end of the queue so that
 			 * statistics are more correct if we don't.
 			 */
-			if (m->busy || (m->flags & PG_BUSY)) {
+			if (m->busy || (m->oflags & VPO_BUSY)) {
 				goto unlock_and_continue;
 			}

@@ -1071,7 +1077,7 @@ vm_pageout_scan(int pass)
 		 * Don't deactivate pages that are busy.
 		 */
 		if ((m->busy != 0) ||
-		    (m->flags & PG_BUSY) ||
+		    (m->oflags & VPO_BUSY) ||
 		    (m->hold_count != 0)) {
 			VM_OBJECT_UNLOCK(object);
 			vm_pageq_requeue(m);
@@ -1157,7 +1163,7 @@ vm_pageout_scan(int pass)
 		    ("Found wired cache page %p", m));
 		if (m->hold_count == 0 && VM_OBJECT_TRYLOCK(object =
 		    m->object)) {
-			KASSERT((m->flags & PG_BUSY) == 0 &&
+			KASSERT((m->oflags & VPO_BUSY) == 0 &&
 			    m->busy == 0, ("Found busy cache page %p",
 			    m));
 			vm_page_free(m);
@@ -1349,7 +1355,7 @@ vm_pageout_page_stats()
 		 * Don't deactivate pages that are busy.
 		 */
 		if ((m->busy != 0) ||
-		    (m->flags & PG_BUSY) ||
+		    (m->oflags & VPO_BUSY) ||
 		    (m->hold_count != 0)) {
 			VM_OBJECT_UNLOCK(object);
 			vm_pageq_requeue(m);