Push down the acquisition of the page queues lock into vm_page_unwire().

Update the comment describing which lock should be held on entry to
vm_page_wire().

Reviewed by:	kib
commit e3ef0d2fcf
parent d6da836201

Notes (svn2git, added 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=207644
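In caller terms, the commit replaces the old convention, in which every caller of vm_page_unwire() bracketed the call with vm_page_lock_queues() and vm_page_unlock_queues(), with one in which only the page lock is required on entry. A minimal before/after sketch of a representative caller (a hypothetical managed page m; error handling omitted):

	/* Before this revision: the caller took the page queues lock itself. */
	vm_page_lock(m);
	vm_page_lock_queues();
	vm_page_unwire(m, 0);		/* 0: move the page to the inactive queue */
	vm_page_unlock_queues();
	vm_page_unlock(m);

	/*
	 * After this revision: only the page lock is held on entry.
	 * vm_page_unwire() acquires the page queues lock internally, and
	 * only when the wire count actually drops to zero and the page
	 * has to be enqueued.
	 */
	vm_page_lock(m);
	vm_page_unwire(m, 0);
	vm_page_unlock(m);

This shortens the time the global page queues mutex is held and avoids taking it at all for unmanaged pages and for pages whose wire count remains nonzero.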
--- sys/dev/agp/agp.c
+++ sys/dev/agp/agp.c
@@ -624,9 +624,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
 		if (k >= i)
 			vm_page_wakeup(m);
 		vm_page_lock(m);
-		vm_page_lock_queues();
 		vm_page_unwire(m, 0);
-		vm_page_unlock_queues();
 		vm_page_unlock(m);
 	}
 	VM_OBJECT_UNLOCK(mem->am_obj);
@@ -660,9 +658,7 @@ agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
 	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
 		m = vm_page_lookup(mem->am_obj, atop(i));
 		vm_page_lock(m);
-		vm_page_lock_queues();
 		vm_page_unwire(m, 0);
-		vm_page_unlock_queues();
 		vm_page_unlock(m);
 	}
 	VM_OBJECT_UNLOCK(mem->am_obj);
--- sys/dev/agp/agp_i810.c
+++ sys/dev/agp/agp_i810.c
@@ -1011,9 +1011,7 @@ agp_i810_free_memory(device_t dev, struct agp_memory *mem)
 		VM_OBJECT_LOCK(mem->am_obj);
 		m = vm_page_lookup(mem->am_obj, 0);
 		vm_page_lock(m);
-		vm_page_lock_queues();
 		vm_page_unwire(m, 0);
-		vm_page_unlock_queues();
 		vm_page_unlock(m);
 		VM_OBJECT_UNLOCK(mem->am_obj);
 	} else {
--- sys/fs/tmpfs/tmpfs_vnops.c
+++ sys/fs/tmpfs/tmpfs_vnops.c
@@ -461,9 +461,7 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
 	VM_OBJECT_LOCK(tobj);
 out:
 	vm_page_lock(m);
-	vm_page_lock_queues();
 	vm_page_unwire(m, TRUE);
-	vm_page_unlock_queues();
 	vm_page_unlock(m);
 	vm_page_wakeup(m);
 	vm_object_pip_subtract(tobj, 1);
--- sys/kern/vfs_bio.c
+++ sys/kern/vfs_bio.c
@@ -1571,7 +1571,6 @@ vfs_vmio_release(struct buf *bp)
 		 * everything on the inactive queue.
 		 */
 		vm_page_lock(m);
-		vm_page_lock_queues();
 		vm_page_unwire(m, 0);
 		/*
 		 * We don't mess with busy pages, it is
@@ -1580,6 +1579,7 @@ vfs_vmio_release(struct buf *bp)
 		 */
 		if ((m->oflags & VPO_BUSY) == 0 && m->busy == 0 &&
 		    m->wire_count == 0) {
+			vm_page_lock_queues();
 			/*
 			 * Might as well free the page if we can and it has
 			 * no valid data.  We also free the page if the
@@ -1593,8 +1593,8 @@ vfs_vmio_release(struct buf *bp)
 			} else if (buf_vm_page_count_severe()) {
 				vm_page_try_to_cache(m);
 			}
+			vm_page_unlock_queues();
 		}
-		vm_page_unlock_queues();
 		vm_page_unlock(m);
 	}
 	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
@@ -2957,9 +2957,7 @@ allocbuf(struct buf *bp, int size)
 
 				bp->b_pages[i] = NULL;
 				vm_page_lock(m);
-				vm_page_lock_queues();
 				vm_page_unwire(m, 0);
-				vm_page_unlock_queues();
 				vm_page_unlock(m);
 			}
 			VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
--- sys/vm/vm_fault.c
+++ sys/vm/vm_fault.c
@@ -799,9 +799,7 @@ RetryFault:;
 			vm_page_unlock(fs.first_m);
 
 			vm_page_lock(fs.m);
-			vm_page_lock_queues();
 			vm_page_unwire(fs.m, FALSE);
-			vm_page_unlock_queues();
 			vm_page_unlock(fs.m);
 		}
 		/*
@@ -1112,6 +1110,7 @@ vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 {
 	vm_paddr_t pa;
 	vm_offset_t va;
+	vm_page_t m;
 	pmap_t pmap;
 
 	pmap = vm_map_pmap(map);
@@ -1125,11 +1124,10 @@ vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 		if (pa != 0) {
 			pmap_change_wiring(pmap, va, FALSE);
 			if (!fictitious) {
-				vm_page_lock(PHYS_TO_VM_PAGE(pa));
-				vm_page_lock_queues();
-				vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
-				vm_page_unlock_queues();
-				vm_page_unlock(PHYS_TO_VM_PAGE(pa));
+				m = PHYS_TO_VM_PAGE(pa);
+				vm_page_lock(m);
+				vm_page_unwire(m, TRUE);
+				vm_page_unlock(m);
 			}
 		}
 	}
@@ -1275,9 +1273,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
 
 		if (upgrade) {
 			vm_page_lock(src_m);
-			vm_page_lock_queues();
 			vm_page_unwire(src_m, 0);
-			vm_page_unlock_queues();
 			vm_page_unlock(src_m);
 
 			vm_page_lock(dst_m);
--- sys/vm/vm_glue.c
+++ sys/vm/vm_glue.c
@@ -529,9 +529,7 @@ vm_thread_swapout(struct thread *td)
 			panic("vm_thread_swapout: kstack already missing?");
 		vm_page_dirty(m);
 		vm_page_lock(m);
-		vm_page_lock_queues();
 		vm_page_unwire(m, 0);
-		vm_page_unlock_queues();
 		vm_page_unlock(m);
 	}
 	VM_OBJECT_UNLOCK(ksobj);
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -1532,7 +1532,7 @@ vm_page_free_toq(vm_page_t m)
 *	another map, removing it from paging queues
 *	as necessary.
 *
-*	The page queues must be locked.
+*	The page must be locked.
 *	This routine may not block.
 */
 void
@@ -1584,31 +1584,31 @@ vm_page_wire(vm_page_t m)
 *	be placed in the cache - for example, just after dirtying a page.
 *	dirty pages in the cache are not allowed.
 *
-*	The page queues must be locked.
+*	The page must be locked.
 *	This routine may not block.
 */
 void
 vm_page_unwire(vm_page_t m, int activate)
 {
 
-	if ((m->flags & PG_UNMANAGED) == 0) {
-		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	if ((m->flags & PG_UNMANAGED) == 0)
 		vm_page_lock_assert(m, MA_OWNED);
-	}
 	if (m->flags & PG_FICTITIOUS)
 		return;
 	if (m->wire_count > 0) {
 		m->wire_count--;
 		if (m->wire_count == 0) {
 			atomic_subtract_int(&cnt.v_wire_count, 1);
-			if (m->flags & PG_UNMANAGED) {
-				;
-			} else if (activate)
+			if ((m->flags & PG_UNMANAGED) != 0)
+				return;
+			vm_page_lock_queues();
+			if (activate)
 				vm_page_enqueue(PQ_ACTIVE, m);
 			else {
 				vm_page_flag_clear(m, PG_WINATCFLS);
 				vm_page_enqueue(PQ_INACTIVE, m);
 			}
+			vm_page_unlock_queues();
 		}
 	} else {
 		panic("vm_page_unwire: invalid wire count: %d", m->wire_count);