
o Complete the locking of page queue accesses by vm_page_unwire().
o Assert that the page queues lock is held in vm_page_unwire().
o Make vm_page_lock_queues() and vm_page_unlock_queues() visible
  to kernel loadable modules.
Author: Alan Cox
Date:   2002-07-13 20:55:21 +00:00
Parent: 5123aaef42
Commit: 1f54526952
Notes:  svn2git 2020-12-20 02:59:44 +00:00
        svn path=/head/; revision=99927
8 changed files with 24 additions and 5 deletions
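
Every hunk below applies the same caller-side protocol: take the page
queues mutex before calling vm_page_unwire() (together with any
vm_page_dirty(), vm_page_busy(), or vm_page_free() done alongside it),
and drop it afterwards. A minimal sketch of that pattern, modeled on the
pmap hunks below; the helper name and its arguments are illustrative,
not code from this commit:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Hypothetical helper showing the locking protocol this commit
 * completes: every page queue access through vm_page_unwire() must
 * hold vm_page_queue_mtx, taken via vm_page_lock_queues().
 */
static void
release_wired_page(vm_object_t obj, vm_pindex_t idx)
{
	vm_page_t m;

	m = vm_page_lookup(obj, idx);
	if (m == NULL)
		panic("release_wired_page: page already missing?");
	vm_page_lock_queues();	/* serialize page queue manipulation */
	vm_page_unwire(m, 0);	/* 0: unwired page goes to the inactive queue */
	vm_page_unlock_queues();
}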

View File

@@ -997,8 +997,10 @@ pmap_dispose_thread(td)
 		vm_page_busy(m);
 		ptek[i] = 0;
 		pmap_invalidate_page(kernel_pmap, ks + i * PAGE_SIZE);
+		vm_page_lock_queues();
 		vm_page_unwire(m, 0);
 		vm_page_free(m);
+		vm_page_unlock_queues();
 	}
 
 	/*
@@ -1036,8 +1038,10 @@ pmap_swapout_thread(td)
 		m = vm_page_lookup(ksobj, i);
 		if (m == NULL)
 			panic("pmap_swapout_thread: kstack already missing?");
+		vm_page_lock_queues();
 		vm_page_dirty(m);
 		vm_page_unwire(m, 0);
+		vm_page_unlock_queues();
 		pmap_kremove(ks + i * PAGE_SIZE);
 	}
 }

View File

@@ -448,7 +448,9 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
 		for (k = 0; k <= i; k += PAGE_SIZE) {
 			m = vm_page_lookup(mem->am_obj,
 					   OFF_TO_IDX(k));
+			vm_page_lock_queues();
 			vm_page_unwire(m, 0);
+			vm_page_unlock_queues();
 		}
 		lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);
 		return error;
@@ -499,7 +501,9 @@ agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
 		AGP_UNBIND_PAGE(dev, mem->am_offset + i);
 	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
 		m = vm_page_lookup(mem->am_obj, atop(i));
+		vm_page_lock_queues();
 		vm_page_unwire(m, 0);
+		vm_page_unlock_queues();
 	}
 
 	agp_flush_cache();

View File

@@ -399,7 +399,9 @@ agp_i810_free_memory(device_t dev, struct agp_memory *mem)
 		 * Unwire the page which we wired in alloc_memory.
 		 */
 		vm_page_t m = vm_page_lookup(mem->am_obj, 0);
+		vm_page_lock_queues();
 		vm_page_unwire(m, 0);
+		vm_page_unlock_queues();
 	}
 
 	sc->agp.as_allocated -= mem->am_size;

View File

@@ -448,7 +448,9 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
 		for (k = 0; k <= i; k += PAGE_SIZE) {
 			m = vm_page_lookup(mem->am_obj,
 					   OFF_TO_IDX(k));
+			vm_page_lock_queues();
 			vm_page_unwire(m, 0);
+			vm_page_unlock_queues();
 		}
 		lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);
 		return error;
@@ -499,7 +501,9 @@ agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
 		AGP_UNBIND_PAGE(dev, mem->am_offset + i);
 	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
 		m = vm_page_lookup(mem->am_obj, atop(i));
+		vm_page_lock_queues();
 		vm_page_unwire(m, 0);
+		vm_page_unlock_queues();
 	}
 
 	agp_flush_cache();

View File

@@ -399,7 +399,9 @@ agp_i810_free_memory(device_t dev, struct agp_memory *mem)
 		 * Unwire the page which we wired in alloc_memory.
 		 */
 		vm_page_t m = vm_page_lookup(mem->am_obj, 0);
+		vm_page_lock_queues();
 		vm_page_unwire(m, 0);
+		vm_page_unlock_queues();
 	}
 
 	sc->agp.as_allocated -= mem->am_size;

View File

@@ -887,9 +887,11 @@ pmap_dispose_thread(struct thread *td)
 		m = vm_page_lookup(ksobj, i);
 		if (m == NULL)
 			panic("pmap_dispose_thread: kstack already missing?");
+		vm_page_lock_queues();
 		vm_page_busy(m);
 		vm_page_unwire(m, 0);
 		vm_page_free(m);
+		vm_page_unlock_queues();
 	}
 	pmap_qremove(ks, KSTACK_PAGES);
 	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
@@ -914,8 +916,10 @@ pmap_swapout_thread(struct thread *td)
 		m = vm_page_lookup(ksobj, i);
 		if (m == NULL)
 			panic("pmap_swapout_thread: kstack already missing?");
+		vm_page_lock_queues();
 		vm_page_dirty(m);
 		vm_page_unwire(m, 0);
+		vm_page_unlock_queues();
 	}
 	pmap_qremove(ks, KSTACK_PAGES);
 }

View File

@@ -1285,7 +1285,7 @@ vm_page_unwire(vm_page_t m, int activate)
 	int s;
 
 	s = splvm();
-
+	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if (m->wire_count > 0) {
 		m->wire_count--;
 		if (m->wire_count == 0) {
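
The assertion gives the new rule teeth: a caller that reaches
vm_page_unwire() without holding the page queues lock now fails an
mtx_assert() under INVARIANTS instead of racing silently. A sketch of
the same idiom applied to a hypothetical queue-touching function (the
function is illustrative, not part of this commit):

static void
vm_page_requeue_sketch(vm_page_t m)
{

	/* Compiled away without INVARIANTS; panics if the lock is not held. */
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	/* ...now safe to modify m->queue and the vm_page_queues[] lists... */
}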

View File

@@ -216,12 +216,8 @@ struct vpgqueues {
 };
 
 extern struct vpgqueues vm_page_queues[PQ_COUNT];
-extern struct mtx vm_page_queue_mtx;
 extern struct mtx vm_page_queue_free_mtx;
-
-#define vm_page_lock_queues()	mtx_lock(&vm_page_queue_mtx)
-#define vm_page_unlock_queues()	mtx_unlock(&vm_page_queue_mtx)
 
 #endif /* !defined(KLD_MODULE) */
 
 /*
@@ -299,6 +295,9 @@ extern long first_page;	/* first physical page number */
 
 #define PHYS_TO_VM_PAGE(pa) \
 		(&vm_page_array[atop(pa) - first_page ])
 
+extern struct mtx vm_page_queue_mtx;
+#define vm_page_lock_queues()	mtx_lock(&vm_page_queue_mtx)
+#define vm_page_unlock_queues()	mtx_unlock(&vm_page_queue_mtx)
 #if PAGE_SIZE == 4096
 #define VM_PAGE_BITS_ALL 0xff
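
Moving the extern declaration and the two macros out of the
!defined(KLD_MODULE) block is what makes the lock usable from loadable
modules: before this change the macros simply did not exist when
KLD_MODULE was defined. A minimal sketch of a module using the
now-visible interface (the module name and event handler are
illustrative, not from this commit):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

static int
queue_lock_example_modevent(module_t mod, int type, void *arg)
{

	switch (type) {
	case MOD_LOAD:
		/*
		 * Before this commit this would not have compiled as a
		 * KLD: vm_page_lock_queues() was hidden behind
		 * #if !defined(KLD_MODULE) in vm_page.h.
		 */
		vm_page_lock_queues();
		vm_page_unlock_queues();
		return (0);
	case MOD_UNLOAD:
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

static moduledata_t queue_lock_example_mod = {
	"queue_lock_example",
	queue_lock_example_modevent,
	NULL
};
DECLARE_MODULE(queue_lock_example, queue_lock_example_mod, SI_SUB_DRIVERS,
    SI_ORDER_ANY);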