1
0
mirror of https://git.FreeBSD.org/src.git synced 2024-12-20 11:11:24 +00:00

Add the ability for the allocflags argument of vm_page_grab() to

specify the increment of vm_pageout_deficit when sleeping due to page
shortage. Then, in allocbuf(), the code to allocate pages when extending
vmio buffer can be replaced by a call to vm_page_grab().

Suggested and reviewed by:	alc
MFC after:	2 weeks
This commit is contained in:
Konstantin Belousov 2010-07-05 21:13:32 +00:00
parent ec1f83f78e
commit 5f195aa32e
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=209713
3 changed files with 24 additions and 52 deletions

View File

@ -3013,63 +3013,24 @@ allocbuf(struct buf *bp, int size)
VM_OBJECT_LOCK(obj);
while (bp->b_npages < desiredpages) {
vm_page_t m;
vm_pindex_t pi;
pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages;
if ((m = vm_page_lookup(obj, pi)) == NULL) {
/*
* note: must allocate system pages
* since blocking here could intefere
* with paging I/O, no matter which
* process we are.
*/
m = vm_page_alloc(obj, pi,
VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM |
VM_ALLOC_WIRED);
if (m == NULL) {
atomic_add_int(&vm_pageout_deficit,
desiredpages - bp->b_npages);
VM_OBJECT_UNLOCK(obj);
VM_WAIT;
VM_OBJECT_LOCK(obj);
} else {
if (m->valid == 0)
bp->b_flags &= ~B_CACHE;
bp->b_pages[bp->b_npages] = m;
++bp->b_npages;
}
continue;
}
/*
* We found a page. If we have to sleep on it,
* retry because it might have gotten freed out
* from under us.
* We must allocate system pages since blocking
* here could intefere with paging I/O, no
* matter which process we are.
*
* We can only test VPO_BUSY here. Blocking on
* m->busy might lead to a deadlock:
*
* vm_fault->getpages->cluster_read->allocbuf
*
* Thus, we specify VM_ALLOC_IGN_SBUSY.
*/
if ((m->oflags & VPO_BUSY) != 0) {
/*
* Reference the page before unlocking
* and sleeping so that the page daemon
* is less likely to reclaim it.
*/
vm_page_lock_queues();
vm_page_flag_set(m, PG_REFERENCED);
vm_page_sleep(m, "pgtblk");
continue;
}
/*
* We have a good page.
*/
vm_page_lock(m);
vm_page_wire(m);
vm_page_unlock(m);
m = vm_page_grab(obj, OFF_TO_IDX(bp->b_offset) +
bp->b_npages, VM_ALLOC_NOBUSY |
VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
VM_ALLOC_RETRY | VM_ALLOC_IGN_SBUSY |
VM_ALLOC_COUNT(desiredpages - bp->b_npages));
if (m->valid == 0)
bp->b_flags &= ~B_CACHE;
bp->b_pages[bp->b_npages] = m;
++bp->b_npages;
}

View File

@ -2038,11 +2038,13 @@ vm_page_t
vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
vm_page_t m;
u_int count;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
retrylookup:
if ((m = vm_page_lookup(object, pindex)) != NULL) {
if ((m->oflags & VPO_BUSY) != 0 || m->busy != 0) {
if ((m->oflags & VPO_BUSY) != 0 ||
((allocflags & VM_ALLOC_IGN_SBUSY) == 0 && m->busy != 0)) {
if ((allocflags & VM_ALLOC_RETRY) != 0) {
/*
* Reference the page before unlocking and
@ -2067,9 +2069,13 @@ vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
return (m);
}
}
m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
m = vm_page_alloc(object, pindex, allocflags & ~(VM_ALLOC_RETRY |
VM_ALLOC_IGN_SBUSY | VM_ALLOC_COUNT_MASK));
if (m == NULL) {
VM_OBJECT_UNLOCK(object);
count = (u_int)allocflags >> VM_ALLOC_COUNT_SHIFT;
if (count > 0)
atomic_add_int(&vm_pageout_deficit, count);
VM_WAIT;
VM_OBJECT_LOCK(object);
if ((allocflags & VM_ALLOC_RETRY) == 0)

View File

@ -317,6 +317,11 @@ extern struct vpglocks vm_page_queue_lock;
#define VM_ALLOC_NOBUSY 0x0200 /* Do not busy the page */
#define VM_ALLOC_IFCACHED 0x0400 /* Fail if the page is not cached */
#define VM_ALLOC_IFNOTCACHED 0x0800 /* Fail if the page is cached */
#define VM_ALLOC_IGN_SBUSY 0x1000 /* vm_page_grab() only */
#define VM_ALLOC_COUNT_SHIFT 16
#define VM_ALLOC_COUNT(count) ((count) << VM_ALLOC_COUNT_SHIFT)
#define VM_ALLOC_COUNT_MASK VM_ALLOC_COUNT(0xffff)
void vm_page_flag_set(vm_page_t m, unsigned short bits);
void vm_page_flag_clear(vm_page_t m, unsigned short bits);