Mirror of https://git.FreeBSD.org/src.git (synced 2024-12-13 10:02:38 +00:00)
Introduce vm_page_grab_pages(), which is intended to replace loops calling
vm_page_grab() on consecutive page indices.  Besides simplifying the code in
the caller, vm_page_grab_pages() allows for batching optimizations.  For
example, the current implementation replaces calls to vm_page_lookup() on
consecutive page indices by cheaper calls to vm_page_next().

Reviewed by:	kib, markj
Tested by:	pho (an earlier version)
MFC after:	2 weeks
Differential Revision:	https://reviews.freebsd.org/D11926
This commit is contained in:
	parent 29a263df94
	commit 5471caf6f1

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
	svn path=/head/; revision=322296
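To make the conversion pattern concrete before the per-file diffs, here is a
minimal sketch, not taken from this commit, of a caller grabbing a run of
consecutive pages; `obj`, `pindex`, `ma`, `i`, and `NPAGES` are hypothetical
placeholders:

	/* Before: one vm_page_grab() call per index, each redoing a lookup. */
	VM_OBJECT_WLOCK(obj);
	for (i = 0; i < NPAGES; i++)
		ma[i] = vm_page_grab(obj, pindex + i,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
	VM_OBJECT_WUNLOCK(obj);

	/* After: one batched call; consecutive resident pages are found via
	   the cheaper vm_page_next() instead of repeated vm_page_lookup(). */
	VM_OBJECT_WLOCK(obj);
	vm_page_grab_pages(obj, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED, ma, NPAGES);
	VM_OBJECT_WUNLOCK(obj);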
sys/kern/vfs_bio.c:

@@ -2735,7 +2735,7 @@ vfs_vmio_extend(struct buf *bp, int desiredpages, int size)
 	 */
 	obj = bp->b_bufobj->bo_object;
 	VM_OBJECT_WLOCK(obj);
-	while (bp->b_npages < desiredpages) {
+	if (bp->b_npages < desiredpages) {
 		/*
 		 * We must allocate system pages since blocking
 		 * here could interfere with paging I/O, no
@@ -2746,14 +2746,12 @@ vfs_vmio_extend(struct buf *bp, int desiredpages, int size)
 		 * deadlocks once allocbuf() is called after
 		 * pages are vfs_busy_pages().
 		 */
-		m = vm_page_grab(obj, OFF_TO_IDX(bp->b_offset) + bp->b_npages,
-		    VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM |
-		    VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY |
-		    VM_ALLOC_COUNT(desiredpages - bp->b_npages));
-		if (m->valid == 0)
-			bp->b_flags &= ~B_CACHE;
-		bp->b_pages[bp->b_npages] = m;
-		++bp->b_npages;
+		vm_page_grab_pages(obj,
+		    OFF_TO_IDX(bp->b_offset) + bp->b_npages,
+		    VM_ALLOC_SYSTEM | VM_ALLOC_IGN_SBUSY |
+		    VM_ALLOC_NOBUSY | VM_ALLOC_WIRED,
+		    &bp->b_pages[bp->b_npages], desiredpages - bp->b_npages);
+		bp->b_npages = desiredpages;
 	}
 
 	/*
sys/sparc64/sparc64/pmap.c:

@@ -1226,7 +1226,6 @@ int
 pmap_pinit(pmap_t pm)
 {
 	vm_page_t ma[TSB_PAGES];
-	vm_page_t m;
 	int i;
 
 	/*
@@ -1249,14 +1248,11 @@ pmap_pinit(pmap_t pm)
 	CPU_ZERO(&pm->pm_active);
 
 	VM_OBJECT_WLOCK(pm->pm_tsb_obj);
-	for (i = 0; i < TSB_PAGES; i++) {
-		m = vm_page_grab(pm->pm_tsb_obj, i, VM_ALLOC_NOBUSY |
-		    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
-		m->valid = VM_PAGE_BITS_ALL;
-		m->md.pmap = pm;
-		ma[i] = m;
-	}
+	vm_page_grab_pages(pm->pm_tsb_obj, 0, VM_ALLOC_NORMAL |
+	    VM_ALLOC_NOBUSY | VM_ALLOC_WIRED | VM_ALLOC_ZERO, ma, TSB_PAGES);
 	VM_OBJECT_WUNLOCK(pm->pm_tsb_obj);
+	for (i = 0; i < TSB_PAGES; i++)
+		ma[i]->md.pmap = pm;
 	pmap_qenter((vm_offset_t)pm->pm_tsb, ma, TSB_PAGES);
 
 	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
sys/vm/vm_glue.c:

@@ -322,7 +322,7 @@ vm_thread_new(struct thread *td, int pages)
 {
 	vm_object_t ksobj;
 	vm_offset_t ks;
-	vm_page_t m, ma[KSTACK_MAX_PAGES];
+	vm_page_t ma[KSTACK_MAX_PAGES];
 	struct kstack_cache_entry *ks_ce;
 	int i;
 
@@ -391,15 +391,10 @@ vm_thread_new(struct thread *td, int pages)
 	 * page of stack.
 	 */
 	VM_OBJECT_WLOCK(ksobj);
-	for (i = 0; i < pages; i++) {
-		/*
-		 * Get a kernel stack page.
-		 */
-		m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
-		    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
-		ma[i] = m;
-		m->valid = VM_PAGE_BITS_ALL;
-	}
+	vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
+	    VM_ALLOC_WIRED, ma, pages);
+	for (i = 0; i < pages; i++)
+		ma[i]->valid = VM_PAGE_BITS_ALL;
 	VM_OBJECT_WUNLOCK(ksobj);
 	pmap_qenter(ks, ma, pages);
 	return (1);
@@ -573,9 +568,8 @@ vm_thread_swapin(struct thread *td)
 	pages = td->td_kstack_pages;
 	ksobj = td->td_kstack_obj;
 	VM_OBJECT_WLOCK(ksobj);
-	for (int i = 0; i < pages; i++)
-		ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL |
-		    VM_ALLOC_WIRED);
+	vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_WIRED, ma,
+	    pages);
 	for (int i = 0; i < pages;) {
 		int j, a, count, rv;
 
sys/vm/vm_page.c:

@@ -3154,6 +3154,100 @@ vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
 	return (m);
 }
 
+/*
+ * Return the specified range of pages from the given object.  For each
+ * page offset within the range, if a page already exists within the object
+ * at that offset and it is busy, then wait for it to change state.  If,
+ * instead, the page doesn't exist, then allocate it.
+ *
+ * The caller must always specify an allocation class.
+ *
+ * allocation classes:
+ *	VM_ALLOC_NORMAL		normal process request
+ *	VM_ALLOC_SYSTEM		system *really* needs the pages
+ *
+ * The caller must always specify that the pages are to be busied and/or
+ * wired.
+ *
+ * optional allocation flags:
+ *	VM_ALLOC_IGN_SBUSY	do not sleep on soft busy pages
+ *	VM_ALLOC_NOBUSY		do not exclusive busy the page
+ *	VM_ALLOC_SBUSY		set page to sbusy state
+ *	VM_ALLOC_WIRED		wire the pages
+ *	VM_ALLOC_ZERO		zero and validate any invalid pages
+ *
+ * This routine may sleep.
+ */
+void
+vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
+    vm_page_t *ma, int count)
+{
+	vm_page_t m;
+	int i;
+	bool sleep;
+
+	VM_OBJECT_ASSERT_WLOCKED(object);
+	KASSERT(((u_int)allocflags >> VM_ALLOC_COUNT_SHIFT) == 0,
+	    ("vm_page_grab_pages: VM_ALLOC_COUNT() is not allowed"));
+	KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0 ||
+	    (allocflags & VM_ALLOC_WIRED) != 0,
+	    ("vm_page_grab_pages: the pages must be busied or wired"));
+	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
+	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
+	    ("vm_page_grab_pages: VM_ALLOC_SBUSY/IGN_SBUSY mismatch"));
+	if (count == 0)
+		return;
+	i = 0;
+retrylookup:
+	m = vm_page_lookup(object, pindex + i);
+	for (; i < count; i++) {
+		if (m != NULL) {
+			sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
+			    vm_page_xbusied(m) : vm_page_busied(m);
+			if (sleep) {
+				/*
+				 * Reference the page before unlocking and
+				 * sleeping so that the page daemon is less
+				 * likely to reclaim it.
+				 */
+				vm_page_aflag_set(m, PGA_REFERENCED);
+				vm_page_lock(m);
+				VM_OBJECT_WUNLOCK(object);
+				vm_page_busy_sleep(m, "grbmaw", (allocflags &
+				    VM_ALLOC_IGN_SBUSY) != 0);
+				VM_OBJECT_WLOCK(object);
+				goto retrylookup;
+			}
+			if ((allocflags & VM_ALLOC_WIRED) != 0) {
+				vm_page_lock(m);
+				vm_page_wire(m);
+				vm_page_unlock(m);
+			}
+			if ((allocflags & (VM_ALLOC_NOBUSY |
+			    VM_ALLOC_SBUSY)) == 0)
+				vm_page_xbusy(m);
+			if ((allocflags & VM_ALLOC_SBUSY) != 0)
+				vm_page_sbusy(m);
+		} else {
+			m = vm_page_alloc(object, pindex + i, (allocflags &
+			    ~VM_ALLOC_IGN_SBUSY) | VM_ALLOC_COUNT(count - i));
+			if (m == NULL) {
+				VM_OBJECT_WUNLOCK(object);
+				VM_WAIT;
+				VM_OBJECT_WLOCK(object);
+				goto retrylookup;
+			}
+		}
+		if (m->valid == 0 && (allocflags & VM_ALLOC_ZERO) != 0) {
+			if ((m->flags & PG_ZERO) == 0)
+				pmap_zero_page(m);
+			m->valid = VM_PAGE_BITS_ALL;
+		}
+		ma[i] = m;
+		m = vm_page_next(m);
+	}
+}
+
 /*
  * Mapping function for valid or dirty bits in a page.
  *
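For illustration, a minimal usage sketch of the new interface, not taken from
this commit (the object `obj` and base index `base` are hypothetical): the
caller must hold the object's write lock, and because neither VM_ALLOC_NOBUSY
nor VM_ALLOC_SBUSY is passed, the pages come back exclusive-busied and must be
unbusied when the caller is done with them.

	vm_page_t ma[4];
	int i;

	VM_OBJECT_WLOCK(obj);
	/*
	 * Grab four consecutive pages starting at base; missing pages are
	 * allocated, and VM_ALLOC_ZERO returns any invalid page zeroed and
	 * marked fully valid.
	 */
	vm_page_grab_pages(obj, base, VM_ALLOC_NORMAL | VM_ALLOC_ZERO, ma, 4);
	VM_OBJECT_WUNLOCK(obj);

	/* ... access the pages; the busy state outlives the object lock ... */

	for (i = 0; i < 4; i++)
		vm_page_xunbusy(ma[i]);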
sys/vm/vm_page.h:

@@ -394,6 +394,9 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
  *	vm_page_alloc_freelist().  Some functions support only a subset
  *	of the flags, and ignore others, see the flags legend.
  *
+ *	The meaning of VM_ALLOC_ZERO differs slightly between the vm_page_alloc*()
+ *	and the vm_page_grab*() functions.  See these functions for details.
+ *
  * Bits 0 - 1 define class.
  * Bits 2 - 15 dedicated for flags.
  * Legend:
@@ -401,6 +404,7 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
  * (c) - vm_page_alloc_contig() supports the flag.
  * (f) - vm_page_alloc_freelist() supports the flag.
  * (g) - vm_page_grab() supports the flag.
+ * (p) - vm_page_grab_pages() supports the flag.
  * Bits above 15 define the count of additional pages that the caller
  * intends to allocate.
  */
@@ -408,13 +412,13 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
 #define	VM_ALLOC_INTERRUPT	1
 #define	VM_ALLOC_SYSTEM		2
 #define	VM_ALLOC_CLASS_MASK	3
-#define	VM_ALLOC_WIRED		0x0020	/* (acfg) Allocate non pageable page */
-#define	VM_ALLOC_ZERO		0x0040	/* (acfg) Try to obtain a zeroed page */
+#define	VM_ALLOC_WIRED		0x0020	/* (acfgp) Allocate a wired page */
+#define	VM_ALLOC_ZERO		0x0040	/* (acfgp) Allocate a prezeroed page */
 #define	VM_ALLOC_NOOBJ		0x0100	/* (acg) No associated object */
-#define	VM_ALLOC_NOBUSY		0x0200	/* (acg) Do not busy the page */
-#define	VM_ALLOC_IGN_SBUSY	0x1000	/* (g) Ignore shared busy flag */
+#define	VM_ALLOC_NOBUSY		0x0200	/* (acgp) Do not excl busy the page */
+#define	VM_ALLOC_IGN_SBUSY	0x1000	/* (gp) Ignore shared busy flag */
 #define	VM_ALLOC_NODUMP		0x2000	/* (ag) don't include in dump */
-#define	VM_ALLOC_SBUSY		0x4000	/* (acg) Shared busy the page */
+#define	VM_ALLOC_SBUSY		0x4000	/* (acgp) Shared busy the page */
 #define	VM_ALLOC_NOWAIT		0x8000	/* (g) Do not sleep, return NULL */
 #define	VM_ALLOC_COUNT_SHIFT	16
 #define	VM_ALLOC_COUNT(count)	((count) << VM_ALLOC_COUNT_SHIFT)
@@ -466,6 +470,8 @@ vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
     vm_paddr_t boundary, vm_memattr_t memattr);
 vm_page_t vm_page_alloc_freelist(int, int);
 vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
+void vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
+    vm_page_t *ma, int count);
 int vm_page_try_to_free (vm_page_t);
 void vm_page_deactivate (vm_page_t);
 void vm_page_deactivate_noreuse(vm_page_t);
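As a hedged illustration of the bit layout the legend above describes (the
variable names here are invented, not part of the header): the class lives in
bits 0-1, the flags in bits 2-15, and VM_ALLOC_COUNT() packs a count of
additional wanted pages above bit 15, which the allocator can recover with a
shift, e.g. as a page-daemon hint when an allocation fails.

	/* Class (bits 0-1), flags (bits 2-15), 7 extra pages (bits 16+). */
	int req = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_ZERO |
	    VM_ALLOC_COUNT(7);

	/* Recover the count by shifting the upper bits back down. */
	int hint = (u_int)req >> VM_ALLOC_COUNT_SHIFT;	/* hint == 7 */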