mirror of https://git.FreeBSD.org/src.git
In the common case, pmap_enter_quick() completes without sleeping.
In such cases, the busying of the page and the unlocking of the containing object by vm_map_pmap_enter() and vm_fault_prefault() are unnecessary overhead.

To eliminate this overhead, this change modifies pmap_enter_quick() so that it expects the object to be locked on entry, and it assumes responsibility for busying the page and unlocking the object if it must sleep.

Note: alpha, amd64, i386, and ia64 are the only implementations optimized by this change; arm, powerpc, and sparc64 still conservatively busy the page and unlock the object within every pmap_enter_quick() call.

Additionally, this change is the first case in which we synchronize access to the page's PG_BUSY flag and busy field using the containing object's lock rather than the global page queues lock. (Modifications to the page's PG_BUSY flag and busy field have asserted both locks for several weeks, enabling an incremental transition.)
parent 99b6fea1af
commit 85f5b24573

Notes:
    svn2git  2020-12-20 02:59:44 +00:00
    svn path=/head/; revision=138897
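For orientation before the diff, the following is a condensed sketch of the retry path that the optimized pmap_enter_quick() implementations gain in the hunks below. It is illustrative rather than the literal committed code: the surrounding function body is omitted, and the pmap, page queues, and object locks are assumed to be held on entry, as the new contract requires.

    /*
     * Condensed sketch of the new slow path (see the pmap_enter_quick()
     * hunks below).  In the common case the M_NOWAIT allocation succeeds
     * and none of this runs.
     */
    mpte = _pmap_allocpte(pmap, ptepindex, M_NOWAIT);
    if (mpte == NULL) {
        PMAP_UNLOCK(pmap);
        vm_page_busy(m);                /* set PG_BUSY while the object is still locked */
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(m->object);    /* drop the object lock only on this slow path */
        VM_WAIT;                        /* sleep until free pages are available */
        VM_OBJECT_LOCK(m->object);
        vm_page_lock_queues();
        vm_page_wakeup(m);              /* clear PG_BUSY again */
        PMAP_LOCK(pmap);
        goto retry;
    }

In the common, non-sleeping case none of the busying or unlocking above happens, which is exactly the per-call overhead that vm_map_pmap_enter() and vm_fault_prefault() previously paid unconditionally.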
@@ -153,6 +153,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
+#include <sys/malloc.h>
 #include <sys/proc.h>
 #include <sys/msgbuf.h>
 #include <sys/vmmeter.h>
@@ -334,7 +335,7 @@ static void pmap_insert_entry(pmap_t pmap, vm_offset_t va,
 
 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va);
 
-static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex);
+static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
 static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m);
 static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
 #ifdef SMP
@@ -1038,24 +1039,28 @@ pmap_pinit(pmap)
  * mapped correctly.
  */
 static vm_page_t
-_pmap_allocpte(pmap, ptepindex)
-    pmap_t pmap;
-    unsigned ptepindex;
+_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
 {
     pt_entry_t* pte;
     vm_offset_t ptepa;
     vm_page_t m;
 
+    KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
+        (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
+        ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
+
     /*
      * Find or fabricate a new pagetable page
      */
     if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
         VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
-        PMAP_UNLOCK(pmap);
-        vm_page_unlock_queues();
-        VM_WAIT;
-        vm_page_lock_queues();
-        PMAP_LOCK(pmap);
+        if (flags & M_WAITOK) {
+            PMAP_UNLOCK(pmap);
+            vm_page_unlock_queues();
+            VM_WAIT;
+            vm_page_lock_queues();
+            PMAP_LOCK(pmap);
+        }
 
         /*
          * Indicate the need to retry. While waiting, the page table
@@ -1082,7 +1087,8 @@ _pmap_allocpte(pmap, ptepindex)
         pt_entry_t* l1pte = &pmap->pm_lev1[l1index];
         pt_entry_t* l2map;
         if (!pmap_pte_v(l1pte)) {
-            if (_pmap_allocpte(pmap, NUSERLEV3MAPS + l1index) == NULL) {
+            if (_pmap_allocpte(pmap, NUSERLEV3MAPS + l1index,
+                flags) == NULL) {
                 --m->wire_count;
                 vm_page_free(m);
                 return (NULL);
@@ -1146,7 +1152,7 @@ pmap_allocpte(pmap_t pmap, vm_offset_t va)
         * Here if the pte page isn't mapped, or if it has been
         * deallocated.
         */
-        m = _pmap_allocpte(pmap, ptepindex);
+        m = _pmap_allocpte(pmap, ptepindex, M_WAITOK);
         if (m == NULL)
             goto retry;
     }
@@ -1842,9 +1848,20 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
                 }
                 mpte->wire_count++;
             } else {
-                mpte = _pmap_allocpte(pmap, ptepindex);
-                if (mpte == NULL)
+                mpte = _pmap_allocpte(pmap, ptepindex,
+                    M_NOWAIT);
+                if (mpte == NULL) {
+                    PMAP_UNLOCK(pmap);
+                    vm_page_busy(m);
+                    vm_page_unlock_queues();
+                    VM_OBJECT_UNLOCK(m->object);
+                    VM_WAIT;
+                    VM_OBJECT_LOCK(m->object);
+                    vm_page_lock_queues();
+                    vm_page_wakeup(m);
+                    PMAP_LOCK(pmap);
                     goto retry;
+                }
             }
         }
     } else {
@@ -2053,9 +2053,19 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
                 mpte->wire_count++;
             } else {
                 mpte = _pmap_allocpte(pmap, ptepindex,
-                    M_WAITOK);
-                if (mpte == NULL)
+                    M_NOWAIT);
+                if (mpte == NULL) {
+                    PMAP_UNLOCK(pmap);
+                    vm_page_busy(m);
+                    vm_page_unlock_queues();
+                    VM_OBJECT_UNLOCK(m->object);
+                    VM_WAIT;
+                    VM_OBJECT_LOCK(m->object);
+                    vm_page_lock_queues();
+                    vm_page_wakeup(m);
+                    PMAP_LOCK(pmap);
                     goto retry;
+                }
             }
         }
     } else {
@@ -3417,8 +3417,19 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 vm_page_t
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
 {
 
+    vm_page_lock_queues();
+    vm_page_busy(m);
+    vm_page_unlock_queues();
+    VM_OBJECT_UNLOCK(m->object);
     mtx_lock(&Giant);
     pmap_enter(pmap, va, m, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
     pmap_dcache_wbinv_all(pmap); /* XXX: shouldn't be needed */
     mtx_unlock(&Giant);
+    VM_OBJECT_LOCK(m->object);
+    vm_page_lock_queues();
+    vm_page_wakeup(m);
+    vm_page_unlock_queues();
     return (NULL);
 }
@@ -2094,9 +2094,19 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
                 mpte->wire_count++;
             } else {
                 mpte = _pmap_allocpte(pmap, ptepindex,
-                    M_WAITOK);
-                if (mpte == NULL)
+                    M_NOWAIT);
+                if (mpte == NULL) {
+                    PMAP_UNLOCK(pmap);
+                    vm_page_busy(m);
+                    vm_page_unlock_queues();
+                    VM_OBJECT_UNLOCK(m->object);
+                    VM_WAIT;
+                    VM_OBJECT_LOCK(m->object);
+                    vm_page_lock_queues();
+                    vm_page_wakeup(m);
+                    PMAP_LOCK(pmap);
                     goto retry;
+                }
             }
         }
     } else {
@@ -1631,9 +1631,13 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
     while ((pte = pmap_find_pte(va)) == NULL) {
         pmap_install(oldpmap);
         PMAP_UNLOCK(pmap);
+        vm_page_busy(m);
         vm_page_unlock_queues();
+        VM_OBJECT_UNLOCK(m->object);
         VM_WAIT;
+        VM_OBJECT_LOCK(m->object);
         vm_page_lock_queues();
+        vm_page_wakeup(m);
         PMAP_LOCK(pmap);
         oldpmap = pmap_install(pmap);
     }
@@ -1048,9 +1048,17 @@ vm_page_t
 pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
 {
 
+    vm_page_lock_queues();
+    vm_page_busy(m);
+    vm_page_unlock_queues();
+    VM_OBJECT_UNLOCK(m->object);
     mtx_lock(&Giant);
     pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
     mtx_unlock(&Giant);
+    VM_OBJECT_LOCK(m->object);
+    vm_page_lock_queues();
+    vm_page_wakeup(m);
+    vm_page_unlock_queues();
     return (NULL);
 }
@@ -1401,9 +1401,17 @@ vm_page_t
 pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
 {
 
+    vm_page_lock_queues();
+    vm_page_busy(m);
+    vm_page_unlock_queues();
+    VM_OBJECT_UNLOCK(m->object);
     mtx_lock(&Giant);
     pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
     mtx_unlock(&Giant);
+    VM_OBJECT_LOCK(m->object);
+    vm_page_lock_queues();
+    vm_page_wakeup(m);
+    vm_page_unlock_queues();
     return (NULL);
 }
@@ -973,23 +973,17 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
             VM_OBJECT_UNLOCK(lobject);
             break;
         }
-        vm_page_lock_queues();
         if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
             (m->busy == 0) &&
             (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
 
             if ((m->queue - m->pc) == PQ_CACHE) {
+                vm_page_lock_queues();
                 vm_page_deactivate(m);
+                vm_page_unlock_queues();
             }
-            vm_page_busy(m);
-            vm_page_unlock_queues();
-            VM_OBJECT_UNLOCK(lobject);
             mpte = pmap_enter_quick(pmap, addr, m, mpte);
-            VM_OBJECT_LOCK(lobject);
-            vm_page_lock_queues();
-            vm_page_wakeup(m);
         }
-        vm_page_unlock_queues();
         VM_OBJECT_UNLOCK(lobject);
     }
 }
@@ -1417,22 +1417,17 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
             cnt.v_free_count < cnt.v_free_reserved) {
             break;
         }
-        vm_page_lock_queues();
         if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
             (p->busy == 0) &&
             (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
-            if ((p->queue - p->pc) == PQ_CACHE)
+            if ((p->queue - p->pc) == PQ_CACHE) {
+                vm_page_lock_queues();
                 vm_page_deactivate(p);
-            vm_page_busy(p);
-            vm_page_unlock_queues();
-            VM_OBJECT_UNLOCK(object);
+                vm_page_unlock_queues();
+            }
             mpte = pmap_enter_quick(map->pmap,
                 addr + ptoa(tmpidx), p, mpte);
-            VM_OBJECT_LOCK(object);
-            vm_page_lock_queues();
-            vm_page_wakeup(p);
         }
-        vm_page_unlock_queues();
     }
 unlock_return:
     VM_OBJECT_UNLOCK(object);