mirror of https://git.FreeBSD.org/src.git
Replace all uses of the vm page queues lock with an r/w lock that is private to this pmap.c. This new r/w lock is used primarily to synchronize access to the PV lists. However, it will be used in a somewhat unconventional way: as finer-grained PV list locking is added to each of the pmap functions that acquire this r/w lock, its acquisition will be changed from write to read, enabling concurrent execution of the pmap functions with finer-grained locking.

X-MFC after:	r236045
parent 0615959a81
commit 8b0f4e0a0d

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=236158
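For orientation, the minimal sketch below condenses the locking pattern this change introduces, using only the calls that appear in the diff. It is illustrative rather than part of the commit: the wrapper functions example_init() and example_pv_op() are hypothetical stand-ins for pmap_bootstrap() and the various pmap functions, and the surrounding pmap code is omitted.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

/* Global lock protecting the PV lists, private to pmap.c. */
static struct rwlock pvh_global_lock;

/* Performed once at boot (in pmap_bootstrap() in the real code). */
static void
example_init(void)
{

	rw_init(&pvh_global_lock, "pvh global");
}

/*
 * How the pmap functions now bracket PV list manipulation: an exclusive
 * (write) acquisition replaces the old vm_page_lock_queues() /
 * vm_page_unlock_queues() pair, and internal helpers assert the r/w lock
 * instead of the page queues mutex.  Per the commit message, these write
 * acquisitions are expected to become read acquisitions as finer-grained
 * PV list locking is added.
 */
static void
example_pv_op(void)
{

	rw_wlock(&pvh_global_lock);
	rw_assert(&pvh_global_lock, RA_WLOCKED);	/* as the helpers do */
	/* ... walk or modify the PV lists ... */
	rw_wunlock(&pvh_global_lock);
}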
@@ -118,6 +118,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/msgbuf.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
+#include <sys/rwlock.h>
 #include <sys/sf_buf.h>
 #include <sys/sx.h>
 #include <sys/vmmeter.h>
@@ -236,6 +237,7 @@ static int pat_index[PAT_INDEX_SIZE]; /* cache mode to PAT index conversion */
 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
 static struct md_page *pv_table;
+static struct rwlock pvh_global_lock;
 static int shpgperproc = PMAP_SHPGPERPROC;
 
 struct pv_chunk *pv_chunkbase;	/* KVA block for pv_chunks */
@@ -392,6 +394,12 @@ pmap_bootstrap(vm_paddr_t firstaddr)
 	kernel_pmap->pm_root = NULL;
 	CPU_FILL(&kernel_pmap->pm_active);	/* don't allow deactivation */
 	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
+
+	/*
+	 * Initialize the global pv list lock.
+	 */
+	rw_init(&pvh_global_lock, "pvh global");
+
 	LIST_INIT(&allpmaps);
 
 	/*
@@ -1276,7 +1284,7 @@ invlcaddr(void *caddr)
  * scans are across different pmaps.  It is very wasteful
  * to do an entire invltlb for checking a single mapping.
  *
- * If the given pmap is not the current pmap, vm_page_queue_mtx
+ * If the given pmap is not the current pmap, pvh_global_lock
  * must be held and curthread pinned to a CPU.
  */
 static pt_entry_t *
@@ -1292,7 +1300,7 @@ pmap_pte_quick(pmap_t pmap, vm_offset_t va)
 	/* are we current address space or kernel? */
 	if (pmap_is_current(pmap))
 		return (vtopte(va));
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
 	newpf = *pde & PG_FRAME;
 	if ((*PMAP1 & PG_FRAME) != newpf) {
@@ -1841,9 +1849,9 @@ _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags)
 	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 		if (flags & M_WAITOK) {
 			PMAP_UNLOCK(pmap);
-			vm_page_unlock_queues();
+			rw_wunlock(&pvh_global_lock);
 			VM_WAIT;
-			vm_page_lock_queues();
+			rw_wlock(&pvh_global_lock);
 			PMAP_LOCK(pmap);
 		}
 
@@ -2339,7 +2347,7 @@ free_pv_entry(pmap_t pmap, pv_entry_t pv)
 	struct pv_chunk *pc;
 	int idx, field, bit;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	PV_STAT(pv_entry_frees++);
 	PV_STAT(pv_entry_spare++);
@@ -2382,8 +2390,8 @@ get_pv_entry(pmap_t pmap, int try)
 	struct pv_chunk *pc;
 	vm_page_t m;
 
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	PV_STAT(pv_entry_allocs++);
 	pv_entry_count++;
 	if (pv_entry_count > pv_entry_high_water)
@@ -2455,7 +2463,7 @@ pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 {
 	pv_entry_t pv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
@@ -2473,7 +2481,7 @@ pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 	vm_offset_t va_last;
 	vm_page_t m;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	KASSERT((pa & PDRMASK) == 0,
 	    ("pmap_pv_demote_pde: pa is not 4mpage aligned"));
 
@@ -2506,7 +2514,7 @@ pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 	vm_offset_t va_last;
 	vm_page_t m;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	KASSERT((pa & PDRMASK) == 0,
 	    ("pmap_pv_promote_pde: pa is not 4mpage aligned"));
 
@@ -2547,7 +2555,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
 {
 	struct md_page *pvh;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	pmap_pvh_free(&m->md, pmap, va);
 	if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) {
 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -2565,8 +2573,8 @@ pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
 {
 	pv_entry_t pv;
 
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	pv = get_pv_entry(pmap, FALSE);
 	pv->pv_va = va;
 	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
@@ -2580,8 +2588,8 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
 {
 	pv_entry_t pv;
 
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if (pv_entry_count < pv_entry_high_water &&
 	    (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 		pv->pv_va = va;
@@ -2600,7 +2608,7 @@ pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 	struct md_page *pvh;
 	pv_entry_t pv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	if (pv_entry_count < pv_entry_high_water &&
 	    (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 		pv->pv_va = va;
@@ -2678,7 +2686,7 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 	 */
 	if (va >= KERNBASE)
 		firstpte = &KPTmap[i386_btop(trunc_4mpage(va))];
-	else if (curthread->td_pinned > 0 && mtx_owned(&vm_page_queue_mtx)) {
+	else if (curthread->td_pinned > 0 && rw_wowned(&pvh_global_lock)) {
 		if ((*PMAP1 & PG_FRAME) != mptepa) {
 			*PMAP1 = mptepa | PG_RW | PG_V | PG_A | PG_M;
 #ifdef SMP
@@ -2837,7 +2845,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free)
 	pt_entry_t oldpte;
 	vm_page_t m;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	oldpte = pte_load_clear(ptq);
 	if (oldpte & PG_W)
@@ -2868,7 +2876,7 @@ pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free)
 {
 	pt_entry_t *pte;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
@@ -2900,7 +2908,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 
 	anyvalid = 0;
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	sched_pin();
 	PMAP_LOCK(pmap);
 
@@ -2989,7 +2997,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 	sched_unpin();
 	if (anyvalid)
 		pmap_invalidate_all(pmap);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 	pmap_free_zero_pages(free);
 }
@@ -3021,7 +3029,7 @@ pmap_remove_all(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_all: page %p is not managed", m));
 	free = NULL;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	sched_pin();
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		goto small_mappings;
@@ -3062,7 +3070,7 @@ pmap_remove_all(vm_page_t m)
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	sched_unpin();
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	pmap_free_zero_pages(free);
 }
 
@@ -3138,7 +3146,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 	else {
 		pv_lists_locked = TRUE;
 resume:
-		vm_page_lock_queues();
+		rw_wlock(&pvh_global_lock);
 		sched_pin();
 	}
 	anychanged = FALSE;
@@ -3182,7 +3190,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 		} else {
 			if (!pv_lists_locked) {
 				pv_lists_locked = TRUE;
-				if (!mtx_trylock(&vm_page_queue_mtx)) {
+				if (!rw_try_wlock(&pvh_global_lock)) {
 					if (anychanged)
 						pmap_invalidate_all(
 						    pmap);
@@ -3251,7 +3259,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 		pmap_invalidate_all(pmap);
 	if (pv_lists_locked) {
 		sched_unpin();
-		vm_page_unlock_queues();
+		rw_wunlock(&pvh_global_lock);
 	}
 	PMAP_UNLOCK(pmap);
 }
@@ -3422,7 +3430,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 
 	mpte = NULL;
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	sched_pin();
 
@@ -3592,7 +3600,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 		pmap_promote_pde(pmap, pde, va);
 
 	sched_unpin();
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
 
@@ -3607,7 +3615,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 	pd_entry_t *pde, newpde;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	pde = pmap_pde(pmap, va);
 	if (*pde != 0) {
@@ -3676,7 +3684,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 	psize = atop(end - start);
 	mpte = NULL;
 	m = m_start;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		va = start + ptoa(diff);
@@ -3690,7 +3698,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 		    mpte);
 		m = TAILQ_NEXT(m, listq);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
 
@@ -3707,10 +3715,10 @@ void
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
 
@@ -3725,7 +3733,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 
 	/*
@@ -3931,9 +3939,9 @@ pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
 	if (!wired != ((*pde & PG_W) == 0)) {
 		if (!are_queues_locked) {
 			are_queues_locked = TRUE;
-			if (!mtx_trylock(&vm_page_queue_mtx)) {
+			if (!rw_try_wlock(&pvh_global_lock)) {
 				PMAP_UNLOCK(pmap);
-				vm_page_lock_queues();
+				rw_wlock(&pvh_global_lock);
 				goto retry;
 			}
 		}
@@ -3957,7 +3965,7 @@ pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
 	pmap_pte_release(pte);
 out:
 	if (are_queues_locked)
-		vm_page_unlock_queues();
+		rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
 
@@ -3986,7 +3994,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 	if (!pmap_is_current(src_pmap))
 		return;
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	if (dst_pmap < src_pmap) {
 		PMAP_LOCK(dst_pmap);
 		PMAP_LOCK(src_pmap);
@@ -4076,7 +4084,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 	}
 out:
 	sched_unpin();
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(src_pmap);
 	PMAP_UNLOCK(dst_pmap);
 }
@@ -4218,7 +4226,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_page_exists_quick: page %p is not managed", m));
 	rv = FALSE;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		if (PV_PMAP(pv) == pmap) {
 			rv = TRUE;
@@ -4240,7 +4248,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 			break;
 		}
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
 
@@ -4258,13 +4266,13 @@ pmap_page_wired_mappings(vm_page_t m)
 	count = 0;
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	count = pmap_pvh_wired_mappings(&m->md, count);
 	if ((m->flags & PG_FICTITIOUS) == 0) {
 		count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)),
 		    count);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (count);
 }
 
@@ -4280,7 +4288,7 @@ pmap_pvh_wired_mappings(struct md_page *pvh, int count)
 	pt_entry_t *pte;
 	pv_entry_t pv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	sched_pin();
 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
@@ -4305,11 +4313,11 @@ pmap_page_is_mapped(vm_page_t m)
 
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (FALSE);
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
 	    ((m->flags & PG_FICTITIOUS) == 0 &&
 	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
 
@@ -4339,7 +4347,7 @@ pmap_remove_pages(pmap_t pmap)
 		printf("warning: pmap_remove_pages called with non-current pmap\n");
 		return;
 	}
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	sched_pin();
 	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
@@ -4451,7 +4459,7 @@ pmap_remove_pages(pmap_t pmap)
 	}
 	sched_unpin();
 	pmap_invalidate_all(pmap);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 	pmap_free_zero_pages(free);
 }
@@ -4479,11 +4487,11 @@ pmap_is_modified(vm_page_t m)
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return (FALSE);
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	rv = pmap_is_modified_pvh(&m->md) ||
 	    ((m->flags & PG_FICTITIOUS) == 0 &&
 	    pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
 
@@ -4500,7 +4508,7 @@ pmap_is_modified_pvh(struct md_page *pvh)
 	pmap_t pmap;
 	boolean_t rv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	rv = FALSE;
 	sched_pin();
 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
@@ -4553,11 +4561,11 @@ pmap_is_referenced(vm_page_t m)
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	rv = pmap_is_referenced_pvh(&m->md) ||
 	    ((m->flags & PG_FICTITIOUS) == 0 &&
 	    pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
 
@@ -4573,7 +4581,7 @@ pmap_is_referenced_pvh(struct md_page *pvh)
 	pmap_t pmap;
 	boolean_t rv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	rv = FALSE;
 	sched_pin();
 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
@@ -4614,7 +4622,7 @@ pmap_remove_write(vm_page_t m)
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	sched_pin();
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		goto small_mappings;
@@ -4655,7 +4663,7 @@ pmap_remove_write(vm_page_t m)
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	sched_unpin();
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -4684,7 +4692,7 @@ pmap_ts_referenced(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_ts_referenced: page %p is not managed", m));
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	sched_pin();
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		goto small_mappings;
@@ -4743,7 +4751,7 @@ pmap_ts_referenced(vm_page_t m)
 	}
 out:
 	sched_unpin();
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (rtval);
 }
 
@@ -4773,7 +4781,7 @@ pmap_clear_modify(vm_page_t m)
 	 */
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	sched_pin();
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		goto small_mappings;
@@ -4834,7 +4842,7 @@ pmap_clear_modify(vm_page_t m)
 		PMAP_UNLOCK(pmap);
 	}
 	sched_unpin();
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -4854,7 +4862,7 @@ pmap_clear_reference(vm_page_t m)
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	sched_pin();
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		goto small_mappings;
@@ -4901,7 +4909,7 @@ pmap_clear_reference(vm_page_t m)
 		PMAP_UNLOCK(pmap);
 	}
 	sched_unpin();
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 }
 
 /*