Introduce the function pmap_enter_object(). It maps a sequence of resident
pages from the same object. Use it in vm_map_pmap_enter() to reduce the
locking overhead of premapping objects.

Reviewed by:	tegge@
This commit is contained in:
parent 4bf452f14a
commit ce142d9ec0
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=159303
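All of the per-architecture implementations in the diff below share the same shape: walk the object's resident-page list starting at m_start, map each page at start plus its page offset within the run, and take the pmap lock once for the whole run instead of once per page. The following is an illustrative sketch of that loop, not the committed code; it assumes the usual kernel helpers (PMAP_LOCK/PMAP_UNLOCK, atop(), ptoa(), TAILQ_NEXT), and enter_one_locked() is a hypothetical stand-in for the per-architecture pmap_enter_quick_locked()/pmap_enter_locked() worker.

/*
 * Illustrative sketch only, not the committed code.  Maps the run of
 * resident pages beginning at m_start into [start, end) under a single
 * pmap lock acquisition; enter_one_locked() stands in for the
 * per-architecture locked worker shown in the hunks below.
 */
static void
enter_object_sketch(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_pindex_t diff, psize;
	vm_page_t m;

	psize = atop(end - start);	/* pages covered by the virtual range */
	PMAP_LOCK(pmap);		/* one lock acquisition for the run */
	for (m = m_start; m != NULL &&
	    (diff = m->pindex - m_start->pindex) < psize;
	    m = TAILQ_NEXT(m, listq))
		enter_one_locked(pmap, start + ptoa(diff), m, prot);
	PMAP_UNLOCK(pmap);
}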
@@ -209,6 +209,8 @@ static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);
static void pmap_clear_ptes(vm_page_t m, long bit);
static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
    vm_page_t m, vm_prot_t prot, vm_page_t mpte);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq,
    vm_offset_t sva, pd_entry_t ptepde);
static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde);
@@ -2309,6 +2311,37 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	PMAP_UNLOCK(pmap);
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start. This page is
 * mapped at the given virtual address start. Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object. The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end. Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m, mpte;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	mpte = NULL;
	m = m_start;
	PMAP_LOCK(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m,
		    prot, mpte);
		m = TAILQ_NEXT(m, listq);
	}
	PMAP_UNLOCK(pmap);
}
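The address arithmetic above is the only per-page work besides the mapping itself: a page whose pindex is diff pages past m_start lands diff pages past start, and pages that are not resident simply never appear in the TAILQ walk, which is why gaps in [start, end) are left unmapped. A small illustrative helper, not part of the commit, makes the relationship explicit:

/*
 * Illustrative only: the virtual address chosen for a page in the run.
 * ptoa(diff) is diff * PAGE_SIZE, so each page keeps the same offset
 * relative to start that it has relative to m_start within the object.
 */
static vm_offset_t
va_for_page(vm_offset_t start, vm_page_t m_start, vm_page_t m)
{
	vm_pindex_t diff;

	diff = m->pindex - m_start->pindex;
	return (start + ptoa(diff));
}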

/*
 * this code makes some *MAJOR* assumptions:
 * 1. Current pmap & pmap exists.
@@ -2322,15 +2355,26 @@ vm_page_t
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    vm_page_t mpte)
{

	PMAP_LOCK(pmap);
	mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte);
	PMAP_UNLOCK(pmap);
	return (mpte);
}

static vm_page_t
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, vm_page_t mpte)
{
	pt_entry_t *pte;
	vm_paddr_t pa;

	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
	    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
	    ("pmap_enter_quick: managed mapping within the clean submap"));
	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	PMAP_LOCK(pmap);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * In the case that a page table page is not
@@ -2395,7 +2439,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
			pmap_unwire_pte_hold(pmap, va, mpte);
			mpte = NULL;
		}
		goto out;
		return (mpte);
	}

	/*
@@ -2422,8 +2466,6 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
		pte_store(pte, pa | PG_V | PG_U);
	else
		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
out:
	PMAP_UNLOCK(pmap);
	return mpte;
}

@@ -3578,6 +3578,34 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	}
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start. This page is
 * mapped at the given virtual address start. Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object. The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end. Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		pmap_enter_locked(pmap, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
}

/*
 * this code makes some *MAJOR* assumptions:
 * 1. Current pmap & pmap exists.
@@ -3592,7 +3620,6 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    vm_page_t mpte)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
	    FALSE);
	return (NULL);

@@ -268,6 +268,8 @@ static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);
static void pmap_clear_ptes(vm_page_t m, int bit);
static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
    vm_page_t m, vm_prot_t prot, vm_page_t mpte);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
@@ -2385,6 +2387,37 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	PMAP_UNLOCK(pmap);
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start. This page is
 * mapped at the given virtual address start. Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object. The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end. Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m, mpte;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	mpte = NULL;
	m = m_start;
	PMAP_LOCK(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m,
		    prot, mpte);
		m = TAILQ_NEXT(m, listq);
	}
	PMAP_UNLOCK(pmap);
}

/*
 * this code makes some *MAJOR* assumptions:
 * 1. Current pmap & pmap exists.
@@ -2398,15 +2431,26 @@ vm_page_t
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    vm_page_t mpte)
{

	PMAP_LOCK(pmap);
	mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte);
	PMAP_UNLOCK(pmap);
	return (mpte);
}

static vm_page_t
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, vm_page_t mpte)
{
	pt_entry_t *pte;
	vm_paddr_t pa;

	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
	    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
	    ("pmap_enter_quick: managed mapping within the clean submap"));
	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	PMAP_LOCK(pmap);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * In the case that a page table page is not
@@ -2471,7 +2515,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
			pmap_unwire_pte_hold(pmap, mpte);
			mpte = NULL;
		}
		goto out;
		return (mpte);
	}

	/*
@@ -2496,8 +2540,6 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
		pte_store(pte, pa | PG_V | PG_U);
	else
		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
out:
	PMAP_UNLOCK(pmap);
	return mpte;
}

@@ -236,6 +236,8 @@ static struct ia64_lpte *pmap_find_vhpt(vm_offset_t va);
static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t locked_pmap);
static void pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
    vm_page_t m, vm_prot_t prot);
static pmap_t pmap_install(pmap_t);
static void pmap_invalidate_all(pmap_t pmap);
static int pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte,
@@ -1625,6 +1627,35 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	PMAP_UNLOCK(pmap);
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start. This page is
 * mapped at the given virtual address start. Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object. The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end. Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	PMAP_LOCK(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		pmap_enter_quick_locked(pmap, start + ptoa(diff), m, prot);
		m = TAILQ_NEXT(m, listq);
	}
	PMAP_UNLOCK(pmap);
}

/*
 * this code makes some *MAJOR* assumptions:
 * 1. Current pmap & pmap exists.
@@ -1638,16 +1669,27 @@ vm_page_t
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    vm_page_t mpte)
{

	PMAP_LOCK(pmap);
	pmap_enter_quick_locked(pmap, va, m, prot);
	PMAP_UNLOCK(pmap);
	return (NULL);
}

static void
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{
	struct ia64_lpte *pte;
	pmap_t oldpmap;
	boolean_t managed;

	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
	    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
	    ("pmap_enter_quick: managed mapping within the clean submap"));
	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	PMAP_LOCK(pmap);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	oldpmap = pmap_install(pmap);

	while ((pte = pmap_find_pte(va)) == NULL) {
@@ -1683,8 +1725,6 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	}

	pmap_install(oldpmap);
	PMAP_UNLOCK(pmap);
	return (NULL);
}

/*
@@ -290,6 +290,8 @@ static struct pte *moea_pvo_to_pte(const struct pvo_entry *, int);
/*
 * Utility routines.
 */
static void moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);
static struct pvo_entry *moea_rkva_alloc(mmu_t);
static void moea_pa_map(struct pvo_entry *, vm_offset_t,
    struct pte *, int *);

@@ -309,6 +311,8 @@ void moea_clear_modify(mmu_t, vm_page_t);
void moea_clear_reference(mmu_t, vm_page_t);
void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
vm_page_t moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
    vm_page_t);
vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);

@@ -345,6 +349,7 @@ static mmu_method_t moea_methods[] = {
	MMUMETHOD(mmu_clear_reference, moea_clear_reference),
	MMUMETHOD(mmu_copy_page, moea_copy_page),
	MMUMETHOD(mmu_enter, moea_enter),
	MMUMETHOD(mmu_enter_object, moea_enter_object),
	MMUMETHOD(mmu_enter_quick, moea_enter_quick),
	MMUMETHOD(mmu_extract, moea_extract),
	MMUMETHOD(mmu_extract_and_hold, moea_extract_and_hold),

@@ -828,7 +833,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
			struct vm_page m;

			m.phys_addr = translations[i].om_pa + off;
			moea_enter(mmup, &ofw_pmap,
			moea_enter_locked(&ofw_pmap,
			    translations[i].om_va + off, &m,
			    VM_PROT_ALL, 1);
			ofw_mappings++;

@@ -1031,6 +1036,25 @@ void
moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	pmap_enter_locked(pmap, va, m, prot, wired);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested. If specified the page
 * will be wired down.
 *
 * The page queues and pmap must be locked.
 */
static void
moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{
	struct pvo_head *pvo_head;
	uma_zone_t zone;
	vm_page_t pg;

@@ -1051,8 +1075,8 @@ moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
		was_exec = 0;
	}
	if (pmap_bootstrapped)
		vm_page_lock_queues();
	PMAP_LOCK(pmap);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/* XXX change the pvo head for fake pages */
	if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS)

@@ -1115,12 +1139,39 @@ moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
		if (pg != NULL)
			moea_attr_save(pg, PTE_EXEC);
	}
	if (pmap_bootstrapped)
		vm_page_unlock_queues();

	/* XXX syncicache always until problems are sorted */
	moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
	PMAP_UNLOCK(pmap);
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start. This page is
 * mapped at the given virtual address start. Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object. The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end. Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	PMAP_LOCK(pm);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		moea_enter_locked(pm, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
	PMAP_UNLOCK(pm);
}

vm_page_t
@@ -1128,16 +1179,10 @@ moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, vm_page_t mpte)
{

	vm_page_busy(m);
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(m->object);
	mtx_lock(&Giant);
	moea_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
	PMAP_LOCK(pm);
	moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
	    FALSE);
	mtx_unlock(&Giant);
	VM_OBJECT_LOCK(m->object);
	vm_page_lock_queues();
	vm_page_wakeup(m);
	PMAP_UNLOCK(pm);
	return (NULL);
}

@@ -200,6 +200,25 @@ METHOD void enter {
};


/**
 * @brief Maps a sequence of resident pages belonging to the same object.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _m_start	physical page mapped at start
 * @param _prot		mapping page protection
 */
METHOD void enter_object {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_page_t	_m_start;
	vm_prot_t	_prot;
};
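mmu_if.m is the kobj interface description for the PowerPC pmap layer: each METHOD entry generates a typed dispatch macro that the machine-independent code calls, and the MMUMETHOD table shown above registers moea_enter_object as the implementation. The corresponding dispatch stub lives in pmap_dispatch.c, which is not part of this excerpt; the macro name MMU_ENTER_OBJECT and the mmu_obj handle below are assumptions following the usual kobj naming, so treat this as a sketch of the expected shape rather than the committed code.

/* Sketch of the assumed MI-to-MMU dispatch stub (not shown in this diff). */
void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	MMU_ENTER_OBJECT(mmu_obj, pmap, start, end, m_start, prot);
}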

/**
 * @brief A faster entry point for page mapping where it is possible
 * to short-circuit some of the tests in pmap_enter.
@@ -147,6 +147,16 @@ struct pmap kernel_pmap_store;
 */
static vm_paddr_t pmap_bootstrap_alloc(vm_size_t size);

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested. If specified the page
 * will be wired down.
 *
 * The page queues and pmap must be locked.
 */
static void pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired);

extern int tl1_immu_miss_patch_1[];
extern int tl1_immu_miss_patch_2[];
extern int tl1_dmmu_miss_patch_1[];

@@ -1259,11 +1269,32 @@ void
pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	pmap_enter_locked(pm, va, m, prot, wired);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested. If specified the page
 * will be wired down.
 *
 * The page queues and pmap must be locked.
 */
static void
pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{
	struct tte *tp;
	vm_paddr_t pa;
	u_long data;
	int i;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	PMAP_STATS_INC(pmap_nenter);
	pa = VM_PAGE_TO_PHYS(m);

@@ -1284,9 +1315,6 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	    "pmap_enter: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
	    pm->pm_context[PCPU_GET(cpuid)], m, va, pa, prot, wired);

	vm_page_lock_queues();
	PMAP_LOCK(pm);

	/*
	 * If there is an existing mapping, and the physical address has not
	 * changed, must be protection or wiring change.

@@ -1383,7 +1411,35 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,

		tsb_tte_enter(pm, m, va, TS_8K, data);
	}
	vm_page_unlock_queues();
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start. This page is
 * mapped at the given virtual address start. Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object. The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end. Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
pmap_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	PMAP_LOCK(pm);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		pmap_enter_locked(pm, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
	PMAP_UNLOCK(pm);
}

@@ -1392,13 +1448,10 @@ pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    vm_page_t mpte)
{

	vm_page_busy(m);
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(m->object);
	pmap_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
	VM_OBJECT_LOCK(m->object);
	vm_page_lock_queues();
	vm_page_wakeup(m);
	PMAP_LOCK(pm);
	pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
	    FALSE);
	PMAP_UNLOCK(pm);
	return (NULL);
}

@@ -99,6 +99,8 @@ void pmap_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
    boolean_t);
vm_page_t pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, vm_page_t mpte);
void pmap_enter_object(pmap_t pmap, vm_offset_t start,
    vm_offset_t end, vm_page_t m_start, vm_prot_t prot);
vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va);
vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va,
    vm_prot_t prot);
@@ -1437,9 +1437,9 @@ void
vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
{
	vm_offset_t tmpidx;
	vm_offset_t start, tmpidx;
	int psize;
	vm_page_t p, mpte;
	vm_page_t p, p_start;
	boolean_t are_queues_locked;

	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)

@@ -1465,7 +1465,8 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
	}

	are_queues_locked = FALSE;
	mpte = NULL;
	start = 0;
	p_start = NULL;

	if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		if (p->pindex < pindex) {

@@ -1493,16 +1494,25 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
		if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
		    (p->busy == 0) &&
		    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
			if (p_start == NULL) {
				start = addr + ptoa(tmpidx);
				p_start = p;
			}
			if (!are_queues_locked) {
				are_queues_locked = TRUE;
				vm_page_lock_queues();
			}
			if (VM_PAGE_INQUEUE1(p, PQ_CACHE))
				vm_page_deactivate(p);
			mpte = pmap_enter_quick(map->pmap,
			    addr + ptoa(tmpidx), p, prot, mpte);
		} else if (p_start != NULL) {
			pmap_enter_object(map->pmap, start, addr +
			    ptoa(tmpidx), p_start, prot);
			p_start = NULL;
		}
	}
	if (p_start != NULL)
		pmap_enter_object(map->pmap, start, addr + size, p_start,
		    prot);
	if (are_queues_locked)
		vm_page_unlock_queues();
unlock_return:
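The caller-side change is a simple run accumulator: remember where a run of mappable resident pages begins (p_start, start), flush it with a single pmap_enter_object() call when a page breaks the run, and flush any still-open run after the loop. Stripped of the queue locking and the page-state checks of the real loop, the pattern reduces to the following illustrative sketch, where page_is_mappable() is a hypothetical stand-in for the valid/busy/fictitious tests above:

/*
 * Illustrative sketch of the run batching added to vm_map_pmap_enter().
 * page_is_mappable() is a hypothetical predicate standing in for the
 * valid/busy/PG_FICTITIOUS tests in the committed loop.
 */
static void
premap_resident_runs(vm_map_t map, vm_object_t object, vm_offset_t addr,
    vm_pindex_t pindex, vm_size_t size, vm_prot_t prot)
{
	vm_offset_t start;
	vm_pindex_t tmpidx;
	vm_page_t p, p_start;

	start = 0;
	p_start = NULL;
	TAILQ_FOREACH(p, &object->memq, listq) {
		tmpidx = p->pindex - pindex;
		if (page_is_mappable(p)) {
			if (p_start == NULL) {		/* a new run begins */
				start = addr + ptoa(tmpidx);
				p_start = p;
			}
		} else if (p_start != NULL) {		/* run broken: flush */
			pmap_enter_object(map->pmap, start,
			    addr + ptoa(tmpidx), p_start, prot);
			p_start = NULL;
		}
	}
	if (p_start != NULL)				/* flush the final run */
		pmap_enter_object(map->pmap, start, addr + size, p_start,
		    prot);
}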