
Introduce a new software PTE flag that indicates whether the mapping is for a managed page.

Tested by:	jchandra
Alan Cox 2012-09-01 03:46:28 +00:00
parent 43981b6c53
commit 2d1f72d4b4
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=239964
3 changed files with 29 additions and 41 deletions
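
The change replaces a lookup keyed on the physical address (page_is_managed(), deleted from pmap.c below) with a software bit stored directly in the PTE, so the pmap code no longer has to convert a PTE to a physical address and then to a vm_page just to learn whether a mapping is managed. A minimal sketch of the two styles of check, using names that appear in the diff below; the wrapper functions themselves are illustrative and not part of the commit:

/*
 * Illustrative comparison only: is_managed_old()/is_managed_new() are
 * made-up names; page_is_managed(), pte_test(), PTE_MANAGED and
 * TLBLO_PTE_TO_PA() are the names used in the diff below.
 */
static int
is_managed_old(pt_entry_t pte)
{
	/* Old: physical address -> vm_page -> VPO_UNMANAGED check. */
	return (page_is_managed(TLBLO_PTE_TO_PA(pte)));
}

static int
is_managed_new(pt_entry_t pte)
{
	/* New: the answer was recorded in a software PTE bit when the
	 * mapping was created, so no vm_page lookup is needed. */
	return (pte_test(&pte, PTE_MANAGED));
}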

View File

@ -171,7 +171,6 @@ void pmap_bootstrap(void);
void *pmap_mapdev(vm_paddr_t, vm_size_t);
void pmap_unmapdev(vm_offset_t, vm_size_t);
vm_offset_t pmap_steal_memory(vm_size_t size);
-int page_is_managed(vm_paddr_t pa);
void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr);
void pmap_kremove(vm_offset_t va);

View File

@ -57,8 +57,8 @@ typedef pt_entry_t *pd_entry_t;
/*
* PFN for EntryLo register. Upper bits are 0, which is to say that
- * bit 29 is the last hardware bit; Bits 30 and upwards (EntryLo is
- * 64 bit though it can be referred to in 32-bits providing 2 software
+ * bit 28 is the last hardware bit; Bits 29 and upwards (EntryLo is
+ * 64 bit though it can be referred to in 32-bits providing 3 software
* bits safely. We use it as 64 bits to get many software bits, and
* god knows what else.) are unacknowledged by hardware. They may be
* written as anything, but otherwise they have as much meaning as
@ -68,11 +68,11 @@ typedef pt_entry_t *pd_entry_t;
#define TLBLO_SWBITS_SHIFT (34)
#define TLBLO_PFN_MASK 0x3FFFFFFC0ULL
#else
-#define TLBLO_SWBITS_SHIFT (30)
-#define TLBLO_PFN_MASK (0x3FFFFFC0)
+#define TLBLO_SWBITS_SHIFT (29)
+#define TLBLO_PFN_MASK (0x1FFFFFC0)
#endif
#define TLBLO_PFN_SHIFT (6)
-#define TLBLO_SWBITS_MASK ((pt_entry_t)0x3 << TLBLO_SWBITS_SHIFT)
+#define TLBLO_SWBITS_MASK ((pt_entry_t)0x7 << TLBLO_SWBITS_SHIFT)
#define TLBLO_PA_TO_PFN(pa) ((((pa) >> TLB_PAGE_SHIFT) << TLBLO_PFN_SHIFT) & TLBLO_PFN_MASK)
#define TLBLO_PFN_TO_PA(pfn) ((vm_paddr_t)((pfn) >> TLBLO_PFN_SHIFT) << TLB_PAGE_SHIFT)
#define TLBLO_PTE_TO_PFN(pte) ((pte) & TLBLO_PFN_MASK)
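
For the 32-bit EntryLo case the new constants carve the register up as shown below; the worked example is derived from the masks above and assumes 4 KB pages (TLB_PAGE_SHIFT == 12), which is an assumption and not part of this diff:

/*
 * 32-bit EntryLo layout implied by the new masks:
 *   bits 31..29  software bits  (TLBLO_SWBITS_MASK = 0x7 << 29)
 *   bits 28..6   PFN            (TLBLO_PFN_MASK    = 0x1FFFFFC0)
 *   bits  5..0   hardware bits  (G, V, D and the 3-bit cache attribute)
 *
 * Worked example with pa = 0x01234000 and 4 KB pages:
 *   TLBLO_PA_TO_PFN(pa)  = ((0x01234000 >> 12) << 6) & 0x1FFFFFC0
 *                        = 0x00048D00
 *   TLBLO_PFN_TO_PA(pfn) = (0x00048D00 >> 6) << 12
 *                        = 0x01234000
 */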
@ -132,9 +132,11 @@ typedef pt_entry_t *pd_entry_t;
* RO: Read only. Never set PTE_D on this page, and don't
* listen to requests to write to it.
* W: Wired. ???
+ * MANAGED:Managed. This PTE maps a managed page.
*/
#define PTE_RO ((pt_entry_t)0x01 << TLBLO_SWBITS_SHIFT)
#define PTE_W ((pt_entry_t)0x02 << TLBLO_SWBITS_SHIFT)
+#define PTE_MANAGED ((pt_entry_t)0x04 << TLBLO_SWBITS_SHIFT)
/*
* PTE management functions for bits defined above.
@ -160,7 +162,7 @@ typedef pt_entry_t *pd_entry_t;
#define PTESIZE 4
#define PTE_L lw
#define PTE_MTC0 mtc0
-#define CLEAR_PTE_SWBITS(r) sll r, 2; srl r, 2 /* remove 2 high bits */
+#define CLEAR_PTE_SWBITS(r) sll r, 3; srl r, 3 /* remove 3 high bits */
#endif /* defined(__mips_n64) || defined(__mips_n32) */
#if defined(__mips_n64)
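
The pmap.c changes in the next file test these flags with pte_test(), which is true only when all of the requested bits are set; that is what makes the combined pte_test(&pbits, PTE_MANAGED | PTE_D) check in pmap_protect() below mean "managed and dirty". A rough sketch of the accessors, whose exact definitions live elsewhere in pte.h and are not part of this diff (treat the bodies as an assumption):

/* Assumed shape of the PTE accessors used by pmap.c below. */
#define pte_clear(pte, bit)	(*(pte) &= ~(bit))
#define pte_set(pte, bit)	(*(pte) |= (bit))
#define pte_test(pte, bit)	((*(pte) & (bit)) == (bit))	/* all bits set */

/*
 * Consequently pte_test(&pbits, PTE_MANAGED | PTE_D), as used in
 * pmap_protect() below, is true only for a managed mapping whose
 * page has been dirtied.
 */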

View File

@ -1691,9 +1691,9 @@ pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
pmap->pm_stats.wired_count -= 1;
pmap->pm_stats.resident_count -= 1;
-pa = TLBLO_PTE_TO_PA(oldpte);
-if (page_is_managed(pa)) {
+if (pte_test(&oldpte, PTE_MANAGED)) {
+pa = TLBLO_PTE_TO_PA(oldpte);
m = PHYS_TO_VM_PAGE(pa);
if (pte_test(&oldpte, PTE_D)) {
KASSERT(!pte_test(&oldpte, PTE_RO),
@ -1930,8 +1930,8 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
if (!pte_test(pte, PTE_V))
continue;
pbits = *pte;
-pa = TLBLO_PTE_TO_PA(pbits);
-if (page_is_managed(pa) && pte_test(&pbits, PTE_D)) {
+if (pte_test(&pbits, PTE_MANAGED | PTE_D)) {
+pa = TLBLO_PTE_TO_PA(pbits);
m = PHYS_TO_VM_PAGE(pa);
vm_page_dirty(m);
}
@ -1975,6 +1975,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0,
("pmap_enter: page %p is not busy", m));
+pa = VM_PAGE_TO_PHYS(m);
+newpte = TLBLO_PA_TO_PFN(pa) | PTE_V;
mpte = NULL;
@ -1997,7 +1999,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
panic("pmap_enter: invalid page directory, pdir=%p, va=%p",
(void *)pmap->pm_segtab, (void *)va);
}
-pa = VM_PAGE_TO_PHYS(m);
om = NULL;
origpte = *pte;
opa = TLBLO_PTE_TO_PA(origpte);
@ -2027,8 +2028,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
if (mpte)
mpte->wire_count--;
-if (page_is_managed(opa)) {
+if (pte_test(&origpte, PTE_MANAGED)) {
om = m;
+newpte |= PTE_MANAGED;
}
goto validate;
}
@ -2043,7 +2045,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
if (pte_test(&origpte, PTE_W))
pmap->pm_stats.wired_count--;
-if (page_is_managed(opa)) {
+if (pte_test(&origpte, PTE_MANAGED)) {
om = PHYS_TO_VM_PAGE(opa);
pv = pmap_pvh_remove(&om->md, pmap, va);
}
@ -2068,6 +2070,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
pv = get_pv_entry(pmap, FALSE);
pv->pv_va = va;
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+newpte |= PTE_MANAGED;
} else if (pv != NULL)
free_pv_entry(pmap, pv);
@ -2088,7 +2091,7 @@ validate:
/*
* Now validate mapping with desired protection/wiring.
*/
-newpte = TLBLO_PA_TO_PFN(pa) | rw | PTE_V;
+newpte |= rw;
if (is_cacheable_mem(pa))
newpte |= PTE_C_CACHE;
@ -2108,7 +2111,7 @@ validate:
if (origpte != newpte) {
if (pte_test(&origpte, PTE_V)) {
*pte = newpte;
-if (page_is_managed(opa) && (opa != pa)) {
+if (pte_test(&origpte, PTE_MANAGED) && opa != pa) {
if (om->md.pv_flags & PV_TABLE_REF)
vm_page_aflag_set(om, PGA_REFERENCED);
om->md.pv_flags &= ~PV_TABLE_REF;
@ -2117,10 +2120,10 @@ validate:
KASSERT(!pte_test(&origpte, PTE_RO),
("pmap_enter: modified page not writable:"
" va: %p, pte: %#jx", (void *)va, (uintmax_t)origpte));
-if (page_is_managed(opa))
+if (pte_test(&origpte, PTE_MANAGED))
vm_page_dirty(om);
}
-if (page_is_managed(opa) &&
+if (pte_test(&origpte, PTE_MANAGED) &&
TAILQ_EMPTY(&om->md.pv_list))
vm_page_aflag_clear(om, PGA_WRITEABLE);
} else {
@ -2247,6 +2250,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
* Now validate mapping with RO protection
*/
*pte = TLBLO_PA_TO_PFN(pa) | PTE_V;
+if ((m->oflags & VPO_UNMANAGED) == 0)
+*pte |= PTE_MANAGED;
if (is_cacheable_mem(pa))
*pte |= PTE_C_CACHE;
@ -2982,7 +2987,6 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
vm_paddr_t pa;
vm_page_t m;
int val;
-boolean_t managed;
PMAP_LOCK(pmap);
retry:
@ -2996,8 +3000,7 @@ retry:
if (pte_test(&pte, PTE_D))
val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
pa = TLBLO_PTE_TO_PA(pte);
-managed = page_is_managed(pa);
-if (managed) {
+if (pte_test(&pte, PTE_MANAGED)) {
/*
* This may falsely report the given address as
* MINCORE_REFERENCED. Unfortunately, due to the lack of
@ -3009,7 +3012,8 @@ retry:
val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
}
if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
-(MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
+(MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
+pte_test(&pte, PTE_MANAGED)) {
/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
goto retry;
@ -3232,23 +3236,6 @@ pmap_asid_alloc(pmap)
}
}
-int
-page_is_managed(vm_paddr_t pa)
-{
-vm_offset_t pgnum = atop(pa);
-if (pgnum >= first_page) {
-vm_page_t m;
-m = PHYS_TO_VM_PAGE(pa);
-if (m == NULL)
-return (0);
-if ((m->oflags & VPO_UNMANAGED) == 0)
-return (1);
-}
-return (0);
-}
static pt_entry_t
init_pte_prot(vm_page_t m, vm_prot_t access, vm_prot_t prot)
{
@ -3305,9 +3292,9 @@ pmap_emulate_modified(pmap_t pmap, vm_offset_t va)
}
pte_set(pte, PTE_D);
tlb_update(pmap, va, *pte);
-pa = TLBLO_PTE_TO_PA(*pte);
-if (!page_is_managed(pa))
+if (!pte_test(pte, PTE_MANAGED))
panic("pmap_emulate_modified: unmanaged page");
+pa = TLBLO_PTE_TO_PA(*pte);
m = PHYS_TO_VM_PAGE(pa);
m->md.pv_flags |= PV_TABLE_REF;
PMAP_UNLOCK(pmap);