
MFother arches:

date: 2006/04/12 04:22:50;  author: alc;  state: Exp;  lines: +14 -41
Retire pmap_track_modified().  We no longer need it because we do not
create managed mappings within the clean submap.  To prevent regressions,
add assertions blocking the creation of managed mappings within the clean
submap.

Reviewed by: tegge
Olivier Houchard 2006-04-22 22:51:32 +00:00
parent 4c0e8f41f6
commit 477d836c4d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=157970
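
Background for the change (an illustrative sketch, not part of the commit): pmap_track_modified() returned nonzero only for addresses outside the clean submap window [kmi.clean_sva, kmi.clean_eva), and every caller used it to skip modified/referenced bookkeeping for clean-submap addresses. Since managed mappings are no longer created in that range, the per-call guard collapses into a single assertion at the point where pv entries are created. Roughly, using only identifiers that appear in the diff below:

	/* Sketch only; scaffolding around the identifiers from the diff. */

	/* Before: each tracking site tested the address first. */
	if (pmap_track_modified(va))	/* i.e., outside the clean submap */
		vm_page_dirty(m);

	/* After: the property is an invariant, asserted once in pmap_enter(). */
	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
	    ("pmap_enter: managed mapping within the clean submap"));
	vm_page_dirty(m);		/* tracking is now unconditional */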


@@ -1203,18 +1203,6 @@ pmap_dcache_wbinv_all(pmap_t pm)
 	cpu_dcache_wbinv_all();
 }
 
-/*
- * this routine defines the region(s) of memory that should
- * not be tested for the modified bit.
- */
-static PMAP_INLINE int
-pmap_track_modified(vm_offset_t va)
-{
-	if ((va < kmi.clean_sva) || (va >= kmi.clean_eva))
-		return 1;
-	else
-		return 0;
-}
 /*
  * PTE_SYNC_CURRENT:
  *
@@ -1559,8 +1547,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
 		ptep = &l2b->l2b_kva[l2pte_index(va)];
 		npte = opte = *ptep;
 
-		if (maskbits & (PVF_WRITE|PVF_MOD) &&
-		    !pmap_track_modified(pv->pv_va)) {
+		if (maskbits & (PVF_WRITE|PVF_MOD)) {
 			if ((pv->pv_flags & PVF_NC)) {
 				/*
 				 * Entry is not cacheable:
@@ -1618,7 +1605,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
 			}
 		}
 
-		if (maskbits & PVF_REF && !pmap_track_modified(pv->pv_va)) {
+		if (maskbits & PVF_REF) {
 			if ((pv->pv_flags & PVF_NC) == 0 &&
 			    (maskbits & (PVF_WRITE|PVF_MOD)) == 0) {
 				/*
@@ -2056,10 +2043,8 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
 			goto out;
 		}
 
-		if (pmap_track_modified(pv->pv_va)) {
-			pg->md.pvh_attrs |= PVF_REF | PVF_MOD;
-			vm_page_dirty(pg);
-		}
+		pg->md.pvh_attrs |= PVF_REF | PVF_MOD;
+		vm_page_dirty(pg);
 		pv->pv_flags |= PVF_REF | PVF_MOD;
 
 		/*
@@ -3267,8 +3252,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 					f = pmap_modify_pv(pg, pm, sva,
 					    PVF_WRITE, 0);
 					pmap_vac_me_harder(pg, pm, sva);
-					if (pmap_track_modified(sva))
-						vm_page_dirty(pg);
+					vm_page_dirty(pg);
 				} else
 					f = PVF_REF | PVF_EXEC;
@@ -3391,8 +3375,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 			 * writable from the outset.
 			 */
 			nflags |= PVF_MOD;
-			if (!(m->md.pvh_attrs & PVF_MOD) &&
-			    pmap_track_modified(va))
+			if (!(m->md.pvh_attrs & PVF_MOD))
 				vm_page_dirty(m);
 		}
 
 		if (m && opte)
@@ -3477,8 +3460,11 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 		if ((pve = pmap_get_pv_entry()) == NULL) {
 			panic("pmap_enter: no pv entries");
 		}
-		if (m && !(m->flags & (PG_UNMANAGED | PG_FICTITIOUS)))
+		if (m && !(m->flags & (PG_UNMANAGED | PG_FICTITIOUS))) {
+			KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
+			    ("pmap_enter: managed mapping within the clean submap"));
 			pmap_enter_pv(m, pve, pmap, va, nflags);
+		}
 	}
 
 	/*
 	 * Make sure userland mappings get the right permissions
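
A note on the regression guard: KASSERT() panics with the supplied message only in kernels built with "options INVARIANTS"; in a regular build it expands to nothing, so the new assertions cost nothing in production kernels. A simplified sketch of the macro, paraphrased from sys/systm.h (not the verbatim definition):

	#ifdef INVARIANTS
	#define	KASSERT(exp, msg) do {				\
		if (__predict_false(!(exp)))			\
			panic msg;				\
	} while (0)
	#else
	#define	KASSERT(exp, msg) do { } while (0)
	#endif

The message argument carries its own parentheses, which is why the hunk above passes ("pmap_enter: managed mapping within the clean submap"): the macro pastes it directly into a panic() call.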