mirror of https://git.FreeBSD.org/src.git
synced 2025-01-04 12:52:15 +00:00
1) Start using a cleaner and more consistent page allocator instead of the various ad-hoc schemes.
2) When bringing in UPAGES, the pmap code needs to do another vm_page_lookup.
3) When appropriate, set the PG_A or PG_M bits a priori to both avoid some processor errata and to minimize redundant processor updating of page tables.
4) Modify pmap_protect so that it can only remove permissions (as it originally supported). The additional capability is not needed.
5) Streamline read-only to read-write page mappings.
6) For pmap_copy_page, don't enable write mapping for the source page.
7) Correct and clean up pmap_incore.
8) Cluster initial kern_exec pagein.
9) Remove some minor lint from kern_malloc.
10) Correct some ioopt code.
11) Remove some dead code from the MI swapout routine.
12) Correct vm_object_deallocate (to remove backing_object ref).
13) Fix dead-object handling, which had problems under heavy memory load.
14) Add minor vm_page_lookup improvements.
15) Some pages are not in objects; make sure that vm_page.c can properly support such pages.
16) Add some more page-deficit handling.
17) Some minor code readability improvements.
This commit is contained in:
parent 47cfdb166d
commit 95461b450d
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=33109
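
The pattern running through the hunks below is the retirement of open-coded page-allocation retry loops in favor of vm_page_grab(). As a condensed sketch of the two idioms, lifted from the pmap_swapin_proc() hunk below (an illustration only, not a standalone program; it assumes the kernel context of this commit):

	/*
	 * Old idiom, repeated at each call site: look the page up, allocate
	 * it if absent, and sleep/retry while memory is short or while
	 * another thread holds the page busy.
	 */
	retry:
		if ((m = vm_page_lookup(upobj, i)) == NULL) {
			if ((m = vm_page_alloc(upobj, i, VM_ALLOC_NORMAL)) == NULL) {
				VM_WAIT;		/* block until free pages exist */
				goto retry;
			}
		} else {
			if ((m->flags & PG_BUSY) || m->busy) {
				m->flags |= PG_WANTED;
				tsleep(m, PVM, "swinuw", 0);	/* wait out the busy holder */
				goto retry;
			}
			m->flags |= PG_BUSY;
		}

	/*
	 * New idiom: vm_page_grab() with VM_ALLOC_RETRY performs the same
	 * lookup/allocate/sleep loop internally and returns the page busied.
	 */
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);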
@@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.180 1998/02/03 22:09:01 bde Exp $
* $Id: pmap.c,v 1.181 1998/02/04 22:32:10 eivind Exp $
*/

/*
@@ -215,7 +215,6 @@ static vm_page_t pmap_allocpte __P((pmap_t pmap, vm_offset_t va));
static int pmap_release_free_page __P((pmap_t pmap, vm_page_t p));
static vm_page_t _pmap_allocpte __P((pmap_t pmap, unsigned ptepindex));
static unsigned * pmap_pte_quick __P((pmap_t pmap, vm_offset_t va));
static vm_page_t pmap_page_alloc __P((vm_object_t object, vm_pindex_t pindex));
static vm_page_t pmap_page_lookup __P((vm_object_t object, vm_pindex_t pindex));
static int pmap_unuse_pt __P((pmap_t, vm_offset_t, vm_page_t));
vm_offset_t pmap_kmem_choose(vm_offset_t addr) ;
@@ -818,19 +817,6 @@ pmap_kremove(va)
invltlb_1pg(va);
}

static vm_page_t
pmap_page_alloc(object, pindex)
vm_object_t object;
vm_pindex_t pindex;
{
vm_page_t m;
m = vm_page_alloc(object, pindex, VM_ALLOC_ZERO);
if (m == NULL) {
VM_WAIT;
}
return m;
}

static vm_page_t
pmap_page_lookup(object, pindex)
vm_object_t object;
@@ -876,8 +862,10 @@ pmap_new_proc(p)
if ((up = p->p_addr) == NULL) {
up = (struct user *) kmem_alloc_pageable(kernel_map,
UPAGES * PAGE_SIZE);
#if !defined(MAX_PERF)
if (up == NULL)
panic("pmap_new_proc: u_map allocation failed");
#endif
p->p_addr = up;
}

@@ -888,10 +876,7 @@ pmap_new_proc(p)
/*
* Get a kernel stack page
*/
while ((m = vm_page_alloc(upobj,
i, VM_ALLOC_NORMAL)) == NULL) {
VM_WAIT;
}
m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

/*
* Wire the page
@@ -912,8 +897,9 @@ pmap_new_proc(p)
}
}

m->flags &= ~(PG_ZERO|PG_BUSY);
m->flags |= PG_MAPPED|PG_WRITEABLE;
PAGE_WAKEUP(m);
m->flags &= ~PG_ZERO;
m->flags |= PG_MAPPED | PG_WRITEABLE;
m->valid = VM_PAGE_BITS_ALL;
}
if (updateneeded)
@@ -940,6 +926,7 @@ pmap_dispose_proc(p)

if ((m = vm_page_lookup(upobj, i)) == NULL)
panic("pmap_dispose_proc: upage already missing???");

m->flags |= PG_BUSY;

oldpte = *(ptek + i);
@@ -950,7 +937,7 @@ pmap_dispose_proc(p)
vm_page_free(m);
}

if (cpu_class < CPUCLASS_586)
if (cpu_class <= CPUCLASS_386)
invltlb();
}

@@ -986,43 +973,31 @@ void
pmap_swapin_proc(p)
struct proc *p;
{
int i;
int i,rv;
vm_object_t upobj;
vm_page_t m;

upobj = p->p_upages_obj;
for(i=0;i<UPAGES;i++) {
int s;
s = splvm();
retry:
if ((m = vm_page_lookup(upobj, i)) == NULL) {
if ((m = vm_page_alloc(upobj, i, VM_ALLOC_NORMAL)) == NULL) {
VM_WAIT;
goto retry;
}
} else {
if ((m->flags & PG_BUSY) || m->busy) {
m->flags |= PG_WANTED;
tsleep(m, PVM, "swinuw",0);
goto retry;
}
m->flags |= PG_BUSY;
}
vm_page_wire(m);
splx(s);

m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

pmap_kenter(((vm_offset_t) p->p_addr) + i * PAGE_SIZE,
VM_PAGE_TO_PHYS(m));

if (m->valid != VM_PAGE_BITS_ALL) {
int rv;
rv = vm_pager_get_pages(upobj, &m, 1, 0);
#if !defined(MAX_PERF)
if (rv != VM_PAGER_OK)
panic("pmap_swapin_proc: cannot get upages for proc: %d\n", p->p_pid);
#endif
m = vm_page_lookup(upobj, i);
m->valid = VM_PAGE_BITS_ALL;
}

vm_page_wire(m);
PAGE_WAKEUP(m);
m->flags |= PG_MAPPED|PG_WRITEABLE;
m->flags |= PG_MAPPED | PG_WRITEABLE;
}
}

@@ -1175,9 +1150,8 @@ pmap_pinit(pmap)
* allocate the page directory page
*/
retry:
ptdpg = pmap_page_alloc( pmap->pm_pteobj, PTDPTDI);
if (ptdpg == NULL)
goto retry;
ptdpg = vm_page_grab( pmap->pm_pteobj, PTDPTDI,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

ptdpg->wire_count = 1;
++cnt.v_wire_count;
@@ -1224,10 +1198,8 @@ pmap_release_free_page(pmap, p)
return 0;
}

if (p->flags & PG_WANTED) {
p->flags &= ~PG_WANTED;
wakeup(p);
}
p->flags |= PG_BUSY;
splx(s);

/*
* Remove the page table page from the processes address space.
@@ -1235,9 +1207,11 @@ pmap_release_free_page(pmap, p)
pde[p->pindex] = 0;
pmap->pm_stats.resident_count--;

#if !defined(MAX_PERF)
if (p->hold_count) {
panic("pmap_release: freeing held page table page");
}
#endif
/*
* Page directory pages need to have the kernel
* stuff cleared, so they can go into the zero queue also.
@@ -1254,9 +1228,7 @@ pmap_release_free_page(pmap, p)
if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == p->pindex))
pmap->pm_ptphint = NULL;

p->flags |= PG_BUSY;
vm_page_free_zero(p);
splx(s);
return 1;
}

@@ -1271,28 +1243,12 @@ _pmap_allocpte(pmap, ptepindex)
{
vm_offset_t pteva, ptepa;
vm_page_t m;
int needszero = 0;

/*
* Find or fabricate a new pagetable page
*/
retry:
m = vm_page_lookup(pmap->pm_pteobj, ptepindex);
if (m == NULL) {
m = pmap_page_alloc(pmap->pm_pteobj, ptepindex);
if (m == NULL)
goto retry;
if ((m->flags & PG_ZERO) == 0)
needszero = 1;
m->flags &= ~(PG_ZERO|PG_BUSY);
m->valid = VM_PAGE_BITS_ALL;
} else {
if ((m->flags & PG_BUSY) || m->busy) {
m->flags |= PG_WANTED;
tsleep(m, PVM, "ptewai", 0);
goto retry;
}
}
m = vm_page_grab(pmap->pm_pteobj, ptepindex,
VM_ALLOC_ZERO | VM_ALLOC_RETRY);

if (m->queue != PQ_NONE) {
int s = splvm();
@@ -1318,7 +1274,8 @@ _pmap_allocpte(pmap, ptepindex)
pmap->pm_stats.resident_count++;

ptepa = VM_PAGE_TO_PHYS(m);
pmap->pm_pdir[ptepindex] = (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V);
pmap->pm_pdir[ptepindex] =
(pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A);

/*
* Set the page table hint
@@ -1329,7 +1286,7 @@ _pmap_allocpte(pmap, ptepindex)
* Try to use the new mapping, but if we cannot, then
* do it with the routine that maps the page explicitly.
*/
if (needszero) {
if ((m->flags & PG_ZERO) == 0) {
if ((((unsigned)pmap->pm_pdir[PTDPTDI]) & PG_FRAME) ==
(((unsigned) PTDpde) & PG_FRAME)) {
pteva = UPT_MIN_ADDRESS + i386_ptob(ptepindex);
@@ -1340,6 +1297,7 @@ _pmap_allocpte(pmap, ptepindex)
}

m->valid = VM_PAGE_BITS_ALL;
m->flags &= ~(PG_ZERO|PG_BUSY);
m->flags |= PG_MAPPED;

return m;
@@ -1480,10 +1438,11 @@ pmap_growkernel(vm_offset_t addr)
/*
* This index is bogus, but out of the way
*/
nkpg = vm_page_alloc(kernel_object,
ptpidx, VM_ALLOC_SYSTEM);
nkpg = vm_page_alloc(kernel_object, ptpidx, VM_ALLOC_SYSTEM);
#if !defined(MAX_PERF)
if (!nkpg)
panic("pmap_growkernel: no memory to grow kernel");
#endif

vm_page_wire(nkpg);
vm_page_remove(nkpg);
@@ -1528,7 +1487,9 @@ pmap_destroy(pmap)
count = --pmap->pm_count;
if (count == 0) {
pmap_release(pmap);
#if !defined(MAX_PERF)
panic("destroying a pmap is not yet implemented");
#endif
}
}

@@ -1648,12 +1609,12 @@ pmap_remove_entry(pmap, ppv, va)

rtval = 0;
if (pv) {

rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem);
TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
ppv->pv_list_count--;
if (TAILQ_FIRST(&ppv->pv_list) == NULL) {
ppv->pv_vm_page->flags &= ~(PG_MAPPED|PG_WRITEABLE);
}
if (TAILQ_FIRST(&ppv->pv_list) == NULL)
ppv->pv_vm_page->flags &= ~(PG_MAPPED | PG_WRITEABLE);

TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
free_pv_entry(pv);
@@ -1728,6 +1689,8 @@ pmap_remove_pte(pmap, ptq, va)
if (pmap_track_modified(va))
ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
}
if (oldpte & PG_A)
ppv->pv_vm_page->flags |= PG_REFERENCED;
return pmap_remove_entry(pmap, ppv, va);
} else {
return pmap_unuse_pt(pmap, va, NULL);
@@ -1910,6 +1873,10 @@ pmap_remove_all(pa)
*pte = 0;
if (tpte & PG_W)
pv->pv_pmap->pm_stats.wired_count--;

if (tpte & PG_A)
ppv->pv_vm_page->flags |= PG_REFERENCED;

/*
* Update the vm_page_t clean and reference bits.
*/
@@ -1934,11 +1901,12 @@ pmap_remove_all(pa)
pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
free_pv_entry(pv);
}
ppv->pv_vm_page->flags &= ~(PG_MAPPED|PG_WRITEABLE);

ppv->pv_vm_page->flags &= ~(PG_MAPPED | PG_WRITEABLE);

if (update_needed)
invltlb();

splx(s);
return;
}
@@ -1951,9 +1919,8 @@ void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
register unsigned *ptbase;
vm_offset_t pdnxt;
vm_offset_t ptpaddr;
vm_offset_t sindex, eindex;
vm_offset_t pdnxt, ptpaddr;
vm_pindex_t sindex, eindex;
int anychanged;


@@ -1965,6 +1932,9 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
return;
}

if (prot & VM_PROT_WRITE)
return;

anychanged = 0;

ptbase = get_ptbase(pmap);
@@ -1999,27 +1969,32 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)

for (; sindex != pdnxt; sindex++) {

unsigned pbits = ptbase[sindex];
unsigned pbits;
pv_table_t *ppv;

if (prot & VM_PROT_WRITE) {
if ((pbits & (PG_RW|PG_V)) == PG_V) {
if (pbits & PG_MANAGED) {
vm_page_t m = PHYS_TO_VM_PAGE(pbits);
m->flags |= PG_WRITEABLE;
m->object->flags |= OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY;
}
ptbase[sindex] = pbits | PG_RW;
anychanged = 1;
pbits = ptbase[sindex];

if (pbits & PG_MANAGED) {
ppv = NULL;
if (pbits & PG_A) {
ppv = pa_to_pvh(pbits);
ppv->pv_vm_page->flags |= PG_REFERENCED;
pbits &= ~PG_A;
}
} else if (pbits & PG_RW) {
if (pbits & PG_M) {
vm_offset_t sva1 = i386_ptob(sindex);
if ((pbits & PG_MANAGED) && pmap_track_modified(sva1)) {
vm_page_t m = PHYS_TO_VM_PAGE(pbits);
m->dirty = VM_PAGE_BITS_ALL;
if (pmap_track_modified(i386_ptob(sindex))) {
if (ppv == NULL)
ppv = pa_to_pvh(pbits);
ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
pbits &= ~PG_M;
}
}
ptbase[sindex] = pbits & ~(PG_M|PG_RW);
}

pbits &= ~PG_RW;

if (pbits != ptbase[sindex]) {
ptbase[sindex] = pbits;
anychanged = 1;
}
}
@@ -2089,6 +2064,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
#endif

pte = pmap_pte(pmap, va);

#if !defined(MAX_PERF)
/*
* Page Directory table entry not valid, we need a new PT page
*/
@@ -2096,12 +2073,16 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
panic("pmap_enter: invalid page directory, pdir=%p, va=0x%lx\n",
pmap->pm_pdir[PTDPTDI], va);
}
#endif

origpte = *(vm_offset_t *)pte;
pa &= PG_FRAME;
opa = origpte & PG_FRAME;

#if !defined(MAX_PERF)
if (origpte & PG_PS)
panic("pmap_enter: attempted pmap_enter on 4MB page");
#endif

/*
* Mapping has not changed, must be protection or wiring change.
@@ -2124,24 +2105,32 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
}
#endif

/*
* Remove extra pte reference
*/
if (mpte)
mpte->hold_count--;

if ((prot & VM_PROT_WRITE) && (origpte & PG_V)) {
if ((origpte & PG_RW) == 0) {
*pte |= PG_RW;
invltlb_1pg(va);
}
return;
}

/*
* We might be turning off write access to the page,
* so we go ahead and sense modify status.
*/
if (origpte & PG_MANAGED) {
vm_page_t m;
if (origpte & PG_M) {
if (pmap_track_modified(va)) {
m = PHYS_TO_VM_PAGE(pa);
m->dirty = VM_PAGE_BITS_ALL;
}
if ((origpte & PG_M) && pmap_track_modified(va)) {
pv_table_t *ppv;
ppv = pa_to_pvh(opa);
ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
}
pa |= PG_MANAGED;
}

if (mpte)
mpte->hold_count--;

goto validate;
}
/*
@@ -2151,8 +2140,10 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
if (opa) {
int err;
err = pmap_remove_pte(pmap, pte, va);
#if !defined(MAX_PERF)
if (err)
panic("pmap_enter: pte vanished, va: 0x%x", va);
#endif
}

/*
@@ -2242,8 +2233,10 @@ pmap_enter_quick(pmap, va, pa, mpte)
* the hold count, and activate it.
*/
if (ptepa) {
#if !defined(MAX_PERF)
if (ptepa & PG_PS)
panic("pmap_enter_quick: unexpected mapping into 4MB page");
#endif
if (pmap->pm_ptphint &&
(pmap->pm_ptphint->pindex == ptepindex)) {
mpte = pmap->pm_ptphint;
@@ -2498,6 +2491,9 @@ pmap_prefault(pmap, addra, entry)
unsigned *pte;

addr = addra + pmap_prefault_pageorder[i];
if (addr > addra + (PFFOR * PAGE_SIZE))
addr = 0;

if (addr < starta || addr >= entry->end)
continue;

@@ -2614,8 +2610,10 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
vm_offset_t srcptepaddr;
unsigned ptepindex;

#if !defined(MAX_PERF)
if (addr >= UPT_MIN_ADDRESS)
panic("pmap_copy: invalid to pmap_copy page tables\n");
#endif

pdnxt = ((addr + PAGE_SIZE*NPTEPG) & ~(PAGE_SIZE*NPTEPG - 1));
ptepindex = addr >> PDRSHIFT;
@@ -2661,7 +2659,7 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
* accessed (referenced) bits
* during the copy.
*/
*dst_pte = ptetemp & ~(PG_M|PG_A);
*dst_pte = ptetemp & ~(PG_M | PG_A);
dst_pmap->pm_stats.resident_count++;
pmap_insert_entry(dst_pmap, addr,
dstmpte,
@@ -2701,10 +2699,12 @@ pmap_zero_page(phys)
vm_offset_t phys;
{
#ifdef SMP
#if !defined(MAX_PERF)
if (*(int *) prv_CMAP3)
panic("pmap_zero_page: prv_CMAP3 busy");
#endif

*(int *) prv_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME);
*(int *) prv_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
invltlb_1pg((vm_offset_t) &prv_CPAGE3);

bzero(&prv_CPAGE3, PAGE_SIZE);
@@ -2712,10 +2712,12 @@ pmap_zero_page(phys)
*(int *) prv_CMAP3 = 0;
invltlb_1pg((vm_offset_t) &prv_CPAGE3);
#else
#if !defined(MAX_PERF)
if (*(int *) CMAP2)
panic("pmap_zero_page: CMAP busy");
#endif

*(int *) CMAP2 = PG_V | PG_RW | (phys & PG_FRAME);
*(int *) CMAP2 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
bzero(CADDR2, PAGE_SIZE);
*(int *) CMAP2 = 0;
invltlb_1pg((vm_offset_t) CADDR2);
@@ -2734,13 +2736,15 @@ pmap_copy_page(src, dst)
vm_offset_t dst;
{
#ifdef SMP
#if !defined(MAX_PERF)
if (*(int *) prv_CMAP1)
panic("pmap_copy_page: prv_CMAP1 busy");
if (*(int *) prv_CMAP2)
panic("pmap_copy_page: prv_CMAP2 busy");
#endif

*(int *) prv_CMAP1 = PG_V | PG_RW | (src & PG_FRAME);
*(int *) prv_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME);
*(int *) prv_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
*(int *) prv_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;

invltlb_2pg( (vm_offset_t) &prv_CPAGE1, (vm_offset_t) &prv_CPAGE2);

@@ -2750,11 +2754,13 @@ pmap_copy_page(src, dst)
*(int *) prv_CMAP2 = 0;
invltlb_2pg( (vm_offset_t) &prv_CPAGE1, (vm_offset_t) &prv_CPAGE2);
#else
#if !defined(MAX_PERF)
if (*(int *) CMAP1 || *(int *) CMAP2)
panic("pmap_copy_page: CMAP busy");
#endif

*(int *) CMAP1 = PG_V | PG_RW | (src & PG_FRAME);
*(int *) CMAP2 = PG_V | PG_RW | (dst & PG_FRAME);
*(int *) CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
*(int *) CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;

bcopy(CADDR1, CADDR2, PAGE_SIZE);

@@ -2891,7 +2897,7 @@ pmap_remove_pages(pmap, sva, eva)
ppv->pv_list_count--;
TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
if (TAILQ_FIRST(&ppv->pv_list) == NULL) {
ppv->pv_vm_page->flags &= ~(PG_MAPPED|PG_WRITEABLE);
ppv->pv_vm_page->flags &= ~(PG_MAPPED | PG_WRITEABLE);
}

pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
@@ -3082,6 +3088,7 @@ pmap_ts_referenced(vm_offset_t pa)
for (pv = TAILQ_FIRST(&ppv->pv_list);
pv;
pv = TAILQ_NEXT(pv, pv_list)) {

/*
* if the bit being tested is the modified bit, then
* mark clean_map and ptes as never
@@ -3094,11 +3101,15 @@ pmap_ts_referenced(vm_offset_t pa)
if (pte == NULL) {
continue;
}

if (*pte & PG_A) {
rtval++;
*pte &= ~PG_A;
if (rtval > 16)
break;
}
}

splx(s);
if (rtval) {
invltlb();
@@ -3187,8 +3198,10 @@ pmap_mapdev(pa, size)
size = roundup(size, PAGE_SIZE);

va = kmem_alloc_pageable(kernel_map, size);
#if !defined(MAX_PERF)
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
#endif

pa = pa & PG_FRAME;
for (tmpva = va; size > 0;) {
@@ -3213,6 +3226,7 @@ pmap_mincore(pmap, addr)
{

unsigned *ptep, pte;
vm_page_t m;
int val = 0;

ptep = pmap_pte(pmap, addr);
@@ -3221,10 +3235,18 @@ pmap_mincore(pmap, addr)
}

if (pte = *ptep) {
pv_table_t *ppv;
vm_offset_t pa;

val = MINCORE_INCORE;
if ((pte & PG_MANAGED) == 0)
return val;

pa = pte & PG_FRAME;

ppv = pa_to_pvh((pa & PG_FRAME));
m = ppv->pv_vm_page;

/*
* Modified by us
*/
@@ -3233,22 +3255,20 @@ pmap_mincore(pmap, addr)
/*
* Modified by someone
*/
else if (PHYS_TO_VM_PAGE(pa)->dirty ||
pmap_is_modified(pa))
else if (m->dirty || pmap_is_modified(pa))
val |= MINCORE_MODIFIED_OTHER;
/*
* Referenced by us
*/
if (pte & PG_U)
if (pte & PG_A)
val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;

/*
* Referenced by someone
*/
else if ((PHYS_TO_VM_PAGE(pa)->flags & PG_REFERENCED) ||
pmap_ts_referenced(pa)) {
else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(pa)) {
val |= MINCORE_REFERENCED_OTHER;
PHYS_TO_VM_PAGE(pa)->flags |= PG_REFERENCED;
m->flags |= PG_REFERENCED;
}
}
return val;
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* from: @(#)vmparam.h 5.9 (Berkeley) 5/12/91
* $Id: vmparam.h,v 1.26 1997/06/25 20:18:58 tegge Exp $
* $Id: vmparam.h,v 1.27 1997/10/27 00:38:46 jkh Exp $
*/


@@ -120,4 +120,9 @@
#define VM_KMEM_SIZE (32 * 1024 * 1024)
#endif

/* initial pagein size of beginning of executable file */
#ifndef VM_INITIAL_PAGEIN
#define VM_INITIAL_PAGEIN 16
#endif

#endif /* _MACHINE_VMPARAM_H_ */
@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: kern_exec.c,v 1.74 1998/01/11 21:35:38 dyson Exp $
* $Id: kern_exec.c,v 1.75 1998/02/04 22:32:31 eivind Exp $
*/

#include "opt_diagnostic.h"
@@ -359,8 +359,9 @@ int
exec_map_first_page(imgp)
struct image_params *imgp;
{
int s;
vm_page_t m;
int s, rv, i;
int initial_pagein;
vm_page_t ma[VM_INITIAL_PAGEIN];
vm_object_t object;


@@ -371,40 +372,45 @@ exec_map_first_page(imgp)
object = imgp->vp->v_object;
s = splvm();

retry:
m = vm_page_lookup(object, 0);
if (m == NULL) {
m = vm_page_alloc(object, 0, VM_ALLOC_NORMAL);
if (m == NULL) {
VM_WAIT;
goto retry;
ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

if ((ma[0]->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
initial_pagein = VM_INITIAL_PAGEIN;
if (initial_pagein > object->size)
initial_pagein = object->size;
for (i = 1; i < initial_pagein; i++) {
if (ma[i] = vm_page_lookup(object, i)) {
if ((ma[i]->flags & PG_BUSY) || ma[i]->busy)
break;
if (ma[i]->valid)
break;
ma[i]->flags |= PG_BUSY;
} else {
ma[i] = vm_page_alloc(object, i, VM_ALLOC_NORMAL);
if (ma[i] == NULL)
break;
}
}
} else if ((m->flags & PG_BUSY) || m->busy) {
m->flags |= PG_WANTED;
tsleep(m, PVM, "execpw", 0);
goto retry;
}
initial_pagein = i;

m->flags |= PG_BUSY;
rv = vm_pager_get_pages(object, ma, initial_pagein, 0);
ma[0] = vm_page_lookup(object, 0);

if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
int rv;
rv = vm_pager_get_pages(object, &m, 1, 0);
if (rv != VM_PAGER_OK) {
vm_page_protect(m, VM_PROT_NONE);
vm_page_deactivate(m);
PAGE_WAKEUP(m);
if ((rv != VM_PAGER_OK) || (ma[0] == NULL)) {
vm_page_protect(ma[0], VM_PROT_NONE);
vm_page_deactivate(ma[0]);
PAGE_WAKEUP(ma[0]);
splx(s);
return EIO;
}
}

vm_page_wire(m);
PAGE_WAKEUP(m);
vm_page_wire(ma[0]);
PAGE_WAKEUP(ma[0]);
splx(s);

pmap_kenter((vm_offset_t) imgp->image_header, VM_PAGE_TO_PHYS(m));
imgp->firstpage = m;
pmap_kenter((vm_offset_t) imgp->image_header, VM_PAGE_TO_PHYS(ma[0]));
imgp->firstpage = ma[0];

return 0;
}
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)kern_malloc.c 8.3 (Berkeley) 1/4/94
* $Id: kern_malloc.c,v 1.39 1998/01/22 17:29:47 dyson Exp $
* $Id: kern_malloc.c,v 1.40 1998/02/04 22:32:32 eivind Exp $
*/

#include "opt_diagnostic.h"
@@ -197,7 +197,7 @@ malloc(size, type, flags)
kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
freep = (struct freelist *)va;
savedtype = type->ks_shortdesc;
savedtype = (char *) type->ks_shortdesc;
#if BYTE_ORDER == BIG_ENDIAN
freep->type = (struct malloc_type *)WEIRD_ADDR >> 16;
#endif
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
* $Id: kern_subr.c,v 1.16 1998/01/22 17:29:49 dyson Exp $
* $Id: kern_subr.c,v 1.17 1998/02/04 22:32:34 eivind Exp $
*/

#include "opt_diagnostic.h"
@@ -193,8 +193,9 @@ uioread(n, uio, obj, nread)
int error;

*nread = 0;
if (vfs_ioopt > 1)
if (vfs_ioopt < 2)
return 0;

error = 0;

while (n > 0 && uio->uio_resid) {
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
* $Id: vfs_subr.c,v 1.127 1998/01/31 01:17:58 tegge Exp $
* $Id: vfs_subr.c,v 1.128 1998/02/04 22:32:40 eivind Exp $
*/

/*
@@ -1170,6 +1170,7 @@ vclean(vp, flags, p)
struct proc *p;
{
int active;
vm_object_t obj;

/*
* Check to see if the vnode is in use. If so we have to reference it
@@ -1199,18 +1200,18 @@ vclean(vp, flags, p)
* Clean out any buffers associated with the vnode.
*/
vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
if (vp->v_object) {
if (vp->v_object->ref_count == 0) {
if (obj = vp->v_object) {
if (obj->ref_count == 0) {
/*
* This is a normal way of shutting down the object/vnode
* association.
*/
vm_object_terminate(vp->v_object);
vm_object_terminate(obj);
} else {
/*
* Woe to the process that tries to page now :-).
*/
vm_pager_deallocate(vp->v_object);
vm_pager_deallocate(obj);
}
}
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)spec_vnops.c 8.14 (Berkeley) 5/21/95
* $Id: spec_vnops.c,v 1.53 1998/01/06 05:21:23 dyson Exp $
* $Id: spec_vnops.c,v 1.54 1998/02/04 22:32:51 eivind Exp $
*/

#include "opt_diagnostic.h"
@@ -837,7 +837,10 @@ spec_getpages(ap)
* now tell them that it is ok to use.
*/
if (!error) {
vm_page_deactivate(ap->a_m[i]);
if (ap->a_m[i]->flags & PG_WANTED)
vm_page_activate(ap->a_m[i]);
else
vm_page_deactivate(ap->a_m[i]);
PAGE_WAKEUP(ap->a_m[i]);
} else
vnode_pager_freepage(ap->a_m[i]);
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ufs_readwrite.c 8.11 (Berkeley) 5/8/95
* $Id: ufs_readwrite.c,v 1.40 1998/01/22 17:30:22 dyson Exp $
* $Id: ufs_readwrite.c,v 1.41 1998/01/30 11:34:05 phk Exp $
*/

#define BLKSIZE(a, b, c) blksize(a, b, c)
@@ -74,6 +74,7 @@ READ(ap)
int error;
u_short mode;
int seqcount;
vm_object_t object;

vp = ap->a_vp;
seqcount = ap->a_ioflag >> 16;
@@ -95,32 +96,56 @@ READ(ap)
if ((u_int64_t)uio->uio_offset > fs->fs_maxfilesize)
return (EFBIG);

for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
object = vp->v_object;

bytesinfile = ip->i_size - uio->uio_offset;
if (bytesinfile <= 0) {
return 0;
}

if (object)
vm_object_reference(object);
#if 1
if ((vfs_ioopt > 1) && object) {
int nread, toread;
toread = uio->uio_resid;
if (toread > bytesinfile)
toread = bytesinfile;
if (toread >= PAGE_SIZE) {
error = uioread(toread, uio, object, &nread);
if ((uio->uio_resid == 0) || (error != 0)) {
if (!(vp->v_mount->mnt_flag & MNT_NOATIME))
ip->i_flag |= IN_ACCESS;
if (object)
vm_object_vndeallocate(object);
return error;
}
}
}
#endif

for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
break;

#if 0
if ((vfs_ioopt > 1) && vp->v_object) {
#if 1
if ((vfs_ioopt > 1) && object) {
int nread, toread;
vm_object_reference(vp->v_object);
toread = uio->uio_resid;
if (toread > bytesinfile)
toread = bytesinfile;
if (toread >= PAGE_SIZE) {
error = uioread(toread, uio, vp->v_object, &nread);
error = uioread(toread, uio, object, &nread);
if ((uio->uio_resid == 0) || (error != 0)) {
if (!(vp->v_mount->mnt_flag & MNT_NOATIME))
ip->i_flag |= IN_ACCESS;
vm_object_vndeallocate(vp->v_object);
if (object)
vm_object_vndeallocate(object);
return error;
}
if (nread > 0) {
vm_object_vndeallocate(vp->v_object);
continue;
}
}
vm_object_vndeallocate(vp->v_object);
}
#endif

@@ -167,13 +192,13 @@ READ(ap)
xfersize = size;
}

if (vfs_ioopt &&
if (vfs_ioopt && object &&
(bp->b_flags & B_VMIO) &&
((blkoffset & PAGE_MASK) == 0) &&
((xfersize & PAGE_MASK) == 0)) {
error =
uiomoveco((char *)bp->b_data + blkoffset,
(int)xfersize, uio, vp->v_object);
(int)xfersize, uio, object);
} else {
error =
uiomove((char *)bp->b_data + blkoffset,
@@ -187,6 +212,8 @@ READ(ap)
}
if (bp != NULL)
bqrelse(bp);
if (object)
vm_object_vndeallocate(object);
if (!(vp->v_mount->mnt_flag & MNT_NOATIME))
ip->i_flag |= IN_ACCESS;
return (error);
@@ -214,6 +241,7 @@ WRITE(ap)
off_t osize;
int blkoffset, error, extended, flags, ioflag, resid, size, xfersize;
struct timeval tv;
vm_object_t object;

extended = 0;
ioflag = ap->a_ioflag;
@@ -221,6 +249,10 @@ WRITE(ap)
vp = ap->a_vp;
ip = VTOI(vp);

object = vp->v_object;
if (object)
vm_object_reference(object);

#ifdef DIAGNOSTIC
if (uio->uio_rw != UIO_WRITE)
panic("%s: mode", WRITE_S);
@@ -230,8 +262,11 @@ WRITE(ap)
case VREG:
if (ioflag & IO_APPEND)
uio->uio_offset = ip->i_size;
if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size) {
if (object)
vm_object_vndeallocate(object);
return (EPERM);
}
/* FALLTHROUGH */
case VLNK:
break;
@@ -245,8 +280,11 @@ WRITE(ap)

fs = ip->I_FS;
if (uio->uio_offset < 0 ||
(u_int64_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
(u_int64_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize) {
if (object)
vm_object_vndeallocate(object);
return (EFBIG);
}
/*
* Maybe this should be above the vnode op call, but so long as
* file servers have no limits, I don't think it matters.
@@ -256,6 +294,8 @@ WRITE(ap)
uio->uio_offset + uio->uio_resid >
p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
psignal(p, SIGXFSZ);
if (object)
vm_object_vndeallocate(object);
return (EFBIG);
}

@@ -263,6 +303,12 @@ WRITE(ap)
osize = ip->i_size;
flags = ioflag & IO_SYNC ? B_SYNC : 0;

if (object && (object->flags & OBJ_OPT)) {
vm_freeze_copyopts(object,
OFF_TO_IDX(uio->uio_offset),
OFF_TO_IDX(uio->uio_offset + uio->uio_resid + PAGE_MASK));
}

for (error = 0; uio->uio_resid > 0;) {
lbn = lblkno(fs, uio->uio_offset);
blkoffset = blkoff(fs, uio->uio_offset);
@@ -292,14 +338,6 @@ WRITE(ap)
if (size < xfersize)
xfersize = size;

if (vfs_ioopt &&
vp->v_object && (vp->v_object->flags & OBJ_OPT) &&
vp->v_object->shadow_count) {
vm_freeze_copyopts(vp->v_object,
OFF_TO_IDX(uio->uio_offset),
OFF_TO_IDX(uio->uio_offset + size));
}

error =
uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
if (ioflag & IO_VMIO)
@@ -343,6 +381,9 @@ WRITE(ap)
if (!error)
VN_POLLEVENT(vp, POLLWRITE | (extended ? POLLEXTEND : 0));

if (object)
vm_object_vndeallocate(object);

return (error);
}
sys/vm/vm_glue.c
@ -59,7 +59,7 @@
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_glue.c,v 1.70 1998/01/22 17:30:34 dyson Exp $
 * $Id: vm_glue.c,v 1.71 1998/02/04 22:33:44 eivind Exp $
 */

#include "opt_diagnostic.h"
@ -214,6 +214,7 @@ vm_fork(p1, p2, flags)
	}

	while ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
		vm_pageout_deficit += (UPAGES + VM_INITIAL_PAGEIN);
		VM_WAIT;
	}

@ -332,13 +333,12 @@ scheduler(dummy)
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (p->p_stat == SRUN &&
		    (p->p_flag & (P_INMEM | P_SWAPPING)) == 0) {
			int mempri;

			pri = p->p_swtime + p->p_slptime;
			if ((p->p_flag & P_SWAPINREQ) == 0) {
				pri -= p->p_nice * 8;
			}
			mempri = pri > 0 ? pri : 0;

			/*
			 * if this process is higher priority and there is
			 * enough space, then select this process instead of
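The vm_fork() hunk records the pages the fork is about to need in vm_pageout_deficit before sleeping, so the pageout daemon knows how far short the system really is, not just that one process blocked. A toy model of that accounting, with made-up counter values standing in for the kernel's:

#include <stdio.h>

/* Stand-ins for the kernel counters; values are illustrative only. */
static int free_count = 2, cache_count = 1, free_min = 8;
static int pageout_deficit = 0;

#define UPAGES            2
#define VM_INITIAL_PAGEIN 8

static void
vm_wait(void)
{
    /* Model the pageout daemon freeing pages while we sleep. */
    free_count += 4;
}

int
main(void)
{
    while ((free_count + cache_count) < free_min) {
        /* Record what this fork will consume, then sleep. */
        pageout_deficit += (UPAGES + VM_INITIAL_PAGEIN);
        vm_wait();
    }
    printf("deficit reported to pageout daemon: %d\n", pageout_deficit);
    return 0;
}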
sys/vm/vm_kern.c
@ -61,7 +61,7 @@
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_kern.c,v 1.41 1998/01/31 11:56:35 dyson Exp $
 * $Id: vm_kern.c,v 1.42 1998/02/04 22:33:45 eivind Exp $
 */

/*
@ -180,10 +180,8 @@ kmem_alloc(map, size)
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		while ((mem = vm_page_alloc(kernel_object,
			OFF_TO_IDX(offset + i), VM_ALLOC_ZERO)) == NULL) {
			VM_WAIT;
		}
		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
				VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->flags &= ~(PG_BUSY|PG_ZERO);
@ -365,7 +363,7 @@ kmem_malloc(map, size, waitflag)
		PAGE_WAKEUP(m);
		pmap_enter(kernel_pmap, addr + i, VM_PAGE_TO_PHYS(m),
			VM_PROT_ALL, 1);
		m->flags |= PG_MAPPED|PG_WRITEABLE;
		m->flags |= PG_MAPPED | PG_WRITEABLE | PG_REFERENCED;
	}
	vm_map_unlock(map);
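kmem_alloc() now lets vm_page_grab() do the wait-and-retry internally, then zero-fills only when the page did not come off the pre-zeroed queue. A compilable sketch of that PG_ZERO check; the page struct and page_grab() are simplified stand-ins:

#include <stdio.h>
#include <string.h>

#define PG_ZERO 0x01

struct page { int flags; char data[16]; };

/* Stand-in for vm_page_grab(): may or may not hand back a zeroed page. */
static struct page *
page_grab(struct page *p, int prezeroed)
{
    if (prezeroed) {
        memset(p->data, 0, sizeof(p->data));
        p->flags |= PG_ZERO;
    }
    return p;
}

int
main(void)
{
    struct page pg = { 0, "garbage" };

    struct page *mem = page_grab(&pg, 0);
    if ((mem->flags & PG_ZERO) == 0)
        memset(mem->data, 0, sizeof(mem->data)); /* vm_page_zero_fill() */
    mem->flags &= ~PG_ZERO;
    printf("first byte after grab: %d\n", mem->data[0]);
    return 0;
}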
sys/vm/vm_map.c
@ -61,7 +61,7 @@
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_map.c,v 1.109 1998/01/31 11:56:38 dyson Exp $
 * $Id: vm_map.c,v 1.110 1998/02/04 22:33:47 eivind Exp $
 */

/*
@ -175,7 +175,6 @@ static void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t,
		vm_map_entry_t));
static vm_page_t vm_freeze_page_alloc __P((vm_object_t, vm_pindex_t));

void
vm_map_startup()
@ -2607,27 +2606,6 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
	return 0;
}

/*
 * local routine to allocate a page for an object.
 */
static vm_page_t
vm_freeze_page_alloc(object, pindex)
	vm_object_t object;
	vm_pindex_t pindex;
{
	vm_page_t m;

	while ((m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL)) == NULL) {
		VM_WAIT;
		if (m = vm_page_lookup(object, pindex))
			return NULL;
	}

	m->valid = 0;
	m->dirty = 0;
	return m;
}

/*
 * Performs the copy_on_write operations necessary to allow the virtual copies
 * into user space to work. This has to be called for write(2) system calls
@ -2638,7 +2616,7 @@ vm_freeze_copyopts(object, froma, toa)
	vm_object_t object;
	vm_pindex_t froma, toa;
{
	int s;
	int s, rv;
	vm_object_t robject, robjectn;
	vm_pindex_t idx, from, to;

@ -2674,64 +2652,32 @@ vm_freeze_copyopts(object, froma, toa)
		for (idx = 0; idx < robject->size; idx++) {

m_outretry:
			m_out = vm_page_lookup(robject, idx);
			if( m_out && (m_out->flags & PG_BUSY)) {
				s = splvm();
				while (m_out && (m_out->flags & PG_BUSY)) {
					m_out->flags |= PG_WANTED;
					tsleep(m_out, PVM, "pwtfrz", 0);
					splx(s);
					goto m_outretry;
				}
				splx(s);
			}

			if (m_out == NULL) {
				m_out = vm_freeze_page_alloc(robject, idx);
				if (m_out == NULL)
					goto m_outretry;
			}
			m_out = vm_page_grab(robject, idx,
					VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

			if (m_out->valid == 0) {
				m_out->flags |= PG_BUSY;
m_inretry:
				m_in = vm_page_lookup(object, bo_pindex + idx);
				if (m_in == NULL) {
					int rv;
					m_in = vm_freeze_page_alloc(object, bo_pindex + idx);
					if (m_in == NULL)
						goto m_inretry;
				m_in = vm_page_grab(object, bo_pindex + idx,
						VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
				if (m_in->valid == 0) {
					rv = vm_pager_get_pages(object, &m_in, 1, 0);
					if (rv != VM_PAGER_OK) {
						printf("vm_freeze_copyopts: cannot read page from file: %x\n", m_in->pindex);
						continue;
					}
				} else if(m_in->busy || (m_in->flags & PG_BUSY)) {
					s = splvm();
					while (m_in && (m_in->busy || (m_in->flags & PG_BUSY))) {
						m_in->flags |= PG_WANTED;
						tsleep(m_in, PVM, "pwtfrz", 0);
						splx(s);
						goto m_inretry;
					}
					splx(s);
					if (m_in == NULL) {
						goto m_inretry;
					}
					vm_page_deactivate(m_in);
				}

				m_in->flags |= PG_BUSY;
				vm_page_protect(m_in, VM_PROT_NONE);
				pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
				m_out->valid = VM_PAGE_BITS_ALL;
				m_out->valid = m_in->valid;
				m_out->dirty = VM_PAGE_BITS_ALL;

				vm_page_deactivate(m_out);
				vm_page_deactivate(m_in);
				vm_page_activate(m_out);

				PAGE_WAKEUP(m_out);
				PAGE_WAKEUP(m_in);
			}
			PAGE_WAKEUP(m_out);
		}

		object->shadow_count--;
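The rewritten loop replaces two hand-rolled lookup/sleep/allocate sequences with vm_page_grab() and copies only when the destination page has no valid contents yet, inheriting the source page's validity instead of assuming it is fully valid. A rough, self-contained model of that control flow (the page struct and helpers are stand-ins; the real routine works on vm_page_t and pmap_copy_page()):

#include <stdio.h>

struct page { int valid; int data; };

/* Stand-in for vm_page_grab(): always returns an unbusied page here. */
static struct page *page_grab(struct page *p) { return p; }

static void
freeze_copy(struct page *src, struct page *dst)
{
    dst = page_grab(dst);
    if (dst->valid)
        return;                   /* shadow copy already made */

    src = page_grab(src);
    if (!src->valid) {
        src->data = 42;           /* models the pager read */
        src->valid = 1;
    }

    dst->data = src->data;        /* models pmap_copy_page() */
    dst->valid = src->valid;      /* new code: inherit validity */
}

int
main(void)
{
    struct page in = { 0, 0 }, out = { 0, 0 };
    freeze_copy(&in, &out);
    printf("out.valid=%d out.data=%d\n", out.valid, out.data);
    return 0;
}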
sys/vm/vm_object.c
@ -61,7 +61,7 @@
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_object.c,v 1.109 1998/01/31 11:56:41 dyson Exp $
 * $Id: vm_object.c,v 1.110 1998/02/04 22:33:50 eivind Exp $
 */

/*
@ -93,9 +93,7 @@
#include <vm/vm_zone.h>

static void vm_object_qcollapse __P((vm_object_t object));
#ifdef not_used
static void vm_object_deactivate_pages __P((vm_object_t));
#endif
static void vm_object_dispose __P((vm_object_t));

/*
 * Virtual memory objects maintain the actual data
@ -249,7 +247,9 @@ vm_object_reference(object)
	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		while (vget((struct vnode *) object->handle, LK_RETRY|LK_NOOBJ, curproc)) {
#if !defined(MAX_PERF)
			printf("vm_object_reference: delay in getting object\n");
#endif
		}
	}
}
@ -381,6 +381,7 @@ vm_object_deallocate(object)
			if (temp->ref_count == 0)
				temp->flags &= ~OBJ_OPT;
			temp->generation++;
			object->backing_object = NULL;
		}
		vm_object_terminate(object);
		/* unlocks and deallocates object */
@ -418,7 +419,7 @@ vm_object_terminate(object)

#if defined(DIAGNOSTIC)
	if (object->paging_in_progress != 0)
		panic("vm_object_deallocate: pageout in progress");
		panic("vm_object_terminate: pageout in progress");
#endif

	/*
@ -441,28 +442,34 @@ vm_object_terminate(object)
		vp = (struct vnode *) object->handle;
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);

	} else {
	} else if (object->type != OBJT_DEAD) {

		/*
		 * Now free the pages. For internal objects, this also removes them
		 * from paging queues.
		 */
		while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
#if !defined(MAX_PERF)
			if (p->busy || (p->flags & PG_BUSY))
				printf("vm_object_terminate: freeing busy page\n");
#endif
			p->flags |= PG_BUSY;
			vm_page_free(p);
			cnt.v_pfree++;
		}

	}

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);
	if (object->type != OBJT_DEAD) {
		/*
		 * Let the pager know object is dead.
		 */
		vm_pager_deallocate(object);
	}

	if (object->ref_count == 0) {
		vm_object_dispose(object);
		if ((object->type != OBJT_DEAD) || (object->resident_page_count == 0))
			vm_object_dispose(object);
	}
}

@ -471,7 +478,7 @@ vm_object_terminate(object)
 *
 *	Dispose the object.
 */
void
static void
vm_object_dispose(object)
	vm_object_t object;
{
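The vm_object_terminate() changes are the "dead object handling" item from the commit message: objects already marked OBJT_DEAD skip the page-freeing and pager teardown, and a dead object is only disposed once its resident pages are gone. A small model of that reordered tail, with stub types and printf in place of the real work:

#include <stdio.h>

enum obj_type { OBJT_DEFAULT, OBJT_VNODE, OBJT_DEAD };

struct object {
    enum obj_type type;
    int resident_page_count;
    int ref_count;
};

/* Models the reordered tail of vm_object_terminate(). */
static void
object_terminate(struct object *o)
{
    if (o->type != OBJT_DEAD) {
        o->resident_page_count = 0;          /* free the pages */
        printf("pager told object is dead\n");
    }
    if (o->ref_count == 0 &&
        (o->type != OBJT_DEAD || o->resident_page_count == 0))
        printf("object disposed\n");
}

int
main(void)
{
    struct object dead = { OBJT_DEAD, 3, 0 };
    object_terminate(&dead);       /* pages remain: not disposed */
    dead.resident_page_count = 0;
    object_terminate(&dead);       /* now safe to dispose */
    return 0;
}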
sys/vm/vm_object.h
@ -61,7 +61,7 @@
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_object.h,v 1.43 1998/01/22 17:30:40 dyson Exp $
 * $Id: vm_object.h,v 1.44 1998/01/31 11:56:43 dyson Exp $
 */

/*
@ -171,7 +171,6 @@ void vm_object_collapse __P((vm_object_t));
void vm_object_copy __P((vm_object_t, vm_pindex_t, vm_object_t *, vm_pindex_t *, boolean_t *));
void vm_object_deallocate __P((vm_object_t));
void vm_object_terminate __P((vm_object_t));
void vm_object_dispose __P((vm_object_t));
void vm_object_vndeallocate __P((vm_object_t));
void vm_object_init __P((void));
void vm_object_page_clean __P((vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t));

sys/vm/vm_page.c
@ -34,7 +34,7 @@
 * SUCH DAMAGE.
 *
 * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
 * $Id: vm_page.c,v 1.89 1998/01/31 20:30:18 dyson Exp $
 * $Id: vm_page.c,v 1.90 1998/02/04 22:33:52 eivind Exp $
 */

/*
@ -96,10 +96,10 @@ static vm_page_t vm_page_select_free __P((vm_object_t object,
 * page structure.
 */

static int vm_page_bucket_generation;	/* generation id for buckets */
static struct pglist *vm_page_buckets;	/* Array of buckets */
static int vm_page_bucket_count;	/* How big is array? */
static int vm_page_hash_mask;		/* Mask for hash function */
static volatile int vm_page_bucket_generation;

struct pglist vm_page_queue_free[PQ_L2_SIZE] = {0};
struct pglist vm_page_queue_zero[PQ_L2_SIZE] = {0};
@ -355,7 +355,6 @@ vm_page_startup(starta, enda, vaddr)
			pa += PAGE_SIZE;
		}
	}

	return (mapped);
}

@ -391,8 +390,10 @@ vm_page_insert(m, object, pindex)
{
	register struct pglist *bucket;

#if !defined(MAX_PERF)
	if (m->flags & PG_TABLED)
		panic("vm_page_insert: already inserted");
#endif

	/*
	 * Record the object/offset pair in this page
@ -446,13 +447,16 @@ vm_page_remove(m)
	register vm_page_t m;
{
	register struct pglist *bucket;
	vm_object_t object;

	if (!(m->flags & PG_TABLED))
		return;

#if !defined(MAX_PERF)
	if ((m->flags & PG_BUSY) == 0) {
		panic("vm_page_remove: page not busy");
	}
#endif

	m->flags &= ~PG_BUSY;
	if (m->flags & PG_WANTED) {
@ -460,14 +464,15 @@ vm_page_remove(m)
		wakeup(m);
	}

	if (m->object->page_hint == m)
		m->object->page_hint = NULL;
	object = m->object;
	if (object->page_hint == m)
		object->page_hint = NULL;

	if (m->wire_count)
		m->object->wire_count--;
		object->wire_count--;

	if ((m->queue - m->pc) == PQ_CACHE)
		m->object->cache_count--;
		object->cache_count--;

	/*
	 * Remove from the object_object/offset hash table
@ -481,14 +486,15 @@ vm_page_remove(m)
	 * Now remove from the object's list of backed pages.
	 */

	TAILQ_REMOVE(&m->object->memq, m, listq);
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */

	m->object->resident_page_count--;
	m->object->generation++;
	object->resident_page_count--;
	object->generation++;
	m->object = NULL;

	m->flags &= ~PG_TABLED;
}
@ -509,25 +515,30 @@ vm_page_lookup(object, pindex)
{
	register vm_page_t m;
	register struct pglist *bucket;
	int curgeneration;
	int generation;
	int s;

	/*
	 * Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	if (object->page_hint && (object->page_hint->pindex == pindex) &&
	    (object->page_hint->object == object))
		return object->page_hint;

restart:
	curgeneration = vm_page_bucket_generation;
retry:
	generation = vm_page_bucket_generation;
	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	for (m = TAILQ_FIRST(bucket); m != NULL; m = TAILQ_NEXT(m,hashq)) {
		if (curgeneration != vm_page_bucket_generation)
			goto restart;
		if ((m->object == object) && (m->pindex == pindex)) {
			if (vm_page_bucket_generation != generation)
				goto retry;
			m->object->page_hint = m;
			return (m);
		}
	}
	if (vm_page_bucket_generation != generation)
		goto retry;
	return (NULL);
}
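The reworked vm_page_lookup() snapshots vm_page_bucket_generation before walking the bucket and retries from the top whenever a concurrent insert or remove bumps it, checking again both at a match and at loop exit. The same optimistic-retry idea in a self-contained form (a flat array stands in for the hash bucket; single-threaded here, so the retry never fires):

#include <stdio.h>

#define NITEMS 4

static volatile int generation;
static int bucket[NITEMS] = { 10, 20, 30, 40 };

/* Optimistic lookup: retry whenever the structure changed under us. */
static int
lookup(int key)
{
    int g, i;
retry:
    g = generation;
    for (i = 0; i < NITEMS; i++) {
        if (bucket[i] == key) {
            if (generation != g)
                goto retry;       /* list mutated: start over */
            return i;
        }
    }
    if (generation != g)
        goto retry;               /* mutated after a miss, too */
    return -1;
}

int
main(void)
{
    printf("index of 30: %d\n", lookup(30));
    printf("index of 99: %d\n", lookup(99));
    return 0;
}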

@ -569,7 +580,8 @@ vm_page_unqueue_nowakeup(m)
		(*pq->cnt)--;
		(*pq->lcnt)--;
		if ((queue - m->pc) == PQ_CACHE) {
			m->object->cache_count--;
			if (m->object)
				m->object->cache_count--;
		}
	}
}
@ -593,7 +605,8 @@ vm_page_unqueue(m)
			if ((cnt.v_cache_count + cnt.v_free_count) <
			    (cnt.v_free_reserved + cnt.v_cache_min))
				pagedaemon_wakeup();
			m->object->cache_count--;
			if (m->object)
				m->object->cache_count--;
		}
	}
}
@ -890,7 +903,10 @@ vm_page_alloc(object, pindex, page_req)
		break;

	default:
		m = NULL;
#if !defined(MAX_PERF)
		panic("vm_page_alloc: invalid allocation class");
#endif
	}

	queue = m->queue;
@ -1045,12 +1061,15 @@ vm_page_freechk_and_unqueue(m)
	}

	if (m->wire_count != 0) {
#if !defined(MAX_PERF)
		if (m->wire_count > 1) {
			panic("vm_page_free: invalid wire count (%d), pindex: 0x%x",
				m->wire_count, m->pindex);
		}
#endif
		m->wire_count = 0;
		m->object->wire_count--;
		if (m->object)
			m->object->wire_count--;
		cnt.v_wire_count--;
	}

@ -1223,7 +1242,9 @@ vm_page_unwire(m)
			cnt.v_active_count++;
		}
	} else {
#if !defined(MAX_PERF)
		panic("vm_page_unwire: invalid wire count: %d\n", m->wire_count);
#endif
	}
	splx(s);
}
@ -1278,17 +1299,21 @@ vm_page_cache(m)
{
	int s;

#if !defined(MAX_PERF)
	if ((m->flags & PG_BUSY) || m->busy || m->wire_count) {
		printf("vm_page_cache: attempting to cache busy page\n");
		return;
	}
#endif
	if ((m->queue - m->pc) == PQ_CACHE)
		return;

	vm_page_protect(m, VM_PROT_NONE);
#if !defined(MAX_PERF)
	if (m->dirty != 0) {
		panic("vm_page_cache: caching a dirty page, pindex: %d", m->pindex);
	}
#endif
	s = splvm();
	vm_page_unqueue_nowakeup(m);
	m->queue = PQ_CACHE + m->pc;
@ -1300,6 +1325,54 @@ vm_page_cache(m)
	splx(s);
}

/*
 * Grab a page, waiting until we are waken up due to the page
 * changing state. We keep on waiting, if the page continues
 * to be in the object. If the page doesn't exist, allocate it.
 */
vm_page_t
vm_page_grab(object, pindex, allocflags)
	vm_object_t object;
	vm_pindex_t pindex;
	int allocflags;
{

	vm_page_t m;
	int s, generation;

retrylookup:
	if ((m = vm_page_lookup(object, pindex)) != NULL) {
		if (m->busy || (m->flags & PG_BUSY)) {
			generation = object->generation;

			s = splvm();
			while ((object->generation == generation) &&
			    (m->busy || (m->flags & PG_BUSY))) {
				m->flags |= PG_WANTED | PG_REFERENCED;
				tsleep(m, PVM, "pgrbwt", 0);
				if ((allocflags & VM_ALLOC_RETRY) == 0) {
					splx(s);
					return NULL;
				}
			}
			splx(s);
			goto retrylookup;
		} else {
			m->flags |= PG_BUSY;
			return m;
		}
	}

	m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
	if (m == NULL) {
		VM_WAIT;
		if ((allocflags & VM_ALLOC_RETRY) == 0)
			return NULL;
		goto retrylookup;
	}

	return m;
}
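This new vm_page_grab() is the cleaner page allocator named in the commit message: with VM_ALLOC_RETRY set it never returns NULL, since it sleeps on busy pages, restarts the lookup if the object's generation changed, and falls back to allocation with VM_WAIT. Callers that previously open-coded an alloc/VM_WAIT loop, such as kmem_alloc() and vm_freeze_copyopts() above, collapse to one call. A hedged toy model of the caller-side contract (the page struct and page_grab are stand-ins):

#include <stdio.h>

#define VM_ALLOC_NORMAL 0
#define VM_ALLOC_RETRY  0x80

struct page { int busy; };

/*
 * Toy stand-in for vm_page_grab(): with VM_ALLOC_RETRY the real routine
 * keeps sleeping and retrying, so NULL is only possible without the flag.
 */
static struct page *
page_grab(int allocflags)
{
    static struct page p;

    if ((allocflags & VM_ALLOC_RETRY) == 0 && p.busy)
        return NULL;            /* caller must cope with failure */
    p.busy = 1;
    return &p;
}

int
main(void)
{
    /* Style 1: demand success, like kmem_alloc() after this change. */
    struct page *m = page_grab(VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

    /* Style 2: tolerate failure, so the NULL check stays. */
    struct page *n = page_grab(VM_ALLOC_NORMAL);
    printf("retry grab: %p, plain grab: %p\n", (void *)m, (void *)n);
    return 0;
}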

/*
 * mapping function for valid bits or for dirty bits in
@ -1400,12 +1473,14 @@ contigmalloc1(size, type, flags, low, high, alignment, boundary, map)
	vm_page_t pga = vm_page_array;

	size = round_page(size);
#if !defined(MAX_PERF)
	if (size == 0)
		panic("contigmalloc1: size must not be 0");
	if ((alignment & (alignment - 1)) != 0)
		panic("contigmalloc1: alignment must be a power of 2");
	if ((boundary & (boundary - 1)) != 0)
		panic("contigmalloc1: boundary must be a power of 2");
#endif

	start = 0;
	for (pass = 0; pass <= 1; pass++) {

sys/vm/vm_page.h
@ -61,7 +61,7 @@
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id$
 * $Id: vm_page.h,v 1.35 1997/02/22 09:48:32 peter Exp $
 */

/*
@ -284,13 +284,15 @@ extern vm_offset_t last_phys_addr;	/* physical address for last_page */
#define VM_PAGE_BITS_ALL 0xffff
#endif

#define VM_ALLOC_NORMAL 0
#define VM_ALLOC_INTERRUPT 1
#define VM_ALLOC_SYSTEM 2
#define VM_ALLOC_ZERO 3
#define VM_ALLOC_NORMAL		0
#define VM_ALLOC_INTERRUPT	1
#define VM_ALLOC_SYSTEM		2
#define VM_ALLOC_ZERO		3
#define VM_ALLOC_RETRY		0x80

void vm_page_activate __P((vm_page_t));
vm_page_t vm_page_alloc __P((vm_object_t, vm_pindex_t, int));
vm_page_t vm_page_grab __P((vm_object_t, vm_pindex_t, int));
void vm_page_cache __P((register vm_page_t));
static __inline void vm_page_copy __P((vm_page_t, vm_page_t));
void vm_page_deactivate __P((vm_page_t));
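VM_ALLOC_RETRY is a high bit (0x80) precisely so it can be OR'ed onto the small allocation-class codes (0 through 3) and masked back off before vm_page_alloc() sees them, which is what vm_page_grab() does with allocflags & ~VM_ALLOC_RETRY. A two-line demonstration of that encoding:

#include <stdio.h>

#define VM_ALLOC_ZERO  3
#define VM_ALLOC_RETRY 0x80

int
main(void)
{
    int allocflags = VM_ALLOC_ZERO | VM_ALLOC_RETRY;

    /* vm_page_grab() strips the flag before calling vm_page_alloc(). */
    int class = allocflags & ~VM_ALLOC_RETRY;

    printf("class=%d retry=%s\n", class,
        (allocflags & VM_ALLOC_RETRY) ? "yes" : "no");
    return 0;
}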
sys/vm/vm_pageout.c
@ -65,7 +65,7 @@
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.110 1998/01/31 11:56:49 dyson Exp $
 * $Id: vm_pageout.c,v 1.111 1998/02/04 22:33:56 eivind Exp $
 */

/*
@ -369,7 +369,7 @@ vm_pageout_flush(mc, count, sync)
{
	register vm_object_t object;
	int pageout_status[count];
	int anyok = 0;
	int numpagedout = 0;
	int i;

	object = mc[0]->object;
@ -384,10 +384,10 @@ vm_pageout_flush(mc, count, sync)

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			anyok++;
			numpagedout++;
			break;
		case VM_PAGER_PEND:
			anyok++;
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
@ -423,7 +423,7 @@ vm_pageout_flush(mc, count, sync)
			PAGE_WAKEUP(mt);
		}
	}
	return anyok;
	return numpagedout;
}

#if !defined(NO_SWAPPING)
@ -644,6 +644,7 @@ vm_pageout_scan()

	pages_freed = 0;
	addl_page_shortage = vm_pageout_deficit;
	vm_pageout_deficit = 0;

	if (max_page_launder == 0)
		max_page_launder = 1;
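The rename from anyok to numpagedout makes the return value a count of pages handed to the pager (VM_PAGER_OK plus VM_PAGER_PEND) rather than a did-anything-succeed flag, so callers can account for partial clusters. A standalone sketch of the counting (the status enum is a simplified stand-in):

#include <stdio.h>

enum pager_status { VM_PAGER_OK, VM_PAGER_PEND, VM_PAGER_BAD, VM_PAGER_ERROR };

/* Count successes the way the reworked vm_pageout_flush() does. */
static int
count_pagedout(const enum pager_status *st, int n)
{
    int i, numpagedout = 0;

    for (i = 0; i < n; i++) {
        switch (st[i]) {
        case VM_PAGER_OK:
        case VM_PAGER_PEND:
            numpagedout++;
            break;
        default:
            break;
        }
    }
    return numpagedout;
}

int
main(void)
{
    enum pager_status st[] = { VM_PAGER_OK, VM_PAGER_BAD, VM_PAGER_PEND };
    printf("paged out: %d of 3\n", count_pagedout(st, 3));
    return 0;
}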
sys/vm/vnode_pager.c
@ -38,7 +38,7 @@
 * SUCH DAMAGE.
 *
 * from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
 * $Id: vnode_pager.c,v 1.81 1998/01/31 11:56:53 dyson Exp $
 * $Id: vnode_pager.c,v 1.82 1998/02/04 22:34:03 eivind Exp $
 */

/*
@ -181,9 +181,9 @@ vnode_pager_dealloc(object)
	}

	object->handle = NULL;
	object->type = OBJT_DEFAULT;
	object->type = OBJT_DEAD;
	vp->v_object = NULL;
	vp->v_flag &= ~(VTEXT|VOBJBUF);
	vp->v_flag &= ~(VTEXT | VOBJBUF);
}

static boolean_t
@ -763,7 +763,10 @@ vnode_pager_leaf_getpages(object, m, count, reqpage)
			 * now tell them that it is ok to use
			 */
			if (!error) {
				vm_page_deactivate(m[i]);
				if (m[i]->flags & PG_WANTED)
					vm_page_activate(m[i]);
				else
					vm_page_deactivate(m[i]);
				PAGE_WAKEUP(m[i]);
			} else {
				vnode_pager_freepage(m[i]);
@ -880,8 +883,10 @@ vnode_pager_leaf_putpages(object, m, count, sync, rtvals)
		if (i < ncount) {
			rtvals[i] = VM_PAGER_OK;
		}
		if ((m[i]->busy == 0) && (m[i]->flags & PG_WANTED))
		if ((m[i]->busy == 0) && (m[i]->flags & PG_WANTED)) {
			vm_page_activate(m[i]);
			wakeup(m[i]);
		}
	}
	return rtvals[0];
}