Tie up some loose ends in vnode/object management. Remove an unneeded
config option in pmap. Fix a problem with faulting in pages. Clean up
some loose ends in swap pager memory management. The system should be
much more stable, but not all of the subtle bugs are fixed yet.
Parent: d806a7ad47
Commit: 4722175765

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
	svn path=/head/; revision=32585
pmap.c

@@ -39,7 +39,7 @@
  * SUCH DAMAGE.
  *
  * from: @(#)pmap.c	7.7 (Berkeley)	5/12/91
- * $Id: pmap.c,v 1.175 1997/12/22 00:36:48 dyson Exp $
+ * $Id: pmap.c,v 1.176 1997/12/22 10:06:09 dyson Exp $
 */

 /*
@@ -115,8 +115,6 @@
 #define PMAP_INLINE
 #endif

-#define PTPHINT
-
 /*
  * Get PDEs and PTEs for user/kernel address space
  */
@@ -1051,10 +1049,8 @@ _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
 		invltlb_1pg(pteva);
 	}

-#if defined(PTPHINT)
 	if (pmap->pm_ptphint == m)
 		pmap->pm_ptphint = NULL;
-#endif

 	/*
 	 * If the page is finally unwired, simply free it.
@@ -1100,7 +1096,6 @@ pmap_unuse_pt(pmap, va, mpte)

 	if (mpte == NULL) {
 		ptepindex = (va >> PDRSHIFT);
-#if defined(PTPHINT)
 		if (pmap->pm_ptphint &&
 			(pmap->pm_ptphint->pindex == ptepindex)) {
 			mpte = pmap->pm_ptphint;
@@ -1108,9 +1103,6 @@ pmap_unuse_pt(pmap, va, mpte)
 			mpte = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
 			pmap->pm_ptphint = mpte;
 		}
-#else
-		mpte = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
-#endif
 	}

 	return pmap_unwire_pte_hold(pmap, mpte);
@@ -1244,11 +1236,9 @@ pmap_release_free_page(pmap, p)
 		pmap_kremove((vm_offset_t) pmap->pm_pdir);
 	}

-#if defined(PTPHINT)
 	if (pmap->pm_ptphint &&
 		(pmap->pm_ptphint->pindex == p->pindex))
 		pmap->pm_ptphint = NULL;
-#endif

 	vm_page_free_zero(p);
 	splx(s);
@@ -1315,12 +1305,10 @@ _pmap_allocpte(pmap, ptepindex)
 	ptepa = VM_PAGE_TO_PHYS(m);
 	pmap->pm_pdir[ptepindex] = (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V);

-#if defined(PTPHINT)
 	/*
 	 * Set the page table hint
 	 */
 	pmap->pm_ptphint = m;
-#endif

 	/*
 	 * Try to use the new mapping, but if we cannot, then
@@ -1376,7 +1364,6 @@ pmap_allocpte(pmap, va)
 	 * hold count, and activate it.
 	 */
 	if (ptepa) {
-#if defined(PTPHINT)
 		/*
 		 * In order to get the page table page, try the
 		 * hint first.
@@ -1388,9 +1375,6 @@ pmap_allocpte(pmap, va)
 			m = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
 			pmap->pm_ptphint = m;
 		}
-#else
-		m = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
-#endif
 		++m->hold_count;
 		return m;
 	}
@@ -2252,7 +2236,6 @@ pmap_enter_quick(pmap, va, pa, mpte)
 	if (ptepa) {
 		if (ptepa & PG_PS)
 			panic("pmap_enter_quick: unexpected mapping into 4MB page");
-#if defined(PTPHINT)
 		if (pmap->pm_ptphint &&
 			(pmap->pm_ptphint->pindex == ptepindex)) {
 			mpte = pmap->pm_ptphint;
@@ -2260,9 +2243,6 @@ pmap_enter_quick(pmap, va, pa, mpte)
 			mpte = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
 			pmap->pm_ptphint = mpte;
 		}
-#else
-		mpte = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
-#endif
 		if (mpte == NULL)
 			goto retry;
 		++mpte->hold_count;
@@ -2469,12 +2449,14 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
  * of pmap_object_init_pt, except it runs at page fault time instead
  * of mmap time.
  */
-#define PFBAK 2
-#define PFFOR 2
+#define PFBAK 3
+#define PFFOR 3
 #define PAGEORDER_SIZE (PFBAK+PFFOR)

 static int pmap_prefault_pageorder[] = {
-	-PAGE_SIZE, PAGE_SIZE, -2 * PAGE_SIZE, 2 * PAGE_SIZE
+	-PAGE_SIZE, PAGE_SIZE,
+	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
+	-3 * PAGE_SIZE, 3 * PAGE_SIZE
 };

 void
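The config option removed above gated only the page-table-page hint, which is now compiled in unconditionally. The pattern recurs in four functions of this diff, so it is worth isolating: cache the vm_page of the most recently used page-table page in the pmap, and consult it before paying for an object lookup. A minimal sketch under the diff's own names (ptp_lookup is a hypothetical wrapper, not a function in the commit; locking and TLB handling are omitted):

	/*
	 * Sketch of the now-unconditional page-table-page hint.
	 * pm_ptphint caches the vm_page backing the last page-table
	 * page touched, so a repeat access to the same 4MB region
	 * skips the vm_object lookup.  The body mirrors the code in
	 * pmap_unuse_pt()/pmap_allocpte() above.
	 */
	static vm_page_t
	ptp_lookup(pmap_t pmap, vm_pindex_t ptepindex)
	{
		vm_page_t mpte;

		if (pmap->pm_ptphint &&
		    (pmap->pm_ptphint->pindex == ptepindex)) {
			mpte = pmap->pm_ptphint;	/* hint hit: no lookup */
		} else {
			mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
			pmap->pm_ptphint = mpte;	/* refresh the hint */
		}
		return (mpte);
	}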
vfs_bio.c

@@ -11,7 +11,7 @@
  * 2. Absolutely no warranty of function or purpose is made by the author
  *		John S. Dyson.
  *
- * $Id: vfs_bio.c,v 1.141 1998/01/06 05:15:55 dyson Exp $
+ * $Id: vfs_bio.c,v 1.142 1998/01/12 01:46:25 dyson Exp $
 */

 /*
@@ -793,11 +793,6 @@ vfs_vmio_release(bp)
 			vp = bp->b_vp;
 			vm_page_protect(m, VM_PROT_NONE);
 			vm_page_free(m);
-			if (vp && VSHOULDFREE(vp) &&
-				(vp->v_flag & (VFREE|VTBFREE)) == 0) {
-				TAILQ_INSERT_TAIL(&vnode_tobefree_list, vp, v_freelist);
-				vp->v_flag |= VTBFREE;
-			}
 		}
 	} else {
 		/*
@@ -892,7 +887,9 @@ vfs_bio_awrite(struct buf * bp)
 			splx(s);
 			return nwritten;
 		}
-	} else if ((vp->v_flag & VOBJBUF) && (vp->v_type == VBLK) &&
+	}
+#if 0
+	else if ((vp->v_flag & VOBJBUF) && (vp->v_type == VBLK) &&
 	    ((size = bp->b_bufsize) >= PAGE_SIZE)) {
 		maxcl = MAXPHYS / size;
 		for (i = 1; i < maxcl; i++) {
@@ -913,11 +910,11 @@ vfs_bio_awrite(struct buf * bp)
 		 */
 		if (ncl != 1) {
 			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
-			printf("Block cluster: (%d, %d)\n", lblkno, nwritten);
 			splx(s);
 			return nwritten;
 		}
 	}
+#endif

 	bremfree(bp);
 	splx(s);
@@ -1086,6 +1083,7 @@ getnewbuf(struct vnode *vp, daddr_t blkno,
 	brelvp(bp);

+fillbuf:

 	/* we are not free, nor do we contain interesting data */
 	if (bp->b_rcred != NOCRED) {
 		crfree(bp->b_rcred);
@@ -1112,7 +1110,7 @@ getnewbuf(struct vnode *vp, daddr_t blkno,
 	bp->b_npages = 0;
 	bp->b_dirtyoff = bp->b_dirtyend = 0;
 	bp->b_validoff = bp->b_validend = 0;
-	bp->b_usecount = 4;
+	bp->b_usecount = 5;

 	maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;

vfs_subr.c

@@ -36,7 +36,7 @@
  * SUCH DAMAGE.
  *
  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
- * $Id: vfs_subr.c,v 1.122 1998/01/12 01:46:30 dyson Exp $
+ * $Id: vfs_subr.c,v 1.123 1998/01/12 03:15:01 dyson Exp $
 */

 /*
@@ -66,6 +66,7 @@
 #include <vm/vm_extern.h>
 #include <vm/pmap.h>
 #include <vm/vm_map.h>
+#include <vm/vm_pager.h>
 #include <vm/vnode_pager.h>
 #include <sys/sysctl.h>

@@ -77,7 +78,6 @@ static void	insmntque __P((struct vnode *vp, struct mount *mp));
 #ifdef DDB
 static void	printlockedvnodes __P((void));
 #endif
-static void	vbusy __P((struct vnode *));
 static void	vclean __P((struct vnode *vp, int flags, struct proc *p));
 static void	vfree __P((struct vnode *));
 static void	vgonel __P((struct vnode *vp, struct proc *p));
@@ -110,7 +110,7 @@ SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "")
 static u_long freevnodes = 0;
 SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");

-int vfs_ioopt = 2;
+int vfs_ioopt = 0;
 SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");

 struct mntlist mountlist;	/* mounted filesystem list */
@@ -374,8 +374,11 @@ getnewvnode(tag, mp, vops, vpp)
 	for (vp = TAILQ_FIRST(&vnode_tobefree_list); vp; vp = nvp) {
 		nvp = TAILQ_NEXT(vp, v_freelist);
 		TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist);
-		TAILQ_INSERT_TAIL(&vnode_tmp_list, vp, v_freelist);
+		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
+		vp->v_flag &= ~VTBFREE;
+		vp->v_flag |= VFREE;
 		if (vp->v_usecount)
 			panic("tobe free vnode isn't");
+		freevnodes++;
 	}

@@ -1171,9 +1174,6 @@ vclean(vp, flags, p)
 	if ((active = vp->v_usecount))
 		vp->v_usecount++;

-	if (vp->v_object) {
-		vp->v_object->flags |= OBJ_DEAD;
-	}
 	/*
 	 * Prevent the vnode from being recycled or brought into use while we
 	 * clean it out.
@@ -1193,10 +1193,21 @@ vclean(vp, flags, p)
 	/*
 	 * Clean out any buffers associated with the vnode.
 	 */
-	if (vp->v_object)
-		vm_object_terminate(vp->v_object);
-	else
-		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
+	vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
+	if (vp->v_object) {
+		if (vp->v_object->ref_count == 0) {
+			/*
+			 * This is a normal way of shutting down the object/vnode
+			 * association.
+			 */
+			vm_object_terminate(vp->v_object);
+		} else {
+			/*
+			 * Woe to the process that tries to page now :-).
+			 */
+			vm_pager_deallocate(vp->v_object);
+		}
+	}

 	/*
 	 * If purging an active vnode, it must be closed and
@@ -2186,7 +2197,7 @@ vfree(vp)
 	splx(s);
 }

-static void
+void
 vbusy(vp)
 	struct vnode *vp;
 {
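The vclean() rewrite above is the center of the vnode/object tie-up. The decision it encodes: flush buffers unconditionally first, then destroy the VM object only when its reference count has dropped to zero; if something still references the object, only the pager side is torn down so a half-dead object cannot keep paging. A skeleton of that decision, reduced from the diff (same calls; surrounding locking elided, so this is illustrative rather than the literal function):

	/* flush dirty buffers before touching the VM object */
	vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
	if (vp->v_object) {
		if (vp->v_object->ref_count == 0)
			vm_object_terminate(vp->v_object);	/* last user: destroy */
		else
			vm_pager_deallocate(vp->v_object);	/* still referenced: drop pager only */
	}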
vnode.h

@@ -31,7 +31,7 @@
  * SUCH DAMAGE.
  *
  *	@(#)vnode.h	8.7 (Berkeley) 2/4/94
- * $Id: vnode.h,v 1.63 1998/01/06 05:23:04 dyson Exp $
+ * $Id: vnode.h,v 1.64 1998/01/12 01:44:08 dyson Exp $
 */

 #ifndef _SYS_VNODE_H_
@@ -281,7 +281,7 @@ extern void	(*lease_updatetime) __P((int deltat));
 	 !((vp)->v_object->ref_count || (vp)->v_object->resident_page_count)))

 #define VSHOULDBUSY(vp)	\
-	(((vp)->v_flag & VFREE) && \
+	(((vp)->v_flag & (VFREE|VTBFREE)) && \
 	 ((vp)->v_holdcnt || (vp)->v_usecount))

 #endif /* KERNEL */
@@ -525,6 +525,7 @@ struct vnode *
 void	vput __P((struct vnode *vp));
 void	vrele __P((struct vnode *vp));
 void	vref __P((struct vnode *vp));
+void	vbusy __P((struct vnode *vp));

 extern vop_t **default_vnodeop_p;

ffs_vfsops.c

@@ -31,7 +31,7 @@
  * SUCH DAMAGE.
  *
  *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
- * $Id: ffs_vfsops.c,v 1.63 1997/12/29 00:24:28 dyson Exp $
+ * $Id: ffs_vfsops.c,v 1.64 1998/01/06 05:23:41 dyson Exp $
 */

 #include "opt_quota.h"
@@ -747,9 +747,6 @@ ffs_unmount(mp, mntflags, p)
 	ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;

 	vinvalbuf(ump->um_devvp, V_SAVE, NOCRED, p, 0, 0);
-	if (ump->um_devvp->v_object)
-		vm_object_terminate(ump->um_devvp->v_object);
-
 	error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
 		NOCRED, p);

swap_pager.c

@@ -39,7 +39,7 @@
  * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
  *
  *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
- * $Id: swap_pager.c,v 1.79 1997/12/02 21:07:19 phk Exp $
+ * $Id: swap_pager.c,v 1.80 1997/12/24 15:05:21 dyson Exp $
 */

 /*
@@ -477,30 +477,33 @@ swap_pager_free_swap(object)
 	/*
 	 * Free left over swap blocks
 	 */
-	s = splbio();
-	for (i = 0, swb = object->un_pager.swp.swp_blocks;
-		i < object->un_pager.swp.swp_nblocks; i++, swb++) {
+	s = splvm();
+	swb = object->un_pager.swp.swp_blocks;
+	if (!swb)
+		return;
+
+	for (i = 0; i < object->un_pager.swp.swp_nblocks; i++, swb++) {
 		for (j = 0; j < SWB_NPAGES; j++) {
 			if (swb->swb_block[j] != SWB_EMPTY) {
 				/*
-				  * initially the length of the run is zero
-				  */
+				 * initially the length of the run is zero
+				 */
 				if (block_count == 0) {
 					first_block = swb->swb_block[j];
 					block_count = btodb(PAGE_SIZE);
 					swb->swb_block[j] = SWB_EMPTY;
 				/*
-				  * if the new block can be included into the current run
-				  */
+				 * if the new block can be included into the current run
+				 */
 				} else if (swb->swb_block[j] == first_block + block_count) {
 					block_count += btodb(PAGE_SIZE);
 					swb->swb_block[j] = SWB_EMPTY;
 				/*
-				  * terminate the previous run, and start a new one
-				  */
+				 * terminate the previous run, and start a new one
+				 */
 				} else {
 					swap_pager_freeswapspace(object, first_block,
-					    (unsigned) first_block + block_count - 1);
+						(unsigned) first_block + block_count - 1);
 					first_block = swb->swb_block[j];
 					block_count = btodb(PAGE_SIZE);
 					swb->swb_block[j] = SWB_EMPTY;
@@ -719,6 +722,7 @@ swap_pager_dealloc(object)
 	vm_object_t object;
 {
 	int s;
+	sw_blk_t swb;

 	/*
 	 * Remove from list right away so lookups will fail if we block for
@@ -753,11 +757,14 @@ swap_pager_dealloc(object)
 		printf("swap_pager_dealloc: *warning* freeing pager with %d blocks\n",
 			object->un_pager.swp.swp_allocsize);
 	}
-	/*
-	 * Free swap management resources
-	 */
-	free(object->un_pager.swp.swp_blocks, M_VMPGDATA);
-	object->un_pager.swp.swp_blocks = NULL;
+	swb = object->un_pager.swp.swp_blocks;
+	if (swb) {
+		/*
+		 * Free swap management resources
+		 */
+		free(swb, M_VMPGDATA);
+		object->un_pager.swp.swp_blocks = NULL;
+	}
 }

 static inline int
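The rewritten swap_pager_free_swap() loop frees device blocks in coalesced runs instead of one page's worth at a time, so a single swap_pager_freeswapspace() call can cover many pages. A small user-space rendering of the same run logic (hypothetical helper names; 8 sectors per 4KB page stands in for btodb(PAGE_SIZE) with 512-byte sectors):

	#include <stdio.h>

	#define SWB_EMPTY	(-1)
	#define DBLKS_PER_PAGE	8	/* btodb(PAGE_SIZE), assuming 512-byte sectors */

	/* stand-in for swap_pager_freeswapspace(): free one contiguous run */
	static void
	free_run(long first, long count)
	{
		printf("free blocks [%ld .. %ld]\n", first, first + count - 1);
	}

	int
	main(void)
	{
		long blocks[] = { 96, 104, 112, 400, SWB_EMPTY, 408 };
		long first_block = 0, block_count = 0;
		int i, n = sizeof(blocks) / sizeof(blocks[0]);

		for (i = 0; i < n; i++) {
			if (blocks[i] == SWB_EMPTY)
				continue;
			if (block_count == 0) {			/* start a run */
				first_block = blocks[i];
				block_count = DBLKS_PER_PAGE;
			} else if (blocks[i] == first_block + block_count) {
				block_count += DBLKS_PER_PAGE;	/* extend the run */
			} else {				/* flush, restart */
				free_run(first_block, block_count);
				first_block = blocks[i];
				block_count = DBLKS_PER_PAGE;
			}
		}
		if (block_count)
			free_run(first_block, block_count);	/* final run */
		return (0);
	}

Run against the sample block list this prints two runs, [96 .. 119] and [400 .. 415], showing how contiguous slots collapse into single frees.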
|
vm_fault.c

@@ -66,7 +66,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_fault.c,v 1.73 1998/01/06 05:25:54 dyson Exp $
+ * $Id: vm_fault.c,v 1.74 1998/01/12 01:44:25 dyson Exp $
 */

 /*
@@ -96,8 +96,8 @@

 int vm_fault_additional_pages __P((vm_page_t, int, int, vm_page_t *, int *));

-#define VM_FAULT_READ_AHEAD 4
-#define VM_FAULT_READ_BEHIND 3
+#define VM_FAULT_READ_AHEAD 8
+#define VM_FAULT_READ_BEHIND 7
 #define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)

 /*
@@ -151,7 +151,10 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)

 #define RELEASE_PAGE(m)	{ \
 	PAGE_WAKEUP(m); \
-	if (m->queue != PQ_ACTIVE) vm_page_activate(m); \
+	if (m->queue != PQ_ACTIVE) { \
+		vm_page_activate(m); \
+		m->act_count = 0; \
+	} \
 }

 #define UNLOCK_MAP { \
@@ -168,7 +171,10 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
 		vm_object_pip_wakeup(first_object); \
 	} \
 	UNLOCK_MAP; \
-	if (vp != NULL) VOP_UNLOCK(vp, 0, p); \
+	if (vp != NULL) { \
+		vput(vp); \
+		vp = NULL; \
+	} \
 }

 #define UNLOCK_AND_DEALLOCATE { \
@@ -183,11 +189,35 @@ RetryFault:;
 	 * Find the backing store object and offset into it to begin the
 	 * search.
	 */

 	if ((result = vm_map_lookup(&map, vaddr,
 		fault_type, &entry, &first_object,
 		&first_pindex, &prot, &wired, &su)) != KERN_SUCCESS) {
-		return (result);
+		if ((result != KERN_PROTECTION_FAILURE) ||
+			((fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)) {
+			return result;
+		}
+
+		/*
+		 * If we are user-wiring a r/w segment, and it is COW, then
+		 * we need to do the COW operation. Note that we don't COW
+		 * currently RO sections now, because it is NOT desirable
+		 * to COW .text. We simply keep .text from ever being COW'ed
+		 * and take the heat that one cannot debug wired .text sections.
+		 */
+		result = vm_map_lookup(&map, vaddr,
+			VM_PROT_READ|VM_PROT_WRITE|VM_PROT_OVERRIDE_WRITE,
+			&entry, &first_object, &first_pindex, &prot, &wired, &su);
+		if (result != KERN_SUCCESS) {
+			return result;
+		}
+
+		/*
+		 * If we don't COW now, on a user wire, the user will never
+		 * be able to write to the mapping. If we don't make this
+		 * restriction, the bookkeeping would be nearly impossible.
+		 */
+		if ((entry->protection & VM_PROT_WRITE) == 0)
+			entry->max_protection &= ~VM_PROT_WRITE;
 	}

 	if (entry->eflags & MAP_ENTRY_NOFAULT) {
@@ -195,33 +225,6 @@ RetryFault:;
 			vaddr);
 	}

-	/*
-	 * If we are user-wiring a r/w segment, and it is COW, then
-	 * we need to do the COW operation. Note that we don't COW
-	 * currently RO sections now, because it is NOT desirable
-	 * to COW .text. We simply keep .text from ever being COW'ed
-	 * and take the heat that one cannot debug wired .text sections.
-	 */
-	if (((fault_flags & VM_FAULT_WIRE_MASK) == VM_FAULT_USER_WIRE) &&
-		(entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
-		if(entry->protection & VM_PROT_WRITE) {
-			int tresult;
-			vm_map_lookup_done(map, entry);
-
-			tresult = vm_map_lookup(&map, vaddr, VM_PROT_READ|VM_PROT_WRITE,
-				&entry, &first_object, &first_pindex, &prot, &wired, &su);
-			if (tresult != KERN_SUCCESS)
-				return tresult;
-		} else {
-			/*
-			 * If we don't COW now, on a user wire, the user will never
-			 * be able to write to the mapping. If we don't make this
-			 * restriction, the bookkeeping would be nearly impossible.
-			 */
-			entry->max_protection &= ~VM_PROT_WRITE;
-		}
-	}
-
 	/*
 	 * Make a reference to this object to prevent its disposal while we
 	 * are messing with it. Once we have the reference, the map is free
@@ -284,6 +287,12 @@ RetryFault:;
 	 */

 	while (TRUE) {

+		if (object->flags & OBJ_DEAD) {
+			UNLOCK_AND_DEALLOCATE;
+			return (KERN_PROTECTION_FAILURE);
+		}
+
 		m = vm_page_lookup(object, pindex);
 		if (m != NULL) {
 			int queue;
@@ -322,14 +331,14 @@ RetryFault:;

 			m->flags |= PG_BUSY;

-			if (/*m->valid && */
-				((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
+			if (((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
 				m->object != kernel_object && m->object != kmem_object) {
 				goto readrest;
 			}
 			break;
 		}
-		if (((object->type != OBJT_DEFAULT) && (((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired))
+		if (((object->type != OBJT_DEFAULT) &&
+			(((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired))
 			|| (object == first_object)) {

 			if (pindex >= object->size) {
@@ -341,7 +350,7 @@ RetryFault:;
 			 * Allocate a new page for this object/offset pair.
 			 */
 			m = vm_page_alloc(object, pindex,
-				(vp || object->backing_object)?VM_ALLOC_NORMAL:VM_ALLOC_ZERO);
+				(vp || object->backing_object)? VM_ALLOC_NORMAL: VM_ALLOC_ZERO);

 			if (m == NULL) {
 				UNLOCK_AND_DEALLOCATE;
@@ -349,8 +358,10 @@ RetryFault:;
 				goto RetryFault;
 			}
 		}

 readrest:
-		if (object->type != OBJT_DEFAULT && (((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired)) {
+		if (object->type != OBJT_DEFAULT &&
+			(((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired)) {
 			int rv;
 			int faultcount;
 			int reqpage;
@@ -469,7 +480,8 @@ RetryFault:;
 			 * around having the machine panic on a kernel space
 			 * fault w/ I/O error.
 			 */
-			if (((map != kernel_map) && (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) {
+			if (((map != kernel_map) && (rv == VM_PAGER_ERROR)) ||
+				(rv == VM_PAGER_BAD)) {
 				FREE_PAGE(m);
 				UNLOCK_AND_DEALLOCATE;
 				return ((rv == VM_PAGER_ERROR) ? KERN_FAILURE : KERN_PROTECTION_FAILURE);
sys/vm/vm_map.c
258
sys/vm/vm_map.c
@ -61,7 +61,7 @@
|
||||
* any improvements or extensions that they make and grant Carnegie the
|
||||
* rights to redistribute these changes.
|
||||
*
|
||||
* $Id: vm_map.c,v 1.104 1998/01/06 05:25:58 dyson Exp $
|
||||
* $Id: vm_map.c,v 1.105 1998/01/12 01:44:31 dyson Exp $
|
||||
*/
|
||||
|
||||
/*
|
||||
@ -87,9 +87,11 @@
|
||||
#include <vm/vm_page.h>
|
||||
#include <vm/vm_object.h>
|
||||
#include <vm/vm_pageout.h>
|
||||
#include <vm/vm_pager.h>
|
||||
#include <vm/vm_kern.h>
|
||||
#include <vm/vm_extern.h>
|
||||
#include <vm/default_pager.h>
|
||||
#include <vm/swap_pager.h>
|
||||
#include <vm/vm_zone.h>
|
||||
|
||||
static MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures");
|
||||
@ -332,6 +334,7 @@ vm_map_entry_create(map)
|
||||
#define vm_map_entry_link(map, after_where, entry) \
|
||||
{ \
|
||||
(map)->nentries++; \
|
||||
(map)->timestamp++; \
|
||||
(entry)->prev = (after_where); \
|
||||
(entry)->next = (after_where)->next; \
|
||||
(entry)->prev->next = (entry); \
|
||||
@ -340,6 +343,7 @@ vm_map_entry_create(map)
|
||||
#define vm_map_entry_unlink(map, entry) \
|
||||
{ \
|
||||
(map)->nentries--; \
|
||||
(map)->timestamp++; \
|
||||
(entry)->next->prev = (entry)->prev; \
|
||||
(entry)->prev->next = (entry)->next; \
|
||||
}
|
||||
@ -1145,23 +1149,27 @@ vm_map_madvise(map, pmap, start, end, advise)
|
||||
for(current = entry;
|
||||
(current != &map->header) && (current->start < end);
|
||||
current = current->next) {
|
||||
vm_size_t size = current->end - current->start;
|
||||
vm_size_t size;
|
||||
|
||||
if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
vm_map_clip_end(map, current, end);
|
||||
size = current->end - current->start;
|
||||
|
||||
/*
|
||||
* Create an object if needed
|
||||
*/
|
||||
if (current->object.vm_object == NULL) {
|
||||
vm_object_t object;
|
||||
if ((advise == MADV_FREE) || (advise == MADV_DONTNEED))
|
||||
continue;
|
||||
object = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(size));
|
||||
current->object.vm_object = object;
|
||||
current->offset = 0;
|
||||
}
|
||||
|
||||
vm_map_clip_end(map, current, end);
|
||||
switch (advise) {
|
||||
case MADV_NORMAL:
|
||||
current->object.vm_object->behavior = OBJ_NORMAL;
|
||||
@ -1181,8 +1189,7 @@ vm_map_madvise(map, pmap, start, end, advise)
|
||||
{
|
||||
vm_pindex_t pindex;
|
||||
int count;
|
||||
size = current->end - current->start;
|
||||
pindex = OFF_TO_IDX(entry->offset);
|
||||
pindex = OFF_TO_IDX(current->offset);
|
||||
count = OFF_TO_IDX(size);
|
||||
/*
|
||||
* MADV_DONTNEED removes the page from all
|
||||
@ -1197,7 +1204,6 @@ vm_map_madvise(map, pmap, start, end, advise)
|
||||
{
|
||||
vm_pindex_t pindex;
|
||||
int count;
|
||||
size = current->end - current->start;
|
||||
pindex = OFF_TO_IDX(current->offset);
|
||||
count = OFF_TO_IDX(size);
|
||||
vm_object_madvise(current->object.vm_object,
|
||||
@ -1213,6 +1219,7 @@ vm_map_madvise(map, pmap, start, end, advise)
|
||||
}
|
||||
}
|
||||
|
||||
map->timestamp++;
|
||||
vm_map_simplify_entry(map, entry);
|
||||
vm_map_unlock(map);
|
||||
return;
|
||||
@ -1262,6 +1269,7 @@ vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
|
||||
}
|
||||
|
||||
vm_map_simplify_entry(map, temp_entry);
|
||||
map->timestamp++;
|
||||
vm_map_unlock(map);
|
||||
return (KERN_SUCCESS);
|
||||
}
|
||||
@ -1364,6 +1372,7 @@ vm_map_user_pageable(map, start, end, new_pageable)
|
||||
/* First we need to allow map modifications */
|
||||
vm_map_set_recursive(map);
|
||||
vm_map_lock_downgrade(map);
|
||||
map->timestamp++;
|
||||
|
||||
rv = vm_fault_user_wire(map, entry->start, entry->end);
|
||||
if (rv) {
|
||||
@ -1394,6 +1403,7 @@ vm_map_user_pageable(map, start, end, new_pageable)
|
||||
vm_map_simplify_entry(map,entry);
|
||||
}
|
||||
}
|
||||
map->timestamp++;
|
||||
vm_map_unlock(map);
|
||||
return KERN_SUCCESS;
|
||||
}
|
||||
@ -1562,6 +1572,7 @@ vm_map_pageable(map, start, end, new_pageable)
|
||||
entry->wired_count--;
|
||||
entry = entry->prev;
|
||||
}
|
||||
map->timestamp++;
|
||||
vm_map_unlock(map);
|
||||
return (KERN_INVALID_ARGUMENT);
|
||||
}
|
||||
@ -1630,6 +1641,7 @@ vm_map_pageable(map, start, end, new_pageable)
|
||||
|
||||
vm_map_unlock(map);
|
||||
|
||||
map->timestamp++;
|
||||
return (KERN_SUCCESS);
|
||||
}
|
||||
|
||||
@ -2068,6 +2080,7 @@ vmspace_fork(vm1)
|
||||
(caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
|
||||
new_pmap = &vm2->vm_pmap; /* XXX */
|
||||
new_map = &vm2->vm_map; /* XXX */
|
||||
new_map->timestamp = 1;
|
||||
|
||||
old_entry = old_map->header.next;
|
||||
|
||||
@ -2224,7 +2237,7 @@ vmspace_unshare(struct proc *p) {
|
||||
int
|
||||
vm_map_lookup(vm_map_t *var_map, /* IN/OUT */
|
||||
vm_offset_t vaddr,
|
||||
vm_prot_t fault_type,
|
||||
vm_prot_t fault_typea,
|
||||
vm_map_entry_t *out_entry, /* OUT */
|
||||
vm_object_t *object, /* OUT */
|
||||
vm_pindex_t *pindex, /* OUT */
|
||||
@ -2238,6 +2251,7 @@ vm_map_lookup(vm_map_t *var_map, /* IN/OUT */
|
||||
register vm_map_t map = *var_map;
|
||||
register vm_prot_t prot;
|
||||
register boolean_t su;
|
||||
vm_prot_t fault_type = fault_typea;
|
||||
|
||||
RetryLookup:;
|
||||
|
||||
@ -2297,11 +2311,16 @@ RetryLookup:;
|
||||
*/
|
||||
|
||||
prot = entry->protection;
|
||||
if ((fault_type & VM_PROT_OVERRIDE_WRITE) == 0 ||
|
||||
(entry->eflags & MAP_ENTRY_COW) == 0 ||
|
||||
(entry->wired_count != 0)) {
|
||||
if ((fault_type & (prot)) !=
|
||||
(fault_type & ~VM_PROT_OVERRIDE_WRITE))
|
||||
|
||||
fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
|
||||
if ((fault_type & prot) != fault_type) {
|
||||
RETURN(KERN_PROTECTION_FAILURE);
|
||||
}
|
||||
|
||||
if (entry->wired_count &&
|
||||
(fault_type & VM_PROT_WRITE) &&
|
||||
(entry->eflags & MAP_ENTRY_COW) &&
|
||||
(fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
|
||||
RETURN(KERN_PROTECTION_FAILURE);
|
||||
}
|
||||
|
||||
@ -2462,16 +2481,16 @@ vm_map_lookup_done(map, entry)
|
||||
* operations.
|
||||
*/
|
||||
int
|
||||
vm_uiomove(mapa, srcobject, cp, cnt, uaddra, npages)
|
||||
vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
|
||||
vm_map_t mapa;
|
||||
vm_object_t srcobject;
|
||||
off_t cp;
|
||||
int cnt;
|
||||
int cnta;
|
||||
vm_offset_t uaddra;
|
||||
int *npages;
|
||||
{
|
||||
vm_map_t map;
|
||||
vm_object_t first_object, object;
|
||||
vm_object_t first_object, oldobject, object;
|
||||
vm_map_entry_t first_entry, entry;
|
||||
vm_prot_t prot;
|
||||
boolean_t wired, su;
|
||||
@ -2480,12 +2499,14 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra, npages)
|
||||
vm_pindex_t first_pindex, osize, oindex;
|
||||
off_t ooffset;
|
||||
int skipinit, allremoved;
|
||||
int cnt;
|
||||
|
||||
if (npages)
|
||||
*npages = 0;
|
||||
|
||||
allremoved = 0;
|
||||
|
||||
cnt = cnta;
|
||||
while (cnt > 0) {
|
||||
map = mapa;
|
||||
uaddr = uaddra;
|
||||
@ -2531,47 +2552,10 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra, npages)
|
||||
* If we are changing an existing map entry, just redirect
|
||||
* the object, and change mappings.
|
||||
*/
|
||||
if (first_object->type == OBJT_VNODE) {
|
||||
|
||||
if (first_object != srcobject) {
|
||||
|
||||
vm_object_deallocate(first_object);
|
||||
srcobject->flags |= OBJ_OPT;
|
||||
vm_object_reference(srcobject);
|
||||
|
||||
first_entry->object.vm_object = srcobject;
|
||||
first_entry->offset = cp;
|
||||
|
||||
} else if (first_entry->offset != cp) {
|
||||
|
||||
first_entry->offset = cp;
|
||||
|
||||
} else {
|
||||
|
||||
skipinit = 1;
|
||||
|
||||
}
|
||||
|
||||
if (skipinit == 0) {
|
||||
/*
|
||||
* Remove old window into the file
|
||||
*/
|
||||
if (!allremoved) {
|
||||
pmap_remove (map->pmap, uaddra, uaddra + cnt);
|
||||
allremoved = 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Force copy on write for mmaped regions
|
||||
*/
|
||||
vm_object_pmap_copy_1 (srcobject,
|
||||
oindex, oindex + osize);
|
||||
}
|
||||
|
||||
} else if ((first_object->ref_count == 1) &&
|
||||
if ((first_object->ref_count == 1) &&
|
||||
(first_object->size == osize) &&
|
||||
(first_object->resident_page_count == 0)) {
|
||||
vm_object_t oldobject;
|
||||
((first_object->type == OBJT_DEFAULT) ||
|
||||
(first_object->type == OBJT_SWAP)) ) {
|
||||
|
||||
oldobject = first_object->backing_object;
|
||||
|
||||
@ -2585,16 +2569,32 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra, npages)
|
||||
allremoved = 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Remove unneeded old pages
|
||||
*/
|
||||
if (first_object->resident_page_count) {
|
||||
vm_object_page_remove (first_object, 0, 0, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Invalidate swap space
|
||||
*/
|
||||
if (first_object->type == OBJT_SWAP) {
|
||||
swap_pager_freespace(first_object,
|
||||
OFF_TO_IDX(first_object->paging_offset),
|
||||
first_object->size);
|
||||
}
|
||||
|
||||
/*
|
||||
* Force copy on write for mmaped regions
|
||||
*/
|
||||
vm_object_pmap_copy_1 (srcobject,
|
||||
oindex, oindex + osize);
|
||||
vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
|
||||
|
||||
/*
|
||||
* Point the object appropriately
|
||||
*/
|
||||
if (oldobject != srcobject) {
|
||||
|
||||
/*
|
||||
* Set the object optimization hint flag
|
||||
*/
|
||||
@ -2613,6 +2613,7 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra, npages)
|
||||
TAILQ_INSERT_TAIL(&srcobject->shadow_head,
|
||||
first_object, shadow_list);
|
||||
srcobject->shadow_count++;
|
||||
srcobject->flags |= OBJ_OPT;
|
||||
|
||||
first_object->backing_object = srcobject;
|
||||
}
|
||||
@ -2629,15 +2630,17 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra, npages)
|
||||
srcobject->flags |= OBJ_OPT;
|
||||
vm_object_reference(srcobject);
|
||||
|
||||
object = srcobject;
|
||||
ooffset = cp;
|
||||
vm_object_shadow(&object, &ooffset, osize);
|
||||
|
||||
if (!allremoved) {
|
||||
pmap_remove (map->pmap, uaddra, uaddra + cnt);
|
||||
allremoved = 1;
|
||||
}
|
||||
vm_object_pmap_copy_1 (srcobject,
|
||||
oindex, oindex + osize);
|
||||
vm_map_lookup_done(map, first_entry);
|
||||
|
||||
vm_map_lock(map);
|
||||
vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
|
||||
vm_map_lock_upgrade(map);
|
||||
|
||||
if (first_entry == &map->header) {
|
||||
map->first_free = &map->header;
|
||||
@ -2648,8 +2651,8 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra, npages)
|
||||
SAVE_HINT(map, first_entry->prev);
|
||||
vm_map_entry_delete(map, first_entry);
|
||||
|
||||
rv = vm_map_insert(map, srcobject, cp, start, end,
|
||||
VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE | MAP_COPY_NEEDED);
|
||||
rv = vm_map_insert(map, object, ooffset, start, end,
|
||||
VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);
|
||||
|
||||
if (rv != KERN_SUCCESS)
|
||||
panic("vm_uiomove: could not insert new entry: %d", rv);
|
||||
@ -2659,9 +2662,10 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra, npages)
|
||||
* Map the window directly, if it is already in memory
|
||||
*/
|
||||
if (!skipinit)
|
||||
pmap_object_init_pt(map->pmap, start,
|
||||
srcobject, (vm_pindex_t) OFF_TO_IDX(cp), end - start, 0);
|
||||
pmap_object_init_pt(map->pmap, uaddra,
|
||||
srcobject, (vm_pindex_t) OFF_TO_IDX(cp), tcnt, 0);
|
||||
|
||||
map->timestamp++;
|
||||
vm_map_unlock(map);
|
||||
|
||||
cnt -= tcnt;
|
||||
@ -2689,9 +2693,8 @@ vm_freeze_page_alloc(object, pindex)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
m->valid = VM_PAGE_BITS_ALL;
|
||||
m->valid = 0;
|
||||
m->dirty = 0;
|
||||
vm_page_deactivate(m);
|
||||
return m;
|
||||
}
|
||||
|
||||
@ -2708,30 +2711,20 @@ vm_freeze_copyopts(object, froma, toa)
|
||||
int s;
|
||||
vm_object_t robject, robjectn;
|
||||
vm_pindex_t idx, from, to;
|
||||
return;
|
||||
|
||||
if ((vfs_ioopt == 0) || (object == NULL) ||
|
||||
if ((vfs_ioopt == 0) ||
|
||||
(object == NULL) ||
|
||||
((object->flags & OBJ_OPT) == 0))
|
||||
return;
|
||||
|
||||
if (object->shadow_count > object->ref_count)
|
||||
panic("vm_freeze_copyopts: sc > rc");
|
||||
|
||||
for( robject = TAILQ_FIRST(&object->shadow_head);
|
||||
robject;
|
||||
robject = robjectn) {
|
||||
while( robject = TAILQ_FIRST(&object->shadow_head)) {
|
||||
vm_pindex_t bo_pindex;
|
||||
vm_pindex_t dstpindex;
|
||||
vm_page_t m_in, m_out;
|
||||
|
||||
robjectn = TAILQ_NEXT(robject, shadow_list);
|
||||
|
||||
bo_pindex = OFF_TO_IDX(robject->backing_object_offset);
|
||||
if (bo_pindex > toa)
|
||||
continue;
|
||||
|
||||
if ((bo_pindex + robject->size) < froma)
|
||||
continue;
|
||||
|
||||
vm_object_reference(robject);
|
||||
|
||||
@ -2748,86 +2741,79 @@ vm_freeze_copyopts(object, froma, toa)
|
||||
}
|
||||
|
||||
robject->paging_in_progress++;
|
||||
from = froma;
|
||||
if (from < bo_pindex)
|
||||
from = bo_pindex;
|
||||
|
||||
to = toa;
|
||||
for (idx = 0; idx < robject->size; idx++) {
|
||||
|
||||
for (idx = from; idx < to; idx++) {
|
||||
|
||||
dstpindex = idx - bo_pindex;
|
||||
if (dstpindex >= robject->size)
|
||||
break;
|
||||
|
||||
m_in = vm_page_lookup(object, idx);
|
||||
if (m_in == NULL)
|
||||
continue;
|
||||
|
||||
if( m_in->flags & PG_BUSY) {
|
||||
s = splvm();
|
||||
while (m_in && (m_in->flags & PG_BUSY)) {
|
||||
m_in->flags |= PG_WANTED;
|
||||
tsleep(m_in, PVM, "pwtfrz", 0);
|
||||
m_in = vm_page_lookup(object, idx);
|
||||
}
|
||||
splx(s);
|
||||
if (m_in == NULL)
|
||||
continue;
|
||||
}
|
||||
m_in->flags |= PG_BUSY;
|
||||
|
||||
retryout:
|
||||
m_out = vm_page_lookup(robject, dstpindex);
|
||||
m_outretry:
|
||||
m_out = vm_page_lookup(robject, idx);
|
||||
if( m_out && (m_out->flags & PG_BUSY)) {
|
||||
s = splvm();
|
||||
while (m_out && (m_out->flags & PG_BUSY)) {
|
||||
m_out->flags |= PG_WANTED;
|
||||
tsleep(m_out, PVM, "pwtfrz", 0);
|
||||
m_out = vm_page_lookup(robject, dstpindex);
|
||||
m_out = vm_page_lookup(robject, idx);
|
||||
}
|
||||
splx(s);
|
||||
}
|
||||
|
||||
if (m_out == NULL) {
|
||||
m_out = vm_freeze_page_alloc(robject, dstpindex);
|
||||
m_out = vm_freeze_page_alloc(robject, idx);
|
||||
if (m_out == NULL)
|
||||
goto retryout;
|
||||
goto m_outretry;
|
||||
}
|
||||
|
||||
if (m_out->valid == 0) {
|
||||
m_out->flags |= PG_BUSY;
|
||||
m_inretry:
|
||||
m_in = vm_page_lookup(object, bo_pindex + idx);
|
||||
if (m_in == NULL) {
|
||||
int rv;
|
||||
m_in = vm_freeze_page_alloc(object, bo_pindex + idx);
|
||||
if (m_in == NULL)
|
||||
goto m_inretry;
|
||||
rv = vm_pager_get_pages(object, &m_in, 1, 0);
|
||||
if (rv != VM_PAGER_OK) {
|
||||
printf("vm_freeze_copyopts: cannot read page from file: %x\n", m_in->pindex);
|
||||
continue;
|
||||
}
|
||||
} else if(m_in->busy || (m_in->flags & PG_BUSY)) {
|
||||
s = splvm();
|
||||
while (m_in && (m_in->busy || (m_in->flags & PG_BUSY))) {
|
||||
m_in->flags |= PG_WANTED;
|
||||
tsleep(m_in, PVM, "pwtfrz", 0);
|
||||
m_in = vm_page_lookup(object, bo_pindex + idx);
|
||||
}
|
||||
splx(s);
|
||||
if (m_in == NULL) {
|
||||
goto m_inretry;
|
||||
}
|
||||
}
|
||||
|
||||
m_in->flags |= PG_BUSY;
|
||||
vm_page_protect(m_in, VM_PROT_NONE);
|
||||
pmap_copy_page(VM_PAGE_TO_PHYS(m_in),
|
||||
VM_PAGE_TO_PHYS(m_out));
|
||||
pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
|
||||
m_out->valid = VM_PAGE_BITS_ALL;
|
||||
m_out->dirty = VM_PAGE_BITS_ALL;
|
||||
|
||||
vm_page_deactivate(m_out);
|
||||
vm_page_deactivate(m_in);
|
||||
|
||||
PAGE_WAKEUP(m_out);
|
||||
PAGE_WAKEUP(m_in);
|
||||
}
|
||||
PAGE_WAKEUP(m_out);
|
||||
PAGE_WAKEUP(m_in);
|
||||
}
|
||||
|
||||
object->shadow_count--;
|
||||
object->ref_count--;
|
||||
TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
|
||||
robject->backing_object = NULL;
|
||||
robject->backing_object_offset = 0;
|
||||
|
||||
vm_object_pip_wakeup(robject);
|
||||
|
||||
if (((from - bo_pindex) == 0) && ((to - bo_pindex) == robject->size)) {
|
||||
|
||||
object->shadow_count--;
|
||||
|
||||
TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
|
||||
robject->backing_object = NULL;
|
||||
robject->backing_object_offset = 0;
|
||||
|
||||
if (object->ref_count == 1) {
|
||||
if (object->shadow_count == 0)
|
||||
object->flags &= ~OBJ_OPT;
|
||||
vm_object_deallocate(object);
|
||||
vm_object_deallocate(robject);
|
||||
return;
|
||||
}
|
||||
vm_object_deallocate(object);
|
||||
}
|
||||
vm_object_deallocate(robject);
|
||||
}
|
||||
if (object->shadow_count == 0)
|
||||
object->flags &= ~OBJ_OPT;
|
||||
|
||||
object->flags &= ~OBJ_OPT;
|
||||
}
|
||||
|
||||
#include "opt_ddb.h"
|
||||
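Most of the small vm_map.c hunks add a map->timestamp bump at every mutation point (the link/unlink macros, pageable/inherit/madvise paths, and fork). The apparent intent is a cheap version counter; the consumer side is not part of this diff, so the following is a hypothetical sketch of how such a stamp is typically used, not code from the commit:

	/*
	 * Hypothetical consumer of the map->timestamp counter added
	 * above: record the stamp, drop the lock to block, and detect
	 * on relock whether the map mutated in the meantime.
	 */
	static int
	map_changed_while_blocked(vm_map_t map)
	{
		unsigned int stamp;

		stamp = map->timestamp;
		vm_map_unlock(map);

		/* ... block here, e.g. wait for a page to be paged in ... */

		vm_map_lock(map);
		return (map->timestamp != stamp);	/* nonzero: caller should retry */
	}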
|
vm_map.h

@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_map.h,v 1.29 1997/12/19 09:03:12 dyson Exp $
+ * $Id: vm_map.h,v 1.30 1998/01/06 05:26:00 dyson Exp $
 */

 /*
@@ -126,7 +126,6 @@ struct vm_map_entry {
  *	insertion, or removal.
  */
 struct vm_map {
-	struct pmap *pmap;		/* Physical map */
 	struct lock lock;		/* Lock for map data */
 	struct vm_map_entry header;	/* List of entries */
 	int nentries;			/* Number of entries */
@@ -136,9 +135,10 @@ struct vm_map {
 	int ref_count;			/* Reference count */
 	struct simplelock ref_lock;	/* Lock for ref_count field */
 	vm_map_entry_t hint;		/* hint for quick lookups */
-	unsigned int timestamp;		/* Version number */
 	vm_map_entry_t first_free;	/* First free space hint */
 	boolean_t entries_pageable;	/* map entries pageable?? */
+	unsigned int timestamp;		/* Version number */
+	struct pmap *pmap;		/* Physical map */
 #define	min_offset	header.start
 #define max_offset	header.end
 };
vm_object.c

@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_object.c,v 1.105 1998/01/07 03:12:19 dyson Exp $
+ * $Id: vm_object.c,v 1.106 1998/01/12 01:44:38 dyson Exp $
 */

 /*
@@ -242,8 +242,11 @@ vm_object_reference(object)
 #endif

 	object->ref_count++;
-	if (object->type == OBJT_VNODE)
-		vget((struct vnode *) object->handle, LK_NOOBJ, curproc);
+	if (object->type == OBJT_VNODE) {
+		while (vget((struct vnode *) object->handle, LK_RETRY|LK_NOOBJ, curproc)) {
+			printf("vm_object_reference: delay in getting object\n");
+		}
+	}
 }

 void
@@ -263,11 +266,10 @@ vm_object_vndeallocate(object)
 #endif

 	object->ref_count--;
-	if (object->type == OBJT_VNODE) {
-		if (object->ref_count == 0)
-			vp->v_flag &= ~VTEXT;
-		vrele(vp);
+	if (object->ref_count == 0) {
+		vp->v_flag &= ~VTEXT;
 	}
+	vrele(vp);

 	/*
@@ -296,7 +298,7 @@ vm_object_deallocate(object)
 	}

 	if (object->ref_count == 0) {
-		panic("vm_object_deallocate: object deallocated too many times");
+		panic("vm_object_deallocate: object deallocated too many times: %d", object->type);
 	} else if (object->ref_count > 2) {
 		object->ref_count--;
 		return;
@@ -452,17 +454,17 @@ vm_object_terminate(object)
 	 */
 	vm_pager_deallocate(object);

-	simple_lock(&vm_object_list_lock);
-	TAILQ_REMOVE(&vm_object_list, object, object_list);
-	vm_object_count--;
-	simple_unlock(&vm_object_list_lock);
-
-	wakeup(object);
-
-	/*
-	 * Free the space for the object.
-	 */
-	zfree(obj_zone, object);
+	if (object->ref_count == 0) {
+		simple_lock(&vm_object_list_lock);
+		TAILQ_REMOVE(&vm_object_list, object, object_list);
+		vm_object_count--;
+		simple_unlock(&vm_object_list_lock);
+		/*
+		 * Free the space for the object.
+		 */
+		zfree(obj_zone, object);
+		wakeup(object);
+	}
 }

 /*
vm_object.h

@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_object.h,v 1.40 1997/12/29 00:24:55 dyson Exp $
+ * $Id: vm_object.h,v 1.41 1998/01/06 05:26:07 dyson Exp $
 */

 /*
@@ -73,7 +73,7 @@

 #include <sys/queue.h>

-enum obj_type { OBJT_DEFAULT, OBJT_SWAP, OBJT_VNODE, OBJT_DEVICE };
+enum obj_type { OBJT_DEFAULT, OBJT_SWAP, OBJT_VNODE, OBJT_DEVICE, OBJT_DEAD };
 typedef enum obj_type objtype_t;

 /*
vm_page.c

@@ -34,7 +34,7 @@
  * SUCH DAMAGE.
  *
  *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.84 1997/12/29 00:24:58 dyson Exp $
+ * $Id: vm_page.c,v 1.85 1998/01/12 01:44:41 dyson Exp $
 */

 /*
@@ -894,18 +894,21 @@ vm_page_alloc(object, pindex, page_req)
 		(cnt.v_free_count < cnt.v_pageout_free_min))
 		pagedaemon_wakeup();

-	if (((page_req == VM_ALLOC_NORMAL) || (page_req == VM_ALLOC_ZERO)) &&
-		oldobject &&
-		((oldobject->type == OBJT_VNODE) &&
-		(oldobject->ref_count == 0) &&
-		(oldobject->resident_page_count == 0))) {
+	s = splvm();
+	if ((qtype == PQ_CACHE) &&
+		((page_req == VM_ALLOC_NORMAL) || (page_req == VM_ALLOC_ZERO)) &&
+		oldobject && (oldobject->type == OBJT_VNODE) &&
+		((oldobject->flags & OBJ_DEAD) == 0)) {
 		struct vnode *vp;
 		vp = (struct vnode *) oldobject->handle;
-		if (VSHOULDFREE(vp)) {
-			vm_object_reference(oldobject);
-			vm_object_vndeallocate(oldobject);
+		if (vp && VSHOULDFREE(vp)) {
+			if ((vp->v_flag & (VFREE|VTBFREE|VDOOMED)) == 0) {
+				TAILQ_INSERT_TAIL(&vnode_tobefree_list, vp, v_freelist);
+				vp->v_flag |= VTBFREE;
+			}
 		}
 	}
+	splx(s);

 	return (m);
 }
@@ -970,6 +973,10 @@ static int
 vm_page_freechk_and_unqueue(m)
 	vm_page_t m;
 {
+	vm_object_t oldobject;
+
+	oldobject = m->object;
+
 #if !defined(MAX_PERF)
 	if (m->busy ||
 		(m->flags & PG_BUSY) ||
@@ -999,6 +1006,18 @@ vm_page_freechk_and_unqueue(m)
 		cnt.v_wire_count--;
 	}

+	if (oldobject && (oldobject->type == OBJT_VNODE) &&
+		((oldobject->flags & OBJ_DEAD) == 0)) {
+		struct vnode *vp;
+		vp = (struct vnode *) oldobject->handle;
+		if (vp && VSHOULDFREE(vp)) {
+			if ((vp->v_flag & (VTBFREE|VDOOMED|VFREE)) == 0) {
+				TAILQ_INSERT_TAIL(&vnode_tobefree_list, vp, v_freelist);
+				vp->v_flag |= VTBFREE;
+			}
+		}
+	}
+
 	return 1;
 }

@@ -1068,6 +1087,7 @@ vm_page_free(m)
 	} else {
 		TAILQ_INSERT_HEAD(pq->pl, m, pageq);
 	}
+
 	vm_page_free_wakeup();
 	splx(s);
 }
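Both vm_page.c hunks add the same deferred-release pattern; they are the producer half of the list that the getnewvnode() hunk in vfs_subr.c drains. Rather than manipulating the vnode free list from deep inside the page allocator, an idle vnode is tagged and queued for later. Condensed from the diff into a stand-alone fragment (the flag tests match the hunks above):

	/*
	 * Producer side of the deferred vnode release: park the vnode
	 * on vnode_tobefree_list and tag it VTBFREE instead of freeing
	 * it here at splvm.  getnewvnode() later moves these entries
	 * onto vnode_free_list (see the vfs_subr.c hunk at line 374).
	 */
	if (vp && VSHOULDFREE(vp) &&
	    (vp->v_flag & (VFREE | VTBFREE | VDOOMED)) == 0) {
		TAILQ_INSERT_TAIL(&vnode_tobefree_list, vp, v_freelist);
		vp->v_flag |= VTBFREE;	/* "to be freed" -- drained later */
	}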
|
vm_pageout.c

@@ -65,7 +65,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_pageout.c,v 1.106 1998/01/06 05:26:11 dyson Exp $
+ * $Id: vm_pageout.c,v 1.107 1998/01/12 01:44:44 dyson Exp $
 */

 /*
@@ -594,19 +594,24 @@ vm_pageout_map_deactivate_pages(map, desired)

 void
 vm_pageout_page_free(vm_page_t m) {
-	vm_object_t objref = NULL;
+	struct vnode *vp;
+	vm_object_t object;

-	m->flags |= PG_BUSY;
-	if (m->object->type == OBJT_VNODE) {
-		objref = m->object;
-		vm_object_reference(objref);
+	object = m->object;
+	vp = NULL;
+
+	object->ref_count++;
+	if (object->type == OBJT_VNODE) {
+		vp = object->handle;
+		vp->v_usecount++;
+		if (VSHOULDBUSY(vp))
+			vbusy(vp);
 	}
+
+	m->flags |= PG_BUSY;
 	vm_page_protect(m, VM_PROT_NONE);
 	PAGE_WAKEUP(m);
 	vm_page_free(m);
-	if (objref) {
-		vm_object_vndeallocate(objref);
-	}
+	vm_object_deallocate(object);
 }

 /*
@@ -776,10 +781,10 @@ vm_pageout_scan()
 				continue;
 			}

-			if (object->type == OBJT_VNODE) {
+			if (object->type == OBJT_VNODE && (object->flags & OBJ_DEAD) == 0) {
 				vp = object->handle;
 				if (VOP_ISLOCKED(vp) ||
-					vget(vp, LK_EXCLUSIVE, curproc)) {
+					vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
 					if ((m->queue == PQ_INACTIVE) &&
 						(m->hold_count == 0) &&
 						(m->busy == 0) &&
vnode_pager.c

@@ -38,7 +38,7 @@
  * SUCH DAMAGE.
  *
  *	from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
- * $Id: vnode_pager.c,v 1.78 1997/12/29 00:25:11 dyson Exp $
+ * $Id: vnode_pager.c,v 1.79 1998/01/06 05:26:17 dyson Exp $
 */

 /*
@@ -178,7 +178,6 @@ vnode_pager_dealloc(object)
 		splx(s);
 	}

-	object->flags |= OBJ_DEAD;
 	object->handle = NULL;
 	object->type = OBJT_DEFAULT;
 	vp->v_object = NULL;
@@ -200,6 +199,9 @@ vnode_pager_haspage(object, pindex, before, after)
 	int bsize;
 	int pagesperblock, blocksperpage;

+	if ((vp == NULL) || (vp->v_flag & VDOOMED))
+		return FALSE;
+
 	/*
 	 * If filesystem no longer mounted or offset beyond end of file we do
 	 * not have the page.
@@ -892,9 +894,13 @@ vnode_pager_lock(object)
 	for (; object != NULL; object = object->backing_object) {
 		if (object->type != OBJT_VNODE)
 			continue;
+		if (object->flags & OBJ_DEAD)
+			return NULL;

-		vn_lock(object->handle,
-			LK_NOPAUSE | LK_SHARED | LK_RETRY | LK_CANRECURSE, p);
+		while (vget(object->handle,
+			LK_NOPAUSE | LK_SHARED | LK_RETRY | LK_CANRECURSE, p)) {
+			printf("vnode_pager_lock: retrying\n");
+		}
 		return object->handle;
 	}
 	return NULL;