Mirror of https://git.FreeBSD.org/src.git (synced 2024-12-04 09:09:56 +00:00)

Add three new VOPs: VOP_CREATEVOBJECT, VOP_DESTROYVOBJECT and VOP_GETVOBJECT.

They will be used by nullfs and other stacked filesystems to support full
cache coherency.

Reviewed in general by:	mckusick, dillon
This commit is contained in:
Boris Popov 2000-09-12 09:49:08 +00:00
parent 6ce5d87513
commit 9ff5ce6baf
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=65770
13 changed files with 201 additions and 166 deletions

View File

@ -179,7 +179,7 @@ exec_aout_imgact(imgp)
vp = imgp->vp;
map = &vmspace->vm_map;
vm_map_lock(map);
object = vp->v_object;
VOP_GETVOBJECT(vp, &object);
vm_object_reference(object);
text_end = virtual_offset + a_out->a_text;

View File

@ -187,7 +187,7 @@ elf_load_section(struct proc *p, struct vmspace *vmspace, struct vnode *vp, vm_o
vm_offset_t file_addr;
vm_offset_t data_buf = 0;
object = vp->v_object;
VOP_GETVOBJECT(vp, &object);
error = 0;
/*

View File

@ -401,7 +401,7 @@ exec_map_first_page(imgp)
exec_unmap_first_page(imgp);
}
object = imgp->vp->v_object;
VOP_GETVOBJECT(imgp->vp, &object);
s = splvm();
ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

View File

@ -1434,8 +1434,7 @@ sendfile(struct proc *p, struct sendfile_args *uap)
}
vp = (struct vnode *)fp->f_data;
vref(vp);
obj = vp->v_object;
if (vp->v_type != VREG || obj == NULL) {
if (vp->v_type != VREG || VOP_GETVOBJECT(vp, &obj) != 0) {
error = EINVAL;
goto done;
}

View File

@ -1027,7 +1027,7 @@ brelse(struct buf * bp)
vm_page_flag_clear(m, PG_ZERO);
if (m == bogus_page) {
obj = (vm_object_t) vp->v_object;
VOP_GETVOBJECT(vp, &obj);
poff = OFF_TO_IDX(bp->b_offset);
for (j = i; j < bp->b_npages; j++) {
@ -1905,10 +1905,9 @@ inmem(struct vnode * vp, daddr_t blkno)
return 1;
if (vp->v_mount == NULL)
return 0;
if ((vp->v_object == NULL) || (vp->v_flag & VOBJBUF) == 0)
if (VOP_GETVOBJECT(vp, &obj) != 0 || (vp->v_flag & VOBJBUF) == 0)
return 0;
obj = vp->v_object;
size = PAGE_SIZE;
if (size > vp->v_mount->mnt_stat.f_iosize)
size = vp->v_mount->mnt_stat.f_iosize;
@ -2193,7 +2192,7 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
bsize = size;
offset = (off_t)blkno * bsize;
vmio = (vp->v_object != 0) && (vp->v_flag & VOBJBUF);
vmio = (VOP_GETVOBJECT(vp, NULL) == 0) && (vp->v_flag & VOBJBUF);
maxsize = vmio ? size + (offset & PAGE_MASK) : size;
maxsize = imax(maxsize, bsize);
@ -2462,7 +2461,7 @@ allocbuf(struct buf *bp, int size)
*/
vp = bp->b_vp;
obj = vp->v_object;
VOP_GETVOBJECT(vp, &obj);
while (bp->b_npages < desiredpages) {
vm_page_t m;
@ -2641,7 +2640,7 @@ bufdonebio(struct bio *bp)
void
bufdone(struct buf *bp)
{
int s;
int s, error;
void (*biodone) __P((struct buf *));
s = splbio();
@ -2680,14 +2679,14 @@ bufdone(struct buf *bp)
int iosize;
struct vnode *vp = bp->b_vp;
obj = vp->v_object;
error = VOP_GETVOBJECT(vp, &obj);
#if defined(VFS_BIO_DEBUG)
if (vp->v_usecount == 0) {
panic("biodone: zero vnode ref count");
}
if (vp->v_object == NULL) {
if (error) {
panic("biodone: missing VM object");
}
@ -2700,7 +2699,7 @@ bufdone(struct buf *bp)
KASSERT(bp->b_offset != NOOFFSET,
("biodone: no buffer offset"));
if (!obj) {
if (error) {
panic("biodone: no object");
}
#if defined(VFS_BIO_DEBUG)
@ -2821,7 +2820,9 @@ vfs_unbusy_pages(struct buf * bp)
if (bp->b_flags & B_VMIO) {
struct vnode *vp = bp->b_vp;
vm_object_t obj = vp->v_object;
vm_object_t obj;
VOP_GETVOBJECT(vp, &obj);
for (i = 0; i < bp->b_npages; i++) {
vm_page_t m = bp->b_pages[i];
@ -2898,9 +2899,10 @@ vfs_busy_pages(struct buf * bp, int clear_modify)
if (bp->b_flags & B_VMIO) {
struct vnode *vp = bp->b_vp;
vm_object_t obj = vp->v_object;
vm_object_t obj;
vm_ooffset_t foff;
VOP_GETVOBJECT(vp, &obj);
foff = bp->b_offset;
KASSERT(bp->b_offset != NOOFFSET,
("vfs_busy_pages: no buffer offset"));

View File

@ -43,6 +43,7 @@
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
@ -51,6 +52,18 @@
#include <sys/vnode.h>
#include <sys/poll.h>
#include <machine/limits.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>
static int vop_nostrategy __P((struct vop_strategy_args *));
/*
@ -67,7 +80,10 @@ static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
{ &vop_advlock_desc, (vop_t *) vop_einval },
{ &vop_bwrite_desc, (vop_t *) vop_stdbwrite },
{ &vop_close_desc, (vop_t *) vop_null },
{ &vop_createvobject_desc, (vop_t *) vop_stdcreatevobject },
{ &vop_destroyvobject_desc, (vop_t *) vop_stddestroyvobject },
{ &vop_fsync_desc, (vop_t *) vop_null },
{ &vop_getvobject_desc, (vop_t *) vop_stdgetvobject },
{ &vop_inactive_desc, (vop_t *) vop_stdinactive },
{ &vop_ioctl_desc, (vop_t *) vop_enotty },
{ &vop_islocked_desc, (vop_t *) vop_noislocked },
@ -522,6 +538,106 @@ vop_stdgetwritemount(ap)
return (0);
}
/*
 * vop_stdcreatevobject: default implementation of VOP_CREATEVOBJECT.
 *
 * Ensure the vnode has a backing VM object, allocating one via the
 * vnode pager if necessary.  Returns 0 on success or an errno from
 * VOP_GETATTR.  The vnode is expected to be locked by the caller
 * (it is unlocked/relocked internally while waiting on a dying object).
 */
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *vp;
		struct ucred *cred;
		struct proc *p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct proc *p = ap->a_p;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	/* Only disks and VMIO-capable vnodes need a backing object. */
	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			/* Size the new object from the file's attributes. */
			if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (devsw(vp->v_rdev) != NULL) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode. This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			/* Nothing to back this vnode type with. */
			goto retn;
		}
		/*
		 * Dereference the reference we just created. This assumes
		 * that the object is associated with the vp.
		 */
		object->ref_count--;
		vp->v_usecount--;
	} else {
		if (object->flags & OBJ_DEAD) {
			/*
			 * An old object is still being torn down; drop the
			 * vnode lock, wait for the teardown to finish, then
			 * relock and retry from the top.
			 */
			VOP_UNLOCK(vp, 0, p);
			tsleep(object, PVM, "vodead", 0);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
			goto retry;
		}
	}

	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
	/* Mark the vnode as having VMIO-backed buffers. */
	vp->v_flag |= VOBJBUF;

retn:
	return (error);
}
/*
 * vop_stddestroyvobject: default implementation of VOP_DESTROYVOBJECT.
 *
 * Tear down the VM object backing a vnode, if one exists.
 * Always returns 0.
 */
int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *vp;
	} */ *ap;
{
	vm_object_t vobj;

	vobj = ap->a_vp->v_object;
	if (vobj == NULL)
		return (0);

	if (vobj->ref_count != 0) {
		/*
		 * Still referenced: detach the pager.
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(vobj);
	} else {
		/*
		 * vclean() may be called twice.  The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 */
		vm_object_terminate(vobj);
	}
	return (0);
}
int
vop_stdgetvobject(ap)
struct vop_getvobject_args /* {
struct vnode *vp;
struct vm_object **objpp;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
struct vm_object **objpp = ap->a_objpp;
if (objpp)
*objpp = vp->v_object;
return (vp->v_object ? 0 : EINVAL);
}
/*
* vfs default ops
 * used to fill the vfs function table to get reasonable default return values.

View File

@ -484,10 +484,9 @@ getnewvnode(tag, mp, vops, vpp)
* if it still has cached pages or we cannot get
* its interlock.
*/
object = vp->v_object;
if (LIST_FIRST(&vp->v_cache_src) != NULL ||
(object && (object->resident_page_count ||
object->ref_count)) ||
(VOP_GETVOBJECT(vp, &object) == 0 &&
(object->resident_page_count || object->ref_count)) ||
!simple_lock_try(&vp->v_interlock)) {
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp = NULL;
@ -711,8 +710,7 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
* Destroy the copy in the VM cache, too.
*/
simple_lock(&vp->v_interlock);
object = vp->v_object;
if (object != NULL) {
if (VOP_GETVOBJECT(vp, &object) == 0) {
vm_object_page_remove(object, 0, 0,
(flags & V_SAVE) ? TRUE : FALSE);
}
@ -1649,7 +1647,6 @@ vclean(vp, flags, p)
struct proc *p;
{
int active;
vm_object_t obj;
/*
* Check to see if the vnode is in use. If so we have to reference it
@ -1686,22 +1683,7 @@ vclean(vp, flags, p)
vinvalbuf(vp, 0, NOCRED, p, 0, 0);
}
if ((obj = vp->v_object) != NULL) {
if (obj->ref_count == 0) {
/*
* vclean() may be called twice. The first time
* removes the primary reference to the object,
* the second time goes one further and is a
* special-case to terminate the object.
*/
vm_object_terminate(obj);
} else {
/*
* Woe to the process that tries to page now :-).
*/
vm_pager_deallocate(obj);
}
}
VOP_DESTROYVOBJECT(vp);
/*
* If purging an active vnode, it must be closed and
@ -2523,20 +2505,20 @@ vfs_msync(struct mount *mp, int flags) {
continue;
if (flags != MNT_WAIT) {
obj = vp->v_object;
if (obj == NULL || (obj->flags & OBJ_MIGHTBEDIRTY) == 0)
if (VOP_GETVOBJECT(vp, &obj) != 0 ||
(obj->flags & OBJ_MIGHTBEDIRTY) == 0)
continue;
if (VOP_ISLOCKED(vp, NULL))
continue;
}
simple_lock(&vp->v_interlock);
if (vp->v_object &&
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
if (VOP_GETVOBJECT(vp, &obj) == 0 &&
(obj->flags & OBJ_MIGHTBEDIRTY)) {
if (!vget(vp,
LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ, curproc)) {
if (vp->v_object) {
vm_object_page_clean(vp->v_object, 0, 0, flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
if (VOP_GETVOBJECT(vp, &obj) == 0) {
vm_object_page_clean(obj, 0, 0, flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
anyio = 1;
}
vput(vp);
@ -2563,49 +2545,7 @@ vfs_object_create(vp, p, cred)
struct proc *p;
struct ucred *cred;
{
struct vattr vat;
vm_object_t object;
int error = 0;
if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
return 0;
retry:
if ((object = vp->v_object) == NULL) {
if (vp->v_type == VREG || vp->v_type == VDIR) {
if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
goto retn;
object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
} else if (devsw(vp->v_rdev) != NULL) {
/*
* This simply allocates the biggest object possible
* for a disk vnode. This should be fixed, but doesn't
* cause any problems (yet).
*/
object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
} else {
goto retn;
}
/*
* Dereference the reference we just created. This assumes
* that the object is associated with the vp.
*/
object->ref_count--;
vp->v_usecount--;
} else {
if (object->flags & OBJ_DEAD) {
VOP_UNLOCK(vp, 0, p);
tsleep(object, PVM, "vodead", 0);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
goto retry;
}
}
KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
vp->v_flag |= VOBJBUF;
retn:
return error;
return (VOP_CREATEVOBJECT(vp, cred, p));
}
void

View File

@ -1068,7 +1068,7 @@ open(p, uap)
goto bad;
}
/* assert that vn_open created a backing object if one is needed */
KASSERT(!vn_canvmio(vp) || vp->v_object != NULL,
KASSERT(!vn_canvmio(vp) || VOP_GETVOBJECT(vp, NULL) == 0,
("open: vmio vnode has no backing object after vn_open"));
p->p_retval[0] = indx;
return (0);
@ -2637,6 +2637,7 @@ fsync(p, uap)
struct vnode *vp;
struct mount *mp;
struct file *fp;
vm_object_t obj;
int error;
if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
@ -2645,8 +2646,8 @@ fsync(p, uap)
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
if (vp->v_object)
vm_object_page_clean(vp->v_object, 0, 0, 0);
if (VOP_GETVOBJECT(vp, &obj) == 0)
vm_object_page_clean(obj, 0, 0, 0);
error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p);
#ifdef SOFTUPDATES
if (error == 0 && vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
@ -3415,7 +3416,7 @@ fhopen(p, uap)
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
fp->f_flag |= FHASLOCK;
}
if ((vp->v_type == VREG) && (vp->v_object == NULL))
if ((vp->v_type == VREG) && (VOP_GETVOBJECT(vp, NULL) != 0))
vfs_object_create(vp, p, p->p_ucred);
VOP_UNLOCK(vp, 0, p);

View File

@ -484,10 +484,9 @@ getnewvnode(tag, mp, vops, vpp)
* if it still has cached pages or we cannot get
* its interlock.
*/
object = vp->v_object;
if (LIST_FIRST(&vp->v_cache_src) != NULL ||
(object && (object->resident_page_count ||
object->ref_count)) ||
(VOP_GETVOBJECT(vp, &object) == 0 &&
(object->resident_page_count || object->ref_count)) ||
!simple_lock_try(&vp->v_interlock)) {
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp = NULL;
@ -711,8 +710,7 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
* Destroy the copy in the VM cache, too.
*/
simple_lock(&vp->v_interlock);
object = vp->v_object;
if (object != NULL) {
if (VOP_GETVOBJECT(vp, &object) == 0) {
vm_object_page_remove(object, 0, 0,
(flags & V_SAVE) ? TRUE : FALSE);
}
@ -1649,7 +1647,6 @@ vclean(vp, flags, p)
struct proc *p;
{
int active;
vm_object_t obj;
/*
* Check to see if the vnode is in use. If so we have to reference it
@ -1686,22 +1683,7 @@ vclean(vp, flags, p)
vinvalbuf(vp, 0, NOCRED, p, 0, 0);
}
if ((obj = vp->v_object) != NULL) {
if (obj->ref_count == 0) {
/*
* vclean() may be called twice. The first time
* removes the primary reference to the object,
* the second time goes one further and is a
* special-case to terminate the object.
*/
vm_object_terminate(obj);
} else {
/*
* Woe to the process that tries to page now :-).
*/
vm_pager_deallocate(obj);
}
}
VOP_DESTROYVOBJECT(vp);
/*
* If purging an active vnode, it must be closed and
@ -2523,20 +2505,20 @@ vfs_msync(struct mount *mp, int flags) {
continue;
if (flags != MNT_WAIT) {
obj = vp->v_object;
if (obj == NULL || (obj->flags & OBJ_MIGHTBEDIRTY) == 0)
if (VOP_GETVOBJECT(vp, &obj) != 0 ||
(obj->flags & OBJ_MIGHTBEDIRTY) == 0)
continue;
if (VOP_ISLOCKED(vp, NULL))
continue;
}
simple_lock(&vp->v_interlock);
if (vp->v_object &&
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
if (VOP_GETVOBJECT(vp, &obj) == 0 &&
(obj->flags & OBJ_MIGHTBEDIRTY)) {
if (!vget(vp,
LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ, curproc)) {
if (vp->v_object) {
vm_object_page_clean(vp->v_object, 0, 0, flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
if (VOP_GETVOBJECT(vp, &obj) == 0) {
vm_object_page_clean(obj, 0, 0, flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
anyio = 1;
}
vput(vp);
@ -2563,49 +2545,7 @@ vfs_object_create(vp, p, cred)
struct proc *p;
struct ucred *cred;
{
struct vattr vat;
vm_object_t object;
int error = 0;
if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
return 0;
retry:
if ((object = vp->v_object) == NULL) {
if (vp->v_type == VREG || vp->v_type == VDIR) {
if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
goto retn;
object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
} else if (devsw(vp->v_rdev) != NULL) {
/*
* This simply allocates the biggest object possible
* for a disk vnode. This should be fixed, but doesn't
* cause any problems (yet).
*/
object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
} else {
goto retn;
}
/*
* Dereference the reference we just created. This assumes
* that the object is associated with the vp.
*/
object->ref_count--;
vp->v_usecount--;
} else {
if (object->flags & OBJ_DEAD) {
VOP_UNLOCK(vp, 0, p);
tsleep(object, PVM, "vodead", 0);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
goto retry;
}
}
KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
vp->v_flag |= VOBJBUF;
retn:
return error;
return (VOP_CREATEVOBJECT(vp, cred, p));
}
void

View File

@ -1068,7 +1068,7 @@ open(p, uap)
goto bad;
}
/* assert that vn_open created a backing object if one is needed */
KASSERT(!vn_canvmio(vp) || vp->v_object != NULL,
KASSERT(!vn_canvmio(vp) || VOP_GETVOBJECT(vp, NULL) == 0,
("open: vmio vnode has no backing object after vn_open"));
p->p_retval[0] = indx;
return (0);
@ -2637,6 +2637,7 @@ fsync(p, uap)
struct vnode *vp;
struct mount *mp;
struct file *fp;
vm_object_t obj;
int error;
if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
@ -2645,8 +2646,8 @@ fsync(p, uap)
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
if (vp->v_object)
vm_object_page_clean(vp->v_object, 0, 0, 0);
if (VOP_GETVOBJECT(vp, &obj) == 0)
vm_object_page_clean(obj, 0, 0, 0);
error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p);
#ifdef SOFTUPDATES
if (error == 0 && vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
@ -3415,7 +3416,7 @@ fhopen(p, uap)
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
fp->f_flag |= FHASLOCK;
}
if ((vp->v_type == VREG) && (vp->v_object == NULL))
if ((vp->v_type == VREG) && (VOP_GETVOBJECT(vp, NULL) != 0))
vfs_object_create(vp, p, p->p_ucred);
VOP_UNLOCK(vp, 0, p);

View File

@ -555,3 +555,27 @@ vop_setextattr {
IN struct ucred *cred;
IN struct proc *p;
};
#
#% createvobject vp L L L
#
vop_createvobject {
IN struct vnode *vp;
IN struct ucred *cred;
IN struct proc *p;
};
#
#% destroyvobject vp L L L
#
vop_destroyvobject {
IN struct vnode *vp;
};
#
#% getvobject vp L L L
#
vop_getvobject {
IN struct vnode *vp;
OUT struct vm_object **objpp;
};

View File

@ -629,6 +629,9 @@ int vop_enotty __P((struct vop_generic_args *ap));
int vop_defaultop __P((struct vop_generic_args *ap));
int vop_null __P((struct vop_generic_args *ap));
int vop_panic __P((struct vop_generic_args *ap));
int vop_stdcreatevobject __P((struct vop_createvobject_args *ap));
int vop_stddestroyvobject __P((struct vop_destroyvobject_args *ap));
int vop_stdgetvobject __P((struct vop_getvobject_args *ap));
void vfree __P((struct vnode *));
void vput __P((struct vnode *vp));

View File

@ -198,6 +198,7 @@ mmap(p, uap)
int disablexworkaround;
off_t pos;
struct vmspace *vms = p->p_vmspace;
vm_object_t obj;
addr = (vm_offset_t) uap->addr;
size = uap->len;
@ -295,6 +296,14 @@ mmap(p, uap)
vp = (struct vnode *) fp->f_data;
if (vp->v_type != VREG && vp->v_type != VCHR)
return (EINVAL);
if (vp->v_type == VREG) {
/*
* Get the proper underlying object
*/
if (VOP_GETVOBJECT(vp, &obj) != 0)
return (EINVAL);
vp = (struct vnode*)obj->handle;
}
/*
* XXX hack to handle use of /dev/zero to map anon memory (ala
* SunOS).