/*
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 * $Id: ffs_vfsops.c,v 1.80 1998/04/20 03:57:41 julian Exp $
 */

#include "opt_devfs.h"	/* for SLICE */
#include "opt_quota.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/disklabel.h>
#include <sys/malloc.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>

static MALLOC_DEFINE(M_FFSNODE, "FFS node", "FFS vnode private part");

static int	ffs_sbupdate __P((struct ufsmount *, int));
static int	ffs_reload __P((struct mount *,struct ucred *,struct proc *));
static int	ffs_oldfscompat __P((struct fs *));
static int	ffs_mount __P((struct mount *, char *, caddr_t,
			struct nameidata *, struct proc *));
static int	ffs_init __P((struct vfsconf *));

static struct vfsops ufs_vfsops = {
	ffs_mount,
	ufs_start,
	ffs_unmount,
	ufs_root,
	ufs_quotactl,
	ffs_statfs,
	ffs_sync,
	ffs_vget,
	ffs_fhtovp,
	ffs_vptofh,
	ffs_init,
};

VFS_SET(ufs_vfsops, ufs, MOUNT_UFS, 0);

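/*
 * VFS_SET() registers ufs_vfsops as the handler for the "ufs" filesystem
 * type, so the generic mount/unmount paths can dispatch to the ffs_ and
 * ufs_ entry points listed in the table above.
 */
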
/*
 * ffs_mount
 *
 * Called when mounting local physical media
 *
 * PARAMETERS:
 *		mountroot
 *			mp	mount point structure
 *			path	NULL (flag for root mount!!!)
 *			data	<unused>
 *			ndp	<unused>
 *			p	process (user credentials check [statfs])
 *
 *		mount
 *			mp	mount point structure
 *			path	path to mount point
 *			data	pointer to argument struct in user space
 *			ndp	mount point namei() return (used for
 *				credentials on reload), reused to look
 *				up block device.
 *			p	process (user credentials check)
 *
 * RETURNS:	0	Success
 *		!0	error number (errno.h)
 *
 * LOCK STATE:
 *
 *		ENTRY
 *			mount point is locked
 *		EXIT
 *			mount point is locked
 *
 * NOTES:
 *		A NULL path can be used for a flag since the mount
 *		system call will fail with EFAULT in copyinstr in
 *		namei() if it is a genuine NULL from the user.
 */
#ifdef SLICE
extern struct vnode *root_device_vnode;
#endif
static int
ffs_mount( mp, path, data, ndp, p)
	struct mount		*mp;	/* mount struct pointer*/
	char			*path;	/* path to mount point*/
	caddr_t			data;	/* arguments to FS specific mount*/
	struct nameidata	*ndp;	/* mount point credentials*/
	struct proc		*p;	/* process requesting mount*/
{
	u_int		size;
	int		err = 0;
	struct vnode	*devvp;

	struct ufs_args args;
	struct ufsmount *ump = 0;
	register struct fs *fs;
	int error, flags;
	mode_t accessmode;

	/*
	 * Use NULL path to flag a root mount
	 */
	if( path == NULL) {
		/*
		 ***
		 * Mounting root file system
		 ***
		 */

#ifdef SLICE
		rootvp = root_device_vnode;
		if (rootvp == NULL) {
			printf("ffs_mountroot: rootvp not set");
			return (EINVAL);
		}
#else	/* !SLICE */
		if ((err = bdevvp(rootdev, &rootvp))) {
			printf("ffs_mountroot: can't find rootvp");
			return (err);
		}

		if (bdevsw[major(rootdev)]->d_flags & D_NOCLUSTERR)
			mp->mnt_flag |= MNT_NOCLUSTERR;
		if (bdevsw[major(rootdev)]->d_flags & D_NOCLUSTERW)
			mp->mnt_flag |= MNT_NOCLUSTERW;
#endif	/* !SLICE */
		if( ( err = ffs_mountfs(rootvp, mp, p, M_FFSNODE)) != 0) {
			/* fs specific cleanup (if any)*/
			goto error_1;
		}

		goto dostatfs;		/* success*/

	}

	/*
	 ***
	 * Mounting non-root file system or updating a file system
	 ***
	 */

	/* copy in user arguments*/
	err = copyin(data, (caddr_t)&args, sizeof (struct ufs_args));
	if (err)
		goto error_1;		/* can't get arguments*/

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 * Disallow clearing MNT_NOCLUSTERR and MNT_NOCLUSTERW flags,
	 * if the block device requests it.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		devvp = ump->um_devvp;
		err = 0;
		if (bdevsw[major(ump->um_dev)]->d_flags & D_NOCLUSTERR)
			mp->mnt_flag |= MNT_NOCLUSTERR;
		if (bdevsw[major(ump->um_dev)]->d_flags & D_NOCLUSTERW)
			mp->mnt_flag |= MNT_NOCLUSTERW;
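		/*
		 * Changing from read-write to read-only: flush the files
		 * first, letting the soft updates code do the flush when it
		 * is active on this mount.
		 */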
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (mp->mnt_flag & MNT_SOFTDEP) {
				err = softdep_flushfiles(mp, flags, p);
			} else {
				err = ffs_flushfiles(mp, flags, p);
			}
		}
		if (!err && (mp->mnt_flag & MNT_RELOAD))
			err = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
		if (err) {
			goto error_1;
		}
		if (fs->fs_ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			if (!fs->fs_clean) {
				if (mp->mnt_flag & MNT_FORCE) {
					printf("WARNING: %s was not properly dismounted.\n",fs->fs_fsmnt);
				} else {
					printf("WARNING: R/W mount of %s denied. Filesystem is not clean - run fsck.\n",
					    fs->fs_fsmnt);
					err = EPERM;
					goto error_1;
				}
			}

			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			if (p->p_ucred->cr_uid != 0) {
				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
				if (error = VOP_ACCESS(devvp, VREAD | VWRITE,
				    p->p_ucred, p)) {
					VOP_UNLOCK(devvp, 0, p);
					return (error);
				}
				VOP_UNLOCK(devvp, 0, p);
			}

			/* check to see if we need to start softdep */
			if (fs->fs_flags & FS_DOSOFTDEP) {
				err = softdep_mount(devvp, mp, fs, p->p_ucred);
				if (err)
					goto error_1;
			}

			fs->fs_ronly = 0;
		}
		if (fs->fs_ronly == 0) {
			fs->fs_clean = 0;
			ffs_sbupdate(ump, MNT_WAIT);
		}
		/* if not updating name...*/
		if (args.fspec == 0) {
			/*
			 * Process export requests.  Jumping to "success"
			 * will return the vfs_export() error code.
			 */
			err = vfs_export(mp, &ump->um_export, &args.export);
			goto success;
		}
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
	err = namei(ndp);
	if (err) {
		/* can't get devvp!*/
		goto error_1;
	}

	devvp = ndp->ni_vp;

	if (devvp->v_type != VBLK) {
		err = ENOTBLK;
		goto error_2;
	}
	if (major(devvp->v_rdev) >= nblkdev) {
		err = ENXIO;
		goto error_2;
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	if (p->p_ucred->cr_uid != 0) {
		accessmode = VREAD;
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
		if (error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p)) {
			vput(devvp);
			return (error);
		}
		VOP_UNLOCK(devvp, 0, p);
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 ********************
		 * UPDATE
		 * If it's not the same vnode, or at least the same device
		 * then it's not correct.
		 ********************
		 */

		if (devvp != ump->um_devvp) {
			if ( devvp->v_rdev == ump->um_devvp->v_rdev) {
				vrele(devvp);
			} else {
				err = EINVAL;	/* needs translation */
			}
		} else
			vrele(devvp);
		/*
		 * Update device name only on success
		 */
		if( !err) {
			/* Save "mounted from" info for mount point (NULL pad)*/
			copyinstr(	args.fspec,
					mp->mnt_stat.f_mntfromname,
					MNAMELEN - 1,
					&size);
			bzero( mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
		}
	} else {
		/*
		 ********************
		 * NEW MOUNT
		 ********************
		 */

		if (bdevsw[major(devvp->v_rdev)]->d_flags & D_NOCLUSTERR)
			mp->mnt_flag |= MNT_NOCLUSTERR;
		if (bdevsw[major(devvp->v_rdev)]->d_flags & D_NOCLUSTERW)
			mp->mnt_flag |= MNT_NOCLUSTERW;

		/*
		 * Since this is a new mount, we want the names for
		 * the device and the mount point copied in.  If an
		 * error occurs, the mountpoint is discarded by the
		 * upper level code.
		 */
		/* Save "last mounted on" info for mount point (NULL pad)*/
		copyinstr(	path,				/* mount point*/
				mp->mnt_stat.f_mntonname,	/* save area*/
				MNAMELEN - 1,			/* max size*/
				&size);				/* real size*/
		bzero( mp->mnt_stat.f_mntonname + size, MNAMELEN - size);

		/* Save "mounted from" info for mount point (NULL pad)*/
		copyinstr(	args.fspec,			/* device name*/
				mp->mnt_stat.f_mntfromname,	/* save area*/
				MNAMELEN - 1,			/* max size*/
				&size);				/* real size*/
		bzero( mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);

		err = ffs_mountfs(devvp, mp, p, M_FFSNODE);
	}
	if (err) {
		goto error_2;
	}

dostatfs:
	/*
	 * Initialize FS stat information in mount struct; uses both
	 * mp->mnt_stat.f_mntonname and mp->mnt_stat.f_mntfromname
	 *
	 * This code is common to root and non-root mounts
	 */
	(void)VFS_STATFS(mp, &mp->mnt_stat, p);

	goto success;


error_2:	/* error with devvp held*/

	/* release devvp before failing*/
	vrele(devvp);

error_1:	/* no state to back out*/

success:
	return( err);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) invalidate all cached file data.
 *	6) re-read inode data for all active vnodes.
 */
static int
ffs_reload(mp, cred, p)
	register struct mount *mp;
	struct ucred *cred;
	struct proc *p;
{
	register struct vnode *vp, *nvp, *devvp;
	struct inode *ip;
	struct csum *space;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct partinfo dpart;
	dev_t dev;
	int i, blks, size, error;
	int32_t *lp;

	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = vinvalbuf(devvp, 0, cred, p, 0, 0);
	VOP_UNLOCK(devvp, 0, p);
	if (error)
		panic("ffs_reload: dirty1");

	dev = devvp->v_rdev;
	/*
	 * Only VMIO the backing device if the backing device is a real
	 * block device.  This excludes the original MFS implementation.
	 * Note that it is optional that the backing device be VMIOed.  This
	 * increases the opportunity for metadata caching.
	 */
	if ((devvp->v_type == VBLK) && (major(dev) < nblkdev)) {
		simple_lock(&devvp->v_interlock);
		vfs_object_create(devvp, p, p->p_ucred, 0);
	}

	/*
	 * Step 2: re-read superblock from disk.
	 */
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;
	if (error = bread(devvp, (ufs_daddr_t)(SBOFF/size), SBSIZE, NOCRED,&bp))
		return (error);
	newfs = (struct fs *)bp->b_data;
	if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		return (EIO);		/* XXX needs translation */
	}
	fs = VFSTOUFS(mp)->um_fs;
	/*
	 * Copy pointer fields back into superblock before copying in	XXX
	 * new superblock. These should really be in the ufsmount.	XXX
	 * Note that important parameters (eg fs_ncg) are unchanged.
	 */
	bcopy(&fs->fs_csp[0], &newfs->fs_csp[0], sizeof(fs->fs_csp));
	newfs->fs_maxcluster = fs->fs_maxcluster;
	bcopy(newfs, fs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat(fs);

	/*
	 * Step 3: re-read summary information from disk.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = fs->fs_csp[0];
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error)
			return (error);
		bcopy(bp->b_data, fs->fs_csp[fragstoblks(fs, i)], (u_int)size);
		brelse(bp);
	}
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		lp = fs->fs_maxcluster;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}

loop:
	simple_lock(&mntvnode_slock);
	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		if (vp->v_mount != mp) {
			simple_unlock(&mntvnode_slock);
			goto loop;
		}
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Step 4: invalidate all inactive vnodes.
		 */
		if (vrecycle(vp, &mntvnode_slock, p))
			goto loop;
		/*
		 * Step 5: invalidate all cached file data.
		 */
		simple_lock(&vp->v_interlock);
		simple_unlock(&mntvnode_slock);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
			goto loop;
		}
		if (vinvalbuf(vp, 0, cred, p, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 6: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error =
		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			vput(vp);
			return (error);
		}
		ip->i_din = *((struct dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number));
		ip->i_effnlink = ip->i_nlink;
		brelse(bp);
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	return (0);
}

/*
 * Common code for mount and mountroot
 */
int
ffs_mountfs(devvp, mp, p, malloctype)
	register struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
	struct malloc_type *malloctype;
{
	register struct ufsmount *ump;
	struct buf *bp;
	register struct fs *fs;
	struct cg *cgp;
	dev_t dev;
	struct partinfo dpart;
	struct csum cstotal;
	caddr_t base, space;
	int error, i, cyl, blks, size, ronly;
	int32_t *lp;
	struct ucred *cred;
	u_int64_t maxfilesize;					/* XXX */
	u_int strsize;
	int ncount;

	dev = devvp->v_rdev;
	cred = p ? p->p_ucred : NOCRED;
	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	error = vfs_mountedon(devvp);
	if (error)
		return (error);
	ncount = vcount(devvp);

	if (ncount > 1 && devvp != rootvp)
		return (EBUSY);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
	VOP_UNLOCK(devvp, 0, p);
	if (error)
		return (error);

	/*
	 * Only VMIO the backing device if the backing device is a real
	 * block device.  This excludes the original MFS implementation.
	 * Note that it is optional that the backing device be VMIOed.  This
	 * increases the opportunity for metadata caching.
	 */
	if ((devvp->v_type == VBLK) && (major(dev) < nblkdev)) {
		simple_lock(&devvp->v_interlock);
		vfs_object_create(devvp, p, p->p_ucred, 0);
	}

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
	if (error)
		return (error);

	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;

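	/*
	 * The primary superblock sits at a fixed offset from the start of
	 * the partition (SBOFF bytes, i.e. SBLOCK device sectors); read it
	 * here and sanity-check it before copying it into the ufsmount.
	 */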
	bp = NULL;
	ump = NULL;
	if (error = bread(devvp, SBLOCK, SBSIZE, cred, &bp))
		goto out;
	fs = (struct fs *)bp->b_data;
	if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
	    fs->fs_bsize < sizeof(struct fs)) {
		error = EINVAL;		/* XXX needs translation */
		goto out;
	}
	fs->fs_fmod = 0;
	if (!fs->fs_clean) {
		if (ronly || (mp->mnt_flag & MNT_FORCE)) {
			printf("WARNING: %s was not properly dismounted.\n",fs->fs_fsmnt);
		} else {
			printf("WARNING: R/W mount of %s denied. Filesystem is not clean - run fsck.\n",fs->fs_fsmnt);
			error = EPERM;
			goto out;
		}
	}
	/* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
	if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
		error = EROFS;		/* needs translation */
		goto out;
	}
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	bzero((caddr_t)ump, sizeof *ump);
	ump->um_malloctype = malloctype;
	ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT,
	    M_WAITOK);
	ump->um_blkatoff = ffs_blkatoff;
	ump->um_truncate = ffs_truncate;
	ump->um_update = ffs_update;
	ump->um_valloc = ffs_valloc;
	ump->um_vfree = ffs_vfree;
	bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;
	fs = ump->um_fs;
	fs->fs_ronly = ronly;
	if (ronly == 0) {
		fs->fs_fmod = 1;
		fs->fs_clean = 0;
	}
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	base = space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		if (error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    cred, &bp)) {
			free(base, M_UFSMNT);
			goto out;
		}
		bcopy(bp->b_data, space, (u_int)size);
		fs->fs_csp[fragstoblks(fs, i)] = (struct csum *)space;
		space += size;
		brelse(bp);
		bp = NULL;
	}
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = (int32_t *)space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	if (fs->fs_id[0] != 0 && fs->fs_id[1] != 0)
		mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
	else
		mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	mp->mnt_flag |= MNT_LOCAL;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specmountpoint = mp;
	ffs_oldfscompat(fs);

	/*
	 * Set FS local "last mounted on" information (NULL pad)
	 */
	copystr(	mp->mnt_stat.f_mntonname,	/* mount point*/
			fs->fs_fsmnt,			/* copy area*/
			sizeof(fs->fs_fsmnt) - 1,	/* max size*/
			&strsize);			/* real size*/
	bzero( fs->fs_fsmnt + strsize, sizeof(fs->fs_fsmnt) - strsize);

	if( mp->mnt_flag & MNT_ROOTFS) {
		/*
		 * Root mount; update timestamp in mount structure.
		 * this will be used by the common root mount code
		 * to update the system clock.
		 */
		mp->mnt_time = fs->fs_time;
	}

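	/*
	 * Clamp the in-core fs_maxfilesize to 2^30 blocks of fs_bsize bytes;
	 * the on-disk value is remembered in um_savedmaxfilesize so it is
	 * not lost.
	 */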
	ump->um_savedmaxfilesize = fs->fs_maxfilesize;		/* XXX */
	maxfilesize = (u_int64_t)0x40000000 * fs->fs_bsize - 1;	/* XXX */
	if (fs->fs_maxfilesize > maxfilesize)			/* XXX */
		fs->fs_maxfilesize = maxfilesize;		/* XXX */
	if (ronly == 0) {
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
			free(base, M_UFSMNT);
			goto out;
		}
		fs->fs_clean = 0;
		(void) ffs_sbupdate(ump, MNT_WAIT);
	}
	return (0);
out:
	devvp->v_specmountpoint = NULL;
	if (bp)
		brelse(bp);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
	if (ump) {
		free(ump->um_fs, M_UFSMNT);
		free(ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}

/*
 * Sanity checks for old file systems.
 *
 * XXX - goes away some day.
 */
static int
ffs_oldfscompat(fs)
	struct fs *fs;
{

	fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);	/* XXX */
	fs->fs_interleave = max(fs->fs_interleave, 1);		/* XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		fs->fs_nrpos = 8;				/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
#if 0
		int i;						/* XXX */
		u_int64_t sizepb = fs->fs_bsize;		/* XXX */
								/* XXX */
		fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1;	/* XXX */
		for (i = 0; i < NIADDR; i++) {			/* XXX */
			sizepb *= NINDIR(fs);			/* XXX */
			fs->fs_maxfilesize += sizepb;		/* XXX */
		}						/* XXX */
#endif
		fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
		fs->fs_qbmask = ~fs->fs_bmask;			/* XXX */
		fs->fs_qfmask = ~fs->fs_fmask;			/* XXX */
	}							/* XXX */
	return (0);
}

/*
 * unmount system call
 */
int
ffs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;
	int error, flags;

	flags = 0;
	if (mntflags & MNT_FORCE) {
		flags |= FORCECLOSE;
	}
	if (mp->mnt_flag & MNT_SOFTDEP) {
		if ((error = softdep_flushfiles(mp, flags, p)) != 0)
			return (error);
	} else {
		if ((error = ffs_flushfiles(mp, flags, p)) != 0)
			return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_ronly == 0) {
		fs->fs_clean = 1;
		error = ffs_sbupdate(ump, MNT_WAIT);
		if (error) {
			fs->fs_clean = 0;
			return (error);
		}
	}
	ump->um_devvp->v_specmountpoint = NULL;

	vinvalbuf(ump->um_devvp, V_SAVE, NOCRED, p, 0, 0);
	error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
		NOCRED, p);

	vrele(ump->um_devvp);

	free(fs->fs_csp[0], M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(mp, flags, p)
	register struct mount *mp;
	int flags;
	struct proc *p;
{
	register struct ufsmount *ump;
	int error;

	ump = VFSTOUFS(mp);
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		error = vflush(mp, NULLVP, SKIPSYSTEM|flags);
		if (error)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
	/*
	 * Flush all the files.
	 */
	if ((error = vflush(mp, NULL, flags)) != 0)
		return (error);
	/*
	 * Flush filesystem metadata.
	 */
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = VOP_FSYNC(ump->um_devvp, p->p_ucred, MNT_WAIT, p);
	VOP_UNLOCK(ump->um_devvp, 0, p);
	return (error);
}

/*
 * Get file system statistics.
 */
int
ffs_statfs(mp, sbp, p)
	struct mount *mp;
	register struct statfs *sbp;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_MAGIC)
		panic("ffs_statfs");
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
		fs->fs_cstotal.cs_nffree;
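	/*
	 * f_bavail is the space available to unprivileged users: freespace()
	 * subtracts the fs_minfree reserve, so it can be smaller than f_bfree
	 * and can even go negative once the reserve is being consumed.
	 */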
	sbp->f_bavail = freespace(fs, fs->fs_minfree);
	sbp->f_files =  fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
	if (sbp != &mp->mnt_stat) {
		sbp->f_type = mp->mnt_vfc->vfc_typenum;
		bcopy((caddr_t)mp->mnt_stat.f_mntonname,
			(caddr_t)&sbp->f_mntonname[0], MNAMELEN);
		bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
			(caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
	}
	return (0);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
int
ffs_sync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	struct vnode *nvp, *vp;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	struct timeval tv;
	int error, allerror = 0;

	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {		/* XXX */
		printf("fs = %s\n", fs->fs_fsmnt);
		panic("ffs_sync: rofs mod");
	}
	/*
	 * Write back each (modified) inode.
	 */
	simple_lock(&mntvnode_slock);
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;
		simple_lock(&vp->v_interlock);
		nvp = vp->v_mntvnodes.le_next;
		ip = VTOI(vp);
		if ((vp->v_type == VNON) || ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0) &&
		    ((vp->v_dirtyblkhd.lh_first == NULL) || (waitfor == MNT_LAZY))) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		if (vp->v_type != VCHR) {
			simple_unlock(&mntvnode_slock);
			error =
			    vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
			if (error) {
				simple_lock(&mntvnode_slock);
				if (error == ENOENT)
					goto loop;
				continue;
			}
			if (error = VOP_FSYNC(vp, cred, waitfor, p))
				allerror = error;
			VOP_UNLOCK(vp, 0, p);
			vrele(vp);
			simple_lock(&mntvnode_slock);
		} else {
			simple_unlock(&mntvnode_slock);
			simple_unlock(&vp->v_interlock);
			getmicrotime(&tv);
			/* UFS_UPDATE(vp, &tv, &tv, waitfor == MNT_WAIT); */
			UFS_UPDATE(vp, &tv, &tv, 0);
			simple_lock(&mntvnode_slock);
		}
	}
	simple_unlock(&mntvnode_slock);
	/*
	 * Force stale file system control information to be flushed.
	 */
if (waitfor != MNT_LAZY) {
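		/*
		 * With soft updates, the flush of the device vnode is
		 * downgraded to an asynchronous one; the dependency code
		 * controls the ordering of the remaining metadata writes.
		 */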
		if (ump->um_mountp->mnt_flag & MNT_SOFTDEP)
			waitfor = MNT_NOWAIT;
		vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, p);
		if ((error = VOP_FSYNC(ump->um_devvp, cred, waitfor, p)) != 0)
			allerror = error;
		VOP_UNLOCK(ump->um_devvp, 0, p);
	}
#ifdef QUOTA
	qsync(mp);
#endif
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0 && (error = ffs_sbupdate(ump, waitfor)) != 0)
		allerror = error;
	return (allerror);
}

/*
 * Look up a FFS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
static int ffs_inode_hash_lock;

int
ffs_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	struct fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
restart:
	if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
		return (0);
	}

	/*
	 * Lock out the creation of new entries in the FFS hash table in
	 * case getnewvnode() or MALLOC() blocks, otherwise a duplicate
	 * may occur!
	 */
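	/*
	 * A value of -1 in ffs_inode_hash_lock records that a thread is
	 * asleep waiting for the lock and must be woken when it is
	 * released.
	 */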
	if (ffs_inode_hash_lock) {
		while (ffs_inode_hash_lock) {
			ffs_inode_hash_lock = -1;
			tsleep(&ffs_inode_hash_lock, PVM, "ffsvgt", 0);
		}
		goto restart;
	}
	ffs_inode_hash_lock = 1;

	/*
	 * If this MALLOC() is performed after the getnewvnode()
	 * it might block, leaving a vnode with a NULL v_data to be
	 * found by ffs_sync() if a sync happens to fire right then,
	 * which will cause a panic because ffs_sync() blindly
	 * dereferences vp->v_data (as well it should).
	 */
	MALLOC(ip, struct inode *, sizeof(struct inode),
	    ump->um_malloctype, M_WAITOK);

	/* Allocate a new vnode/inode. */
	error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp);
	if (error) {
		if (ffs_inode_hash_lock < 0)
			wakeup(&ffs_inode_hash_lock);
		ffs_inode_hash_lock = 0;
		*vpp = NULL;
		FREE(ip, ump->um_malloctype);
		return (error);
	}
	bzero((caddr_t)ip, sizeof(struct inode));
	lockinit(&ip->i_lock, PINOD, "inode", 0, 0);
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
#ifdef QUOTA
	{
		int i;
		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif
	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ufs_ihashins(ip);
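
	/*
	 * The inode is visible in the hash now, so the insertion lock can
	 * be dropped; wake up anyone recorded as waiting for it.
	 */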
	if (ffs_inode_hash_lock < 0)
		wakeup(&ffs_inode_hash_lock);
	ffs_inode_hash_lock = 0;

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		brelse(bp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	ip->i_din = *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino));
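	/*
	 * With soft updates, the effective link count (i_effnlink) may be
	 * lower than i_nlink while directory removals are still pending,
	 * so let the dependency code derive it; otherwise the two match.
	 */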
	if (DOINGSOFTDEP(vp))
		softdep_load_inodeblock(ip);
	else
		ip->i_effnlink = ip->i_nlink;
	bqrelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
		ip->i_gen = random() / 2 + 1;
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IN_MODIFIED;
	}
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
		ip->i_uid = ip->i_din.di_ouid;		/* XXX */
		ip->i_gid = ip->i_din.di_ogid;		/* XXX */
	}						/* XXX */

	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via. exflagsp and credanonp
 */
int
ffs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp)
	register struct mount *mp;
	struct fid *fhp;
	struct sockaddr *nam;
	struct vnode **vpp;
	int *exflagsp;
	struct ucred **credanonp;
{
	register struct ufid *ufhp;
	struct fs *fs;

	ufhp = (struct ufid *)fhp;
	fs = VFSTOUFS(mp)->um_fs;
	if (ufhp->ufid_ino < ROOTINO ||
	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);
	return (ufs_check_export(mp, ufhp, nam, vpp, exflagsp, credanonp));
}

/*
 * Vnode pointer to File handle
 */
/* ARGSUSED */
int
ffs_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	register struct inode *ip;
	register struct ufid *ufhp;

	ip = VTOI(vp);
	ufhp = (struct ufid *)fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_gen;
	return (0);
}

/*
 * Initialize the filesystem; just use ufs_init.
 */
static int
ffs_init(vfsp)
	struct vfsconf *vfsp;
{

	softdep_initialize();
	return (ufs_init(vfsp));
}

/*
 * Write a superblock and associated information back to disk.
 */
static int
ffs_sbupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	register struct fs *dfs, *fs = mp->um_fs;
	register struct buf *bp;
	int blks;
	caddr_t space;
	int i, size, error, allerror = 0;

	/*
	 * First write back the summary information.
	 */
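	/*
	 * The cylinder group summary area (fs_csp) is written out a full
	 * block at a time; the final chunk may be shorter and is sized
	 * in fragments.
	 */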
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = (caddr_t)fs->fs_csp[0];
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
		    size, 0, 0);
		bcopy(space, bp->b_data, (u_int)size);
		space += size;
		if (waitfor != MNT_WAIT)
			bawrite(bp);
		else if ((error = bwrite(bp)) != 0)
			allerror = error;
	}
	/*
	 * Now write back the superblock itself. If any errors occurred
	 * up to this point, then fail so that the superblock avoids
	 * being written out as clean.
	 */
	if (allerror)
		return (allerror);
	bp = getblk(mp->um_devvp, SBLOCK, (int)fs->fs_sbsize, 0, 0);
	fs->fs_fmod = 0;
	fs->fs_time = time_second;
	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
	/* Restore compatibility to old file systems.		   XXX */
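	/*
	 * Only the on-disk copy in the buffer (dfs) is rearranged below;
	 * the in-core superblock is left in the current format.
	 */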
	dfs = (struct fs *)bp->b_data;			/* XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)	/* XXX */
		dfs->fs_nrpos = -1;			/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
		int32_t *lp, tmp;			/* XXX */
							/* XXX */
		lp = (int32_t *)&dfs->fs_qbmask;	/* XXX */
		tmp = lp[4];				/* XXX */
		for (i = 4; i > 0; i--)			/* XXX */
			lp[i] = lp[i-1];		/* XXX */
		lp[0] = tmp;				/* XXX */
	}						/* XXX */
	dfs->fs_maxfilesize = mp->um_savedmaxfilesize;	/* XXX */
	if (waitfor != MNT_WAIT)
		bawrite(bp);
	else if ((error = bwrite(bp)) != 0)
		allerror = error;
	return (allerror);
}