/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_syscalls.c	8.13 (Berkeley) 4/15/94
 * $FreeBSD$
 */

/* For 4.3 integer FS ID compatibility */
#include "opt_compat.h"
#include "opt_ffs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/sysent.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/namei.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/linker.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/dirent.h>
#include <sys/extattr.h>
#include <sys/jail.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_zone.h>
#include <vm/vm_page.h>

static int change_dir __P((struct nameidata *ndp, struct thread *td));
static void checkdirs __P((struct vnode *olddp, struct vnode *newdp));
static int chroot_refuse_vdir_fds __P((struct filedesc *fdp));
static int getutimes __P((const struct timeval *, struct timespec *));
static int setfown __P((struct thread *td, struct vnode *, uid_t, gid_t));
static int setfmode __P((struct thread *td, struct vnode *, int));
static int setfflags __P((struct thread *td, struct vnode *, int));
static int setutimes __P((struct thread *td, struct vnode *,
    const struct timespec *, int));
static int vn_access __P((struct vnode *vp, int user_flags, struct ucred *cred,
    struct thread *td));

static int usermount = 0;	/* if 1, non-root can mount fs. */

int (*union_dircheckp) __P((struct thread *td, struct vnode **, struct file *));

SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, "");
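
/*
 * The knob above is exported as the "vfs.usermount" sysctl; the name
 * follows from the SYSCTL_INT(_vfs, ..., usermount, ...) declaration.
 * An administrator would typically enable mounts by unprivileged users
 * with something like "sysctl -w vfs.usermount=1" from userland.
 */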

/*
 * Virtual File System System Calls
 */

/*
 * Mount a file system.
 */
#ifndef _SYS_SYSPROTO_H_
struct mount_args {
        char    *type;
        char    *path;
        int     flags;
        caddr_t data;
};
#endif
/* ARGSUSED */
int
mount(td, uap)
        struct thread *td;
        struct mount_args /* {
                syscallarg(char *) type;
                syscallarg(char *) path;
                syscallarg(int) flags;
                syscallarg(caddr_t) data;
        } */ *uap;
{
        char *fstype;
        char *fspath;
        int error;

        fstype = malloc(MFSNAMELEN, M_TEMP, M_WAITOK | M_ZERO);
        fspath = malloc(MNAMELEN, M_TEMP, M_WAITOK | M_ZERO);

        /*
         * vfs_mount() actually takes a kernel string for `type' and
         * `path' now, so extract them.
         */
        error = copyinstr(SCARG(uap, type), fstype, MFSNAMELEN, NULL);
        if (error)
                goto finish;
        error = copyinstr(SCARG(uap, path), fspath, MNAMELEN, NULL);
        if (error)
                goto finish;
        error = vfs_mount(td, fstype, fspath, SCARG(uap, flags),
            SCARG(uap, data));
finish:
        free(fstype, M_TEMP);
        free(fspath, M_TEMP);
        return (error);
}
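
/*
 * Usage sketch (illustrative only, not part of the original source): from
 * userland the handler above is reached through the mount(2) system call.
 * A read-only UFS mount onto a hypothetical /mnt might look like:
 *
 *      struct ufs_args args;
 *
 *      args.fspec = "/dev/ad0s1e";             (made-up device name)
 *      if (mount("ufs", "/mnt", MNT_RDONLY, &args) == -1)
 *              err(1, "mount");
 *
 * The "type" and "path" strings are copied into kernel space by mount()
 * above; the "data" pointer (&args here) is still a user-space pointer
 * that the individual filesystem's VFS_MOUNT() interprets.
 */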

/*
 * vfs_mount(): actually attempt a filesystem mount.
 *
 * This routine is designed to be a "generic" entry point for routines
 * that wish to mount a filesystem. All parameters except `fsdata' are
 * pointers into kernel space. `fsdata' is currently still a pointer
 * into userspace.
 */
int
vfs_mount(td, fstype, fspath, fsflags, fsdata)
        struct thread *td;
        const char *fstype;
        char *fspath;
        int fsflags;
        void *fsdata;
{
        struct vnode *vp;
        struct mount *mp;
        struct vfsconf *vfsp;
        int error, flag = 0, flag2 = 0;
        struct vattr va;
        struct nameidata nd;
        struct proc *p = td->td_proc;

        /*
         * Be ultra-paranoid about making sure the type and fspath
         * variables will fit in our mp buffers, including the
         * terminating NUL.
         */
        if ((strlen(fstype) >= MFSNAMELEN - 1) ||
            (strlen(fspath) >= MNAMELEN - 1))
                return (ENAMETOOLONG);

        if (usermount == 0) {
                error = suser_td(td);
                if (error)
                        return (error);
        }
        /*
         * Do not allow NFS export by non-root users.
         */
        if (fsflags & MNT_EXPORTED) {
                error = suser_td(td);
                if (error)
                        return (error);
        }
        /*
         * Silently enforce MNT_NOSUID and MNT_NODEV for non-root users
         */
        if (suser_xxx(p->p_ucred, 0, 0))
                fsflags |= MNT_NOSUID | MNT_NODEV;
        /*
         * Get vnode to be covered
         */
        NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspath, td);
        if ((error = namei(&nd)) != 0)
                return (error);
        NDFREE(&nd, NDF_ONLY_PNBUF);
        vp = nd.ni_vp;
        if (fsflags & MNT_UPDATE) {
                if ((vp->v_flag & VROOT) == 0) {
                        vput(vp);
                        return (EINVAL);
                }
                mp = vp->v_mount;
                flag = mp->mnt_flag;
                flag2 = mp->mnt_kern_flag;
                /*
                 * We only allow the filesystem to be reloaded if it
                 * is currently mounted read-only.
                 */
                if ((fsflags & MNT_RELOAD) &&
                    ((mp->mnt_flag & MNT_RDONLY) == 0)) {
                        vput(vp);
                        return (EOPNOTSUPP);    /* Needs translation */
                }
                /*
                 * Only root, or the user that did the original mount is
                 * permitted to update it.
                 */
                if (mp->mnt_stat.f_owner != p->p_ucred->cr_uid) {
                        error = suser_td(td);
                        if (error) {
                                vput(vp);
                                return (error);
                        }
                }
                if (vfs_busy(mp, LK_NOWAIT, 0, td)) {
                        vput(vp);
                        return (EBUSY);
                }
                mtx_lock(&vp->v_interlock);
                if ((vp->v_flag & VMOUNT) != 0 ||
                    vp->v_mountedhere != NULL) {
                        mtx_unlock(&vp->v_interlock);
                        vfs_unbusy(mp, td);
                        vput(vp);
                        return (EBUSY);
                }
                vp->v_flag |= VMOUNT;
                mtx_unlock(&vp->v_interlock);
                mp->mnt_flag |= fsflags &
                    (MNT_RELOAD | MNT_FORCE | MNT_UPDATE | MNT_SNAPSHOT);
                VOP_UNLOCK(vp, 0, td);
                goto update;
        }
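        /*
         * The MNT_UPDATE path above saves the previous mnt_flag and
         * mnt_kern_flag values in `flag' and `flag2'; they are restored
         * after the VFS_MOUNT() call below if the update fails, so a
         * failed update leaves the mount flags as they were.
         */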
        /*
         * If the user is not root, ensure that they own the directory
         * onto which we are attempting to mount.
         */
        error = VOP_GETATTR(vp, &va, p->p_ucred, td);
        if (error) {
                vput(vp);
                return (error);
        }
        if (va.va_uid != p->p_ucred->cr_uid) {
                error = suser_td(td);
                if (error) {
                        vput(vp);
                        return (error);
                }
        }
        if ((error = vinvalbuf(vp, V_SAVE, p->p_ucred, td, 0, 0)) != 0) {
                vput(vp);
                return (error);
        }
        if (vp->v_type != VDIR) {
                vput(vp);
                return (ENOTDIR);
        }
        for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
                if (!strcmp(vfsp->vfc_name, fstype))
                        break;
        if (vfsp == NULL) {
                linker_file_t lf;

                /* Only load modules for root (very important!) */
                error = suser_td(td);
                if (error) {
                        vput(vp);
                        return error;
                }
                error = linker_load_file(fstype, &lf);
                if (error || lf == NULL) {
                        vput(vp);
                        if (lf == NULL)
                                error = ENODEV;
                        return error;
                }
                lf->userrefs++;
                /* lookup again, see if the VFS was loaded */
                for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
                        if (!strcmp(vfsp->vfc_name, fstype))
                                break;
                if (vfsp == NULL) {
                        lf->userrefs--;
                        linker_file_unload(lf);
                        vput(vp);
                        return (ENODEV);
                }
        }
        mtx_lock(&vp->v_interlock);
        if ((vp->v_flag & VMOUNT) != 0 ||
            vp->v_mountedhere != NULL) {
                mtx_unlock(&vp->v_interlock);
                vput(vp);
                return (EBUSY);
        }
        vp->v_flag |= VMOUNT;
        mtx_unlock(&vp->v_interlock);

        /*
         * Allocate and initialize the filesystem.
         */
        mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
        TAILQ_INIT(&mp->mnt_nvnodelist);
        lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
        (void)vfs_busy(mp, LK_NOWAIT, 0, td);
        mp->mnt_op = vfsp->vfc_vfsops;
        mp->mnt_vfc = vfsp;
        vfsp->vfc_refcount++;
        mp->mnt_stat.f_type = vfsp->vfc_typenum;
        mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
        strncpy(mp->mnt_stat.f_fstypename, fstype, MFSNAMELEN);
        mp->mnt_stat.f_fstypename[MFSNAMELEN - 1] = '\0';
        mp->mnt_vnodecovered = vp;
        mp->mnt_stat.f_owner = p->p_ucred->cr_uid;
        strncpy(mp->mnt_stat.f_mntonname, fspath, MNAMELEN);
        mp->mnt_stat.f_mntonname[MNAMELEN - 1] = '\0';
        mp->mnt_iosize_max = DFLTPHYS;
        VOP_UNLOCK(vp, 0, td);
update:
        /*
         * Set the mount level flags.
         */
        if (fsflags & MNT_RDONLY)
                mp->mnt_flag |= MNT_RDONLY;
        else if (mp->mnt_flag & MNT_RDONLY)
                mp->mnt_kern_flag |= MNTK_WANTRDWR;
        mp->mnt_flag &=~ (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
            MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOATIME |
            MNT_NOSYMFOLLOW | MNT_IGNORE |
            MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
        mp->mnt_flag |= fsflags & (MNT_NOSUID | MNT_NOEXEC |
            MNT_NODEV | MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_FORCE |
            MNT_NOSYMFOLLOW | MNT_IGNORE |
            MNT_NOATIME | MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
        /*
         * Mount the filesystem.
         * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
         * get.  No freeing of cn_pnbuf.
         */
        error = VFS_MOUNT(mp, fspath, fsdata, &nd, td);
        if (mp->mnt_flag & MNT_UPDATE) {
                if (mp->mnt_kern_flag & MNTK_WANTRDWR)
                        mp->mnt_flag &= ~MNT_RDONLY;
                mp->mnt_flag &=~
                    (MNT_UPDATE | MNT_RELOAD | MNT_FORCE | MNT_SNAPSHOT);
                mp->mnt_kern_flag &=~ MNTK_WANTRDWR;
                if (error) {
                        mp->mnt_flag = flag;
                        mp->mnt_kern_flag = flag2;
                }
                if ((mp->mnt_flag & MNT_RDONLY) == 0) {
                        if (mp->mnt_syncer == NULL)
                                error = vfs_allocate_syncvnode(mp);
                } else {
                        if (mp->mnt_syncer != NULL)
                                vrele(mp->mnt_syncer);
                        mp->mnt_syncer = NULL;
                }
                vfs_unbusy(mp, td);
                mtx_lock(&vp->v_interlock);
                vp->v_flag &= ~VMOUNT;
                mtx_unlock(&vp->v_interlock);
                vrele(vp);
                return (error);
        }
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
        /*
         * Put the new filesystem on the mount list after root.
         */
        cache_purge(vp);
        if (!error) {
                struct vnode *newdp;

                mtx_lock(&vp->v_interlock);
                vp->v_flag &= ~VMOUNT;
                vp->v_mountedhere = mp;
                mtx_unlock(&vp->v_interlock);
                mtx_lock(&mountlist_mtx);
                TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
                mtx_unlock(&mountlist_mtx);
                if (VFS_ROOT(mp, &newdp))
                        panic("mount: lost mount");
                checkdirs(vp, newdp);
                vput(newdp);
                VOP_UNLOCK(vp, 0, td);
                if ((mp->mnt_flag & MNT_RDONLY) == 0)
                        error = vfs_allocate_syncvnode(mp);
                vfs_unbusy(mp, td);
                if ((error = VFS_START(mp, 0, td)) != 0)
                        vrele(vp);
        } else {
                mtx_lock(&vp->v_interlock);
                vp->v_flag &= ~VMOUNT;
                mtx_unlock(&vp->v_interlock);
                mp->mnt_vfc->vfc_refcount--;
                vfs_unbusy(mp, td);
                free((caddr_t)mp, M_MOUNT);
                vput(vp);
        }
        return (error);
}
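
/*
 * On the success path above, the covered vnode is marked with
 * v_mountedhere, the new mount is appended to mountlist, and checkdirs()
 * below migrates any process whose current or root directory was the
 * covered vnode over to the root of the new mount.
 */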

/*
 * Scan all active processes to see if any of them have a current
 * or root directory of `olddp'. If so, replace them with the new
 * mount point.
 */
static void
checkdirs(olddp, newdp)
        struct vnode *olddp, *newdp;
{
        struct filedesc *fdp;
        struct proc *p;

        if (olddp->v_usecount == 1)
                return;
        sx_slock(&allproc_lock);
        LIST_FOREACH(p, &allproc, p_list) {
                fdp = p->p_fd;
                if (fdp == NULL)
                        continue;
                if (fdp->fd_cdir == olddp) {
                        vrele(fdp->fd_cdir);
                        VREF(newdp);
                        fdp->fd_cdir = newdp;
                }
                if (fdp->fd_rdir == olddp) {
                        vrele(fdp->fd_rdir);
                        VREF(newdp);
                        fdp->fd_rdir = newdp;
                }
        }
        sx_sunlock(&allproc_lock);
        if (rootvnode == olddp) {
                vrele(rootvnode);
                VREF(newdp);
                rootvnode = newdp;
        }
}

/*
 * Unmount a file system.
 *
 * Note: unmount takes a path to the vnode mounted on as argument,
 * not special file (as before).
 */
#ifndef _SYS_SYSPROTO_H_
struct unmount_args {
        char    *path;
        int     flags;
};
#endif
/* ARGSUSED */
int
unmount(td, uap)
        struct thread *td;
        register struct unmount_args /* {
                syscallarg(char *) path;
                syscallarg(int) flags;
        } */ *uap;
{
        register struct vnode *vp;
        struct mount *mp;
        int error;
        struct nameidata nd;

        NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
            SCARG(uap, path), td);
        if ((error = namei(&nd)) != 0)
                return (error);
        vp = nd.ni_vp;
        NDFREE(&nd, NDF_ONLY_PNBUF);
        mp = vp->v_mount;

        /*
         * Only root, or the user that did the original mount is
         * permitted to unmount this filesystem.
         */
        if (mp->mnt_stat.f_owner != td->td_proc->p_ucred->cr_uid) {
                error = suser_td(td);
                if (error) {
                        vput(vp);
                        return (error);
                }
        }

        /*
         * Don't allow unmounting the root file system.
         */
        if (mp->mnt_flag & MNT_ROOTFS) {
                vput(vp);
                return (EINVAL);
        }

        /*
         * Must be the root of the filesystem
         */
        if ((vp->v_flag & VROOT) == 0) {
                vput(vp);
                return (EINVAL);
        }
        vput(vp);
        return (dounmount(mp, SCARG(uap, flags), td));
}
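
/*
 * Usage sketch (illustrative only, not part of the original source): the
 * userland counterpart is unmount(2), which takes the mount point path
 * rather than the device, e.g.
 *
 *      if (unmount("/mnt", MNT_FORCE) == -1)
 *              err(1, "unmount");
 *
 * where /mnt is the hypothetical mount point from the earlier example.
 */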
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Do the actual file system unmount.
|
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
dounmount(mp, flags, td)
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1994-05-24 10:09:53 +00:00
|
|
|
int flags;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-02-28 20:54:28 +00:00
|
|
|
struct vnode *coveredvp, *fsrootvp;
|
1994-05-24 10:09:53 +00:00
|
|
|
int error;
|
1998-07-03 03:47:24 +00:00
|
|
|
int async_flag;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&mountlist_mtx);
|
1997-11-12 05:42:33 +00:00
|
|
|
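/* Note that an unmount is in progress on this mount point. */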
mp->mnt_kern_flag |= MNTK_UNMOUNT;
|
2001-09-12 08:38:13 +00:00
|
|
|
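/* Drain the mount lock so no other operations remain in progress. */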
lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_mtx, td);
|
2000-07-11 22:07:57 +00:00
|
|
|
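/* Wait for any filesystem suspension to clear before starting to write. */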
vn_start_write(NULL, &mp, V_WAIT);
|
1997-07-17 07:17:33 +00:00
|
|
|
|
|
|
|
if (mp->mnt_flag & MNT_EXPUBLIC)
|
|
|
|
vfs_setpublicfs(NULL, NULL, NULL);
|
|
|
|
|
1998-01-06 05:26:17 +00:00
|
|
|
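/* Flush dirty memory-mapped pages and temporarily disable async writes. */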
vfs_msync(mp, MNT_WAIT);
|
1998-07-03 03:47:24 +00:00
|
|
|
async_flag = mp->mnt_flag & MNT_ASYNC;
|
1994-05-24 10:09:53 +00:00
|
|
|
mp->mnt_flag &=~ MNT_ASYNC;
|
|
|
|
cache_purgevfs(mp); /* remove cache entries for this file sys */
|
1998-03-08 09:59:44 +00:00
|
|
|
if (mp->mnt_syncer != NULL)
|
|
|
|
vrele(mp->mnt_syncer);
|
2001-02-28 20:54:28 +00:00
|
|
|
/* Move process cdir/rdir refs on fs root to underlying vnode. */
|
|
|
|
if (VFS_ROOT(mp, &fsrootvp) == 0) {
|
|
|
|
if (mp->mnt_vnodecovered != NULL)
|
|
|
|
checkdirs(fsrootvp, mp->mnt_vnodecovered);
|
|
|
|
if (fsrootvp == rootvnode) {
|
|
|
|
vrele(rootvnode);
|
|
|
|
rootvnode = NULL;
|
|
|
|
}
|
|
|
|
vput(fsrootvp);
|
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
if (((mp->mnt_flag & MNT_RDONLY) ||
|
2001-09-12 08:38:13 +00:00
|
|
|
(error = VFS_SYNC(mp, MNT_WAIT, td->td_proc->p_ucred, td)) == 0) ||
|
2000-07-11 22:07:57 +00:00
|
|
|
(flags & MNT_FORCE)) {
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VFS_UNMOUNT(mp, flags, td);
|
2000-07-11 22:07:57 +00:00
|
|
|
}
|
|
|
|
vn_finished_write(mp);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (error) {
|
2001-02-28 20:54:28 +00:00
|
|
|
/* Undo cdir/rdir and rootvnode changes made above. */
|
|
|
|
if (VFS_ROOT(mp, &fsrootvp) == 0) {
|
|
|
|
if (mp->mnt_vnodecovered != NULL)
|
|
|
|
checkdirs(mp->mnt_vnodecovered, fsrootvp);
|
|
|
|
if (rootvnode == NULL) {
|
|
|
|
rootvnode = fsrootvp;
|
|
|
|
vref(rootvnode);
|
|
|
|
}
|
|
|
|
vput(fsrootvp);
|
|
|
|
}
|
1998-03-08 09:59:44 +00:00
|
|
|
if ((mp->mnt_flag & MNT_RDONLY) == 0 && mp->mnt_syncer == NULL)
|
|
|
|
(void) vfs_allocate_syncvnode(mp);
|
2001-08-20 19:16:31 +00:00
|
|
|
mtx_lock(&mountlist_mtx);
|
1997-11-12 05:42:33 +00:00
|
|
|
mp->mnt_kern_flag &= ~MNTK_UNMOUNT;
|
1998-07-03 03:47:24 +00:00
|
|
|
mp->mnt_flag |= async_flag;
|
2001-08-20 19:16:31 +00:00
|
|
|
lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK,
|
2001-09-12 08:38:13 +00:00
|
|
|
&mountlist_mtx, td);
|
1998-12-12 21:07:09 +00:00
|
|
|
if (mp->mnt_kern_flag & MNTK_MWAIT)
|
|
|
|
wakeup((caddr_t)mp);
|
1997-02-10 02:22:35 +00:00
|
|
|
return (error);
|
|
|
|
}
|
2001-08-20 19:16:31 +00:00
|
|
|
mtx_lock(&mountlist_mtx);
|
1999-11-20 10:00:46 +00:00
|
|
|
TAILQ_REMOVE(&mountlist, mp, mnt_list);
|
2001-08-20 19:16:31 +00:00
|
|
|
if ((coveredvp = mp->mnt_vnodecovered) != NULL)
|
|
|
|
coveredvp->v_mountedhere = NULL;
|
1997-02-10 02:22:35 +00:00
|
|
|
mp->mnt_vfc->vfc_refcount--;
|
2001-10-23 01:21:29 +00:00
|
|
|
if (!TAILQ_EMPTY(&mp->mnt_nvnodelist))
|
1997-02-10 02:22:35 +00:00
|
|
|
panic("unmount: dangling vnode");
|
2001-09-12 08:38:13 +00:00
|
|
|
lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_mtx, td);
|
2000-10-04 01:29:17 +00:00
|
|
|
lockdestroy(&mp->mnt_lock);
|
2001-08-20 19:16:31 +00:00
|
|
|
if (coveredvp != NULL)
|
|
|
|
vrele(coveredvp);
|
1997-11-12 05:42:33 +00:00
|
|
|
if (mp->mnt_kern_flag & MNTK_MWAIT)
|
1997-02-10 02:22:35 +00:00
|
|
|
wakeup((caddr_t)mp);
|
|
|
|
free((caddr_t)mp, M_MOUNT);
|
|
|
|
return (0);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Sync each mounted filesystem.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1995-10-08 00:06:22 +00:00
|
|
|
struct sync_args {
|
|
|
|
int dummy;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1995-10-08 00:06:22 +00:00
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
#ifdef DEBUG
|
1997-11-22 06:41:21 +00:00
|
|
|
static int syncprt = 0;
|
|
|
|
SYSCTL_INT(_debug, OID_AUTO, syncprt, CTLFLAG_RW, &syncprt, 0, "");
|
1997-02-10 02:22:35 +00:00
|
|
|
#endif
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
sync(td, uap)
|
|
|
|
struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct sync_args *uap;
|
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp, *nmp;
|
1994-05-24 10:09:53 +00:00
|
|
|
int asyncflag;
|
|
|
|
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&mountlist_mtx);
|
1999-11-20 10:00:46 +00:00
|
|
|
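/* Walk the mount list, syncing each writable filesystem that is not busy. */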
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
|
2001-09-12 08:38:13 +00:00
|
|
|
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
|
1999-11-20 10:00:46 +00:00
|
|
|
nmp = TAILQ_NEXT(mp, mnt_list);
|
1997-02-10 02:22:35 +00:00
|
|
|
continue;
|
|
|
|
}
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((mp->mnt_flag & MNT_RDONLY) == 0 &&
|
|
|
|
vn_start_write(NULL, &mp, V_NOWAIT) == 0) {
|
1994-05-24 10:09:53 +00:00
|
|
|
asyncflag = mp->mnt_flag & MNT_ASYNC;
|
|
|
|
mp->mnt_flag &= ~MNT_ASYNC;
|
1995-05-21 21:39:31 +00:00
|
|
|
vfs_msync(mp, MNT_NOWAIT);
|
1998-03-08 09:59:44 +00:00
|
|
|
VFS_SYNC(mp, MNT_NOWAIT,
|
2001-09-12 08:38:13 +00:00
|
|
|
((td != NULL) ? td->td_proc->p_ucred : NOCRED), td);
|
1998-03-08 09:59:44 +00:00
|
|
|
mp->mnt_flag |= asyncflag;
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&mountlist_mtx);
|
1999-11-20 10:00:46 +00:00
|
|
|
nmp = TAILQ_NEXT(mp, mnt_list);
|
2001-09-12 08:38:13 +00:00
|
|
|
vfs_unbusy(mp, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&mountlist_mtx);
|
1997-02-12 06:46:11 +00:00
|
|
|
#if 0
|
|
|
|
/*
|
|
|
|
* XXX don't call vfs_bufstats() yet because that routine
|
|
|
|
* was not imported in the Lite2 merge.
|
|
|
|
*/
|
1997-02-10 02:22:35 +00:00
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (syncprt)
|
|
|
|
vfs_bufstats();
|
|
|
|
#endif /* DIAGNOSTIC */
|
1997-02-12 06:46:11 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
1999-04-28 11:38:52 +00:00
|
|
|
/* XXX PRISON: could be per prison flag */
|
|
|
|
static int prison_quotas;
|
|
|
|
#if 0
|
|
|
|
SYSCTL_INT(_kern_prison, OID_AUTO, quotas, CTLFLAG_RW, &prison_quotas, 0, "");
|
|
|
|
#endif
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Change filesystem quotas.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct quotactl_args {
|
|
|
|
char *path;
|
|
|
|
int cmd;
|
|
|
|
int uid;
|
|
|
|
caddr_t arg;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
quotactl(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct quotactl_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) cmd;
|
|
|
|
syscallarg(int) uid;
|
|
|
|
syscallarg(caddr_t) arg;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1994-05-24 10:09:53 +00:00
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
if (jailed(td->td_proc->p_ucred) && !prison_quotas)
|
1999-04-28 11:38:52 +00:00
|
|
|
return (EPERM);
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2000-07-11 22:07:57 +00:00
|
|
|
error = vn_start_write(nd.ni_vp, &mp, V_WAIT | PCATCH);
|
1994-05-24 10:09:53 +00:00
|
|
|
vrele(nd.ni_vp);
|
2000-07-11 22:07:57 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
error = VFS_QUOTACTL(mp, SCARG(uap, cmd), SCARG(uap, uid),
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, arg), td);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get filesystem statistics.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct statfs_args {
|
|
|
|
char *path;
|
|
|
|
struct statfs *buf;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
statfs(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct statfs_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(struct statfs *) buf;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
register struct mount *mp;
|
|
|
|
register struct statfs *sp;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
1997-03-23 20:08:22 +00:00
|
|
|
struct statfs sb;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
mp = nd.ni_vp->v_mount;
|
|
|
|
sp = &mp->mnt_stat;
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vrele(nd.ni_vp);
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VFS_STATFS(mp, sp, td);
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
|
2001-09-12 08:38:13 +00:00
|
|
|
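/* Zero out the filesystem ID for unprivileged callers. */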
if (suser_xxx(td->td_proc->p_ucred, 0, 0)) {
|
1997-03-23 20:08:22 +00:00
|
|
|
bcopy((caddr_t)sp, (caddr_t)&sb, sizeof(sb));
|
|
|
|
sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
|
|
|
|
sp = &sb;
|
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
return (copyout((caddr_t)sp, (caddr_t)SCARG(uap, buf), sizeof(*sp)));
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get filesystem statistics.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct fstatfs_args {
|
|
|
|
int fd;
|
|
|
|
struct statfs *buf;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
fstatfs(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct fstatfs_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(struct statfs *) buf;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
struct mount *mp;
|
|
|
|
register struct statfs *sp;
|
|
|
|
int error;
|
1997-03-23 20:08:22 +00:00
|
|
|
struct statfs sb;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
mp = ((struct vnode *)fp->f_data)->v_mount;
|
|
|
|
sp = &mp->mnt_stat;
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VFS_STATFS(mp, sp, td);
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
|
2001-09-12 08:38:13 +00:00
|
|
|
if (suser_xxx(td->td_proc->p_ucred, 0, 0)) {
|
1997-03-23 20:08:22 +00:00
|
|
|
bcopy((caddr_t)sp, (caddr_t)&sb, sizeof(sb));
|
|
|
|
sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
|
|
|
|
sp = &sb;
|
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
return (copyout((caddr_t)sp, (caddr_t)SCARG(uap, buf), sizeof(*sp)));
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get statistics on all filesystems.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct getfsstat_args {
|
|
|
|
struct statfs *buf;
|
|
|
|
long bufsize;
|
|
|
|
int flags;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
getfsstat(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct getfsstat_args /* {
|
|
|
|
syscallarg(struct statfs *) buf;
|
|
|
|
syscallarg(long) bufsize;
|
|
|
|
syscallarg(int) flags;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
register struct mount *mp, *nmp;
|
|
|
|
register struct statfs *sp;
|
|
|
|
caddr_t sfsp;
|
|
|
|
long count, maxcount, error;
|
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
maxcount = SCARG(uap, bufsize) / sizeof(struct statfs);
|
|
|
|
sfsp = (caddr_t)SCARG(uap, buf);
|
1995-08-11 11:31:18 +00:00
|
|
|
count = 0;
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&mountlist_mtx);
|
1999-11-20 10:00:46 +00:00
|
|
|
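/* Copy statistics for each mount point into the user's buffer. */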
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
|
2001-09-12 08:38:13 +00:00
|
|
|
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
|
1999-11-20 10:00:46 +00:00
|
|
|
nmp = TAILQ_NEXT(mp, mnt_list);
|
1996-01-16 13:07:14 +00:00
|
|
|
continue;
|
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
if (sfsp && count < maxcount) {
|
1994-05-24 10:09:53 +00:00
|
|
|
sp = &mp->mnt_stat;
|
|
|
|
/*
|
1998-03-08 09:59:44 +00:00
|
|
|
* If MNT_NOWAIT or MNT_LAZY is specified, do not
|
|
|
|
* refresh the fsstat cache. MNT_NOWAIT or MNT_LAZY
|
|
|
|
* overrides MNT_WAIT.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1998-03-08 09:59:44 +00:00
|
|
|
if (((SCARG(uap, flags) & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
|
1997-02-10 02:22:35 +00:00
|
|
|
(SCARG(uap, flags) & MNT_WAIT)) &&
|
2001-09-12 08:38:13 +00:00
|
|
|
(error = VFS_STATFS(mp, sp, td))) {
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&mountlist_mtx);
|
1999-11-20 10:00:46 +00:00
|
|
|
nmp = TAILQ_NEXT(mp, mnt_list);
|
2001-09-12 08:38:13 +00:00
|
|
|
vfs_unbusy(mp, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
continue;
|
1996-01-16 13:07:14 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
|
1994-10-08 22:33:43 +00:00
|
|
|
error = copyout((caddr_t)sp, sfsp, sizeof(*sp));
|
1996-01-16 13:07:14 +00:00
|
|
|
if (error) {
|
2001-09-12 08:38:13 +00:00
|
|
|
vfs_unbusy(mp, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1996-01-16 13:07:14 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
sfsp += sizeof(*sp);
|
|
|
|
}
|
|
|
|
count++;
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&mountlist_mtx);
|
1999-11-20 10:00:46 +00:00
|
|
|
nmp = TAILQ_NEXT(mp, mnt_list);
|
2001-09-12 08:38:13 +00:00
|
|
|
vfs_unbusy(mp, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&mountlist_mtx);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (sfsp && count > maxcount)
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = maxcount;
|
1994-05-24 10:09:53 +00:00
|
|
|
else
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = count;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Change current working directory to a given file descriptor.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct fchdir_args {
|
|
|
|
int fd;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
fchdir(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
struct fchdir_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
register struct filedesc *fdp = td->td_proc->p_fd;
|
1997-02-10 02:22:35 +00:00
|
|
|
struct vnode *vp, *tdp;
|
|
|
|
struct mount *mp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct file *fp;
|
|
|
|
int error;
|
|
|
|
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = getvnode(fdp, SCARG(uap, fd), &fp)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
vp = (struct vnode *)fp->f_data;
|
1997-02-10 02:22:35 +00:00
|
|
|
VREF(vp);
|
2001-09-12 08:38:13 +00:00
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (vp->v_type != VDIR)
|
|
|
|
error = ENOTDIR;
|
|
|
|
else
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VOP_ACCESS(vp, VEXEC, td->td_proc->p_ucred, td);
|
1997-02-10 02:22:35 +00:00
|
|
|
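/* If a filesystem is mounted on this directory, descend to its root. */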
while (!error && (mp = vp->v_mountedhere) != NULL) {
|
2001-09-12 08:38:13 +00:00
|
|
|
if (vfs_busy(mp, 0, 0, td))
|
1997-02-10 02:22:35 +00:00
|
|
|
continue;
|
|
|
|
error = VFS_ROOT(mp, &tdp);
|
2001-09-12 08:38:13 +00:00
|
|
|
vfs_unbusy(mp, td);
|
1997-02-10 02:22:35 +00:00
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
vput(vp);
|
|
|
|
vp = tdp;
|
|
|
|
}
|
|
|
|
if (error) {
|
|
|
|
vput(vp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1997-02-10 02:22:35 +00:00
|
|
|
}
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_UNLOCK(vp, 0, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
vrele(fdp->fd_cdir);
|
|
|
|
fdp->fd_cdir = vp;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Change current working directory (``.'').
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct chdir_args {
|
|
|
|
char *path;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
chdir(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
struct chdir_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
register struct filedesc *fdp = td->td_proc->p_fd;
|
1994-05-24 10:09:53 +00:00
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, path), td);
|
|
|
|
if ((error = change_dir(&nd, td)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vrele(fdp->fd_cdir);
|
|
|
|
fdp->fd_cdir = nd.ni_vp;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
1999-03-23 14:26:40 +00:00
|
|
|
/*
|
|
|
|
* Helper function for the raised chroot(2) security check: refuse if
|
|
|
|
* any file descriptors are open directories.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
chroot_refuse_vdir_fds(fdp)
|
|
|
|
struct filedesc *fdp;
|
|
|
|
{
|
|
|
|
struct vnode *vp;
|
|
|
|
struct file *fp;
|
|
|
|
int error;
|
|
|
|
int fd;
|
|
|
|
|
|
|
|
for (fd = 0; fd < fdp->fd_nfiles ; fd++) {
|
|
|
|
error = getvnode(fdp, fd, &fp);
|
|
|
|
if (error)
|
|
|
|
continue;
|
|
|
|
vp = (struct vnode *)fp->f_data;
|
|
|
|
if (vp->v_type != VDIR)
|
|
|
|
continue;
|
|
|
|
return(EPERM);
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This sysctl determines if we will allow a process to chroot(2) if it
|
|
|
|
* has a directory open:
|
|
|
|
* 0: disallowed for all processes.
|
|
|
|
* 1: allowed for processes that were not already chroot(2)'ed.
|
|
|
|
* 2: allowed for all processes.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int chroot_allow_open_directories = 1;
|
|
|
|
|
|
|
|
SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW,
|
|
|
|
&chroot_allow_open_directories, 0, "");
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Change notion of root (``/'') directory.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct chroot_args {
|
|
|
|
char *path;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
chroot(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
struct chroot_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
register struct filedesc *fdp = td->td_proc->p_fd;
|
1994-05-24 10:09:53 +00:00
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
error = suser_xxx(0, td->td_proc, PRISON_ROOT);
|
1999-03-23 14:26:40 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
if (chroot_allow_open_directories == 0 ||
|
|
|
|
(chroot_allow_open_directories == 1 && fdp->fd_rdir != rootvnode))
|
|
|
|
error = chroot_refuse_vdir_fds(fdp);
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1997-02-10 02:22:35 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, path), td);
|
|
|
|
if ((error = change_dir(&nd, td)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1998-02-15 04:17:09 +00:00
|
|
|
vrele(fdp->fd_rdir);
|
1994-05-24 10:09:53 +00:00
|
|
|
fdp->fd_rdir = nd.ni_vp;
|
1999-09-25 14:14:21 +00:00
|
|
|
if (!fdp->fd_jdir) {
|
|
|
|
fdp->fd_jdir = nd.ni_vp;
|
|
|
|
VREF(fdp->fd_jdir);
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Common routine for chroot and chdir.
|
|
|
|
*/
|
|
|
|
static int
|
2001-09-12 08:38:13 +00:00
|
|
|
change_dir(ndp, td)
|
1994-05-24 10:09:53 +00:00
|
|
|
register struct nameidata *ndp;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct vnode *vp;
|
|
|
|
int error;
|
|
|
|
|
1994-10-02 17:35:40 +00:00
|
|
|
error = namei(ndp);
|
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
vp = ndp->ni_vp;
|
|
|
|
if (vp->v_type != VDIR)
|
|
|
|
error = ENOTDIR;
|
|
|
|
else
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VOP_ACCESS(vp, VEXEC, td->td_proc->p_ucred, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (error)
|
1997-02-10 02:22:35 +00:00
|
|
|
vput(vp);
|
|
|
|
else
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_UNLOCK(vp, 0, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check permissions, allocate an open file structure,
|
|
|
|
* and call the device open routine if any.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct open_args {
|
|
|
|
char *path;
|
|
|
|
int flags;
|
|
|
|
int mode;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
open(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct open_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) flags;
|
|
|
|
syscallarg(int) mode;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
2000-07-04 03:34:11 +00:00
|
|
|
struct filedesc *fdp = p->p_fd;
|
|
|
|
struct file *fp;
|
|
|
|
struct vnode *vp;
|
|
|
|
struct vattr vat;
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1997-10-28 10:29:55 +00:00
|
|
|
int cmode, flags, oflags;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct file *nfp;
|
|
|
|
int type, indx, error;
|
|
|
|
struct flock lf;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1997-10-28 10:29:55 +00:00
|
|
|
oflags = SCARG(uap, flags);
|
|
|
|
if ((oflags & O_ACCMODE) == O_ACCMODE)
|
1997-10-22 07:28:51 +00:00
|
|
|
return (EINVAL);
|
1997-10-28 10:29:55 +00:00
|
|
|
flags = FFLAGS(oflags);
|
2001-09-12 08:38:13 +00:00
|
|
|
error = falloc(td, &nfp, &indx);
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
fp = nfp;
|
1997-02-10 02:22:35 +00:00
|
|
|
cmode = ((SCARG(uap, mode) &~ fdp->fd_cmask) & ALLPERMS) &~ S_ISTXT;
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
|
|
|
|
td->td_dupfd = -indx - 1; /* XXX check for fdopen */
|
2000-11-18 21:01:04 +00:00
|
|
|
/*
|
|
|
|
* Bump the ref count to prevent another process from closing
|
|
|
|
* the descriptor while we are blocked in vn_open()
|
|
|
|
*/
|
|
|
|
fhold(fp);
|
2000-07-04 03:34:11 +00:00
|
|
|
error = vn_open(&nd, &flags, cmode);
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error) {
|
2000-11-18 21:01:04 +00:00
|
|
|
/*
|
|
|
|
* release our own reference
|
|
|
|
*/
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(fp, td);
|
2000-11-18 21:01:04 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* handle special fdopen() case. bleh. dupfdopen() is
|
|
|
|
* responsible for dropping the old contents of ofiles[indx]
|
|
|
|
* if it succeeds.
|
|
|
|
*/
|
1994-05-24 10:09:53 +00:00
|
|
|
if ((error == ENODEV || error == ENXIO) &&
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_dupfd >= 0 && /* XXX from fdopen */
|
1994-05-24 10:09:53 +00:00
|
|
|
(error =
|
2001-09-12 08:38:13 +00:00
|
|
|
dupfdopen(td, fdp, indx, td->td_dupfd, flags, error)) == 0) {
|
|
|
|
td->td_retval[0] = indx;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
2000-11-18 21:01:04 +00:00
|
|
|
/*
|
|
|
|
* Clean up the descriptor, but only if another thread hadn't
|
|
|
|
* replaced or closed it.
|
|
|
|
*/
|
|
|
|
if (fdp->fd_ofiles[indx] == fp) {
|
|
|
|
fdp->fd_ofiles[indx] = NULL;
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(fp, td);
|
2000-11-18 21:01:04 +00:00
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
if (error == ERESTART)
|
|
|
|
error = EINTR;
|
|
|
|
return (error);
|
|
|
|
}
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_dupfd = 0;
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vp = nd.ni_vp;
|
1996-12-19 19:42:37 +00:00
|
|
|
|
2000-11-18 21:01:04 +00:00
|
|
|
/*
|
|
|
|
* There should be 2 references on the file, one from the descriptor
|
|
|
|
* table, and one for us.
|
|
|
|
*
|
|
|
|
* Handle the case where someone closed the file (via its file
|
|
|
|
* descriptor) while we were blocked. The end result should look
|
|
|
|
* like opening the file succeeded but it was immediately closed.
|
|
|
|
*/
|
|
|
|
if (fp->f_count == 1) {
|
|
|
|
KASSERT(fdp->fd_ofiles[indx] != fp,
|
|
|
|
("Open file descriptor lost all refs"));
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_UNLOCK(vp, 0, td);
|
|
|
|
vn_close(vp, flags & FMASK, fp->f_cred, td);
|
|
|
|
fdrop(fp, td);
|
|
|
|
td->td_retval[0] = indx;
|
2000-11-18 21:01:04 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
1999-08-04 18:53:50 +00:00
|
|
|
fp->f_data = (caddr_t)vp;
|
1994-05-24 10:09:53 +00:00
|
|
|
fp->f_flag = flags & FMASK;
|
|
|
|
fp->f_ops = &vnops;
|
1999-08-04 18:53:50 +00:00
|
|
|
fp->f_type = (vp->v_type == VFIFO ? DTYPE_FIFO : DTYPE_VNODE);
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_UNLOCK(vp, 0, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (flags & (O_EXLOCK | O_SHLOCK)) {
|
|
|
|
lf.l_whence = SEEK_SET;
|
|
|
|
lf.l_start = 0;
|
|
|
|
lf.l_len = 0;
|
|
|
|
if (flags & O_EXLOCK)
|
|
|
|
lf.l_type = F_WRLCK;
|
|
|
|
else
|
|
|
|
lf.l_type = F_RDLCK;
|
|
|
|
type = F_FLOCK;
|
|
|
|
if ((flags & FNONBLOCK) == 0)
|
|
|
|
type |= F_WAIT;
|
2000-07-04 03:34:11 +00:00
|
|
|
if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0)
|
|
|
|
goto bad;
|
1994-05-24 10:09:53 +00:00
|
|
|
fp->f_flag |= FHASLOCK;
|
|
|
|
}
|
2000-07-04 03:34:11 +00:00
|
|
|
if (flags & O_TRUNC) {
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
|
|
|
|
goto bad;
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
|
2000-07-04 03:34:11 +00:00
|
|
|
VATTR_NULL(&vat);
|
|
|
|
vat.va_size = 0;
|
2001-09-12 08:38:13 +00:00
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
|
|
|
|
error = VOP_SETATTR(vp, &vat, p->p_ucred, td);
|
|
|
|
VOP_UNLOCK(vp, 0, td);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
2000-07-04 03:34:11 +00:00
|
|
|
if (error)
|
|
|
|
goto bad;
|
|
|
|
}
|
1999-08-12 20:38:32 +00:00
|
|
|
/* assert that vn_open created a backing object if one is needed */
|
2000-09-12 09:49:08 +00:00
|
|
|
KASSERT(!vn_canvmio(vp) || VOP_GETVOBJECT(vp, NULL) == 0,
|
1999-08-12 20:38:32 +00:00
|
|
|
("open: vmio vnode has no backing object after vn_open"));
|
2000-11-18 21:01:04 +00:00
|
|
|
/*
|
|
|
|
* Release our private reference, leaving the one associated with
|
|
|
|
* the descriptor table intact.
|
|
|
|
*/
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(fp, td);
|
|
|
|
td->td_retval[0] = indx;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
2000-07-04 03:34:11 +00:00
|
|
|
bad:
|
2000-11-18 21:01:04 +00:00
|
|
|
if (fdp->fd_ofiles[indx] == fp) {
|
|
|
|
fdp->fd_ofiles[indx] = NULL;
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(fp, td);
|
2000-11-18 21:01:04 +00:00
|
|
|
}
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(fp, td);
|
2000-07-04 03:34:11 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef COMPAT_43
|
|
|
|
/*
|
|
|
|
* Create a file.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct ocreat_args {
|
|
|
|
char *path;
|
|
|
|
int mode;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
ocreat(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct ocreat_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) mode;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
1997-02-10 02:22:35 +00:00
|
|
|
struct open_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) flags;
|
|
|
|
syscallarg(int) mode;
|
|
|
|
} */ nuap;
|
|
|
|
|
|
|
|
SCARG(&nuap, path) = SCARG(uap, path);
|
|
|
|
SCARG(&nuap, mode) = SCARG(uap, mode);
|
|
|
|
SCARG(&nuap, flags) = O_WRONLY | O_CREAT | O_TRUNC;
|
2001-09-12 08:38:13 +00:00
|
|
|
return (open(td, &nuap));
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
#endif /* COMPAT_43 */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Create a special file.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct mknod_args {
|
|
|
|
char *path;
|
|
|
|
int mode;
|
|
|
|
int dev;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
mknod(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct mknod_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) mode;
|
|
|
|
syscallarg(int) dev;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct vnode *vp;
|
|
|
|
struct mount *mp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
int error;
|
1998-06-07 17:13:14 +00:00
|
|
|
int whiteout = 0;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct nameidata nd;
|
|
|
|
|
1999-04-28 11:38:52 +00:00
|
|
|
switch (SCARG(uap, mode) & S_IFMT) {
|
|
|
|
case S_IFCHR:
|
|
|
|
case S_IFBLK:
|
2001-09-12 08:38:13 +00:00
|
|
|
error = suser_td(td);
|
1999-04-28 11:38:52 +00:00
|
|
|
break;
|
|
|
|
default:
|
2001-09-12 08:38:13 +00:00
|
|
|
error = suser_xxx(0, td->td_proc, PRISON_ROOT);
|
1999-04-28 11:38:52 +00:00
|
|
|
break;
|
|
|
|
}
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
2000-07-11 22:07:57 +00:00
|
|
|
restart:
|
2000-01-10 00:08:53 +00:00
|
|
|
bwillwrite();
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
vp = nd.ni_vp;
|
2000-07-11 22:07:57 +00:00
|
|
|
if (vp != NULL) {
|
|
|
|
vrele(vp);
|
1994-05-24 10:09:53 +00:00
|
|
|
error = EEXIST;
|
2000-07-11 22:07:57 +00:00
|
|
|
} else {
|
1994-05-24 10:09:53 +00:00
|
|
|
VATTR_NULL(&vattr);
|
2001-09-12 08:38:13 +00:00
|
|
|
vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ td->td_proc->p_fd->fd_cmask;
|
1997-02-10 02:22:35 +00:00
|
|
|
vattr.va_rdev = SCARG(uap, dev);
|
|
|
|
whiteout = 0;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
switch (SCARG(uap, mode) & S_IFMT) {
|
1994-05-24 10:09:53 +00:00
|
|
|
case S_IFMT: /* used by badsect to flag bad sectors */
|
|
|
|
vattr.va_type = VBAD;
|
|
|
|
break;
|
|
|
|
case S_IFCHR:
|
|
|
|
vattr.va_type = VCHR;
|
|
|
|
break;
|
|
|
|
case S_IFBLK:
|
|
|
|
vattr.va_type = VBLK;
|
|
|
|
break;
|
1997-02-10 02:22:35 +00:00
|
|
|
case S_IFWHT:
|
|
|
|
whiteout = 1;
|
|
|
|
break;
|
1994-05-24 10:09:53 +00:00
|
|
|
default:
|
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2000-07-11 22:07:57 +00:00
|
|
|
if (vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
|
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
|
|
|
vput(nd.ni_dvp);
|
|
|
|
if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
|
|
|
|
return (error);
|
|
|
|
goto restart;
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
if (!error) {
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
1999-12-15 23:02:35 +00:00
|
|
|
if (whiteout)
|
1997-02-10 02:22:35 +00:00
|
|
|
error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, CREATE);
|
1999-12-15 23:02:35 +00:00
|
|
|
else {
|
1997-02-10 02:22:35 +00:00
|
|
|
error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp,
|
|
|
|
&nd.ni_cnd, &vattr);
|
1999-11-13 14:35:50 +00:00
|
|
|
if (error == 0)
|
|
|
|
vput(nd.ni_vp);
|
1997-02-10 02:22:35 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2000-07-11 22:07:57 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
|
|
|
vput(nd.ni_dvp);
|
|
|
|
vn_finished_write(mp);
|
1997-04-04 17:46:21 +00:00
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_dvp, "mknod");
|
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_vp, "mknod");
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
1997-02-10 02:22:35 +00:00
|
|
|
* Create a named pipe.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct mkfifo_args {
|
|
|
|
char *path;
|
|
|
|
int mode;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
mkfifo(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct mkfifo_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) mode;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
restart:
|
2000-01-10 00:08:53 +00:00
|
|
|
bwillwrite();
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
if (nd.ni_vp != NULL) {
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vrele(nd.ni_vp);
|
2000-07-11 22:07:57 +00:00
|
|
|
vput(nd.ni_dvp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (EEXIST);
|
|
|
|
}
|
2000-07-11 22:07:57 +00:00
|
|
|
if (vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
|
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
|
|
|
vput(nd.ni_dvp);
|
|
|
|
if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
|
|
|
|
return (error);
|
|
|
|
goto restart;
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
VATTR_NULL(&vattr);
|
|
|
|
vattr.va_type = VFIFO;
|
2001-09-12 08:38:13 +00:00
|
|
|
vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ td->td_proc->p_fd->fd_cmask;
|
|
|
|
VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
1998-05-07 04:58:58 +00:00
|
|
|
error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
|
1999-11-13 14:35:50 +00:00
|
|
|
if (error == 0)
|
|
|
|
vput(nd.ni_vp);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1998-05-07 04:58:58 +00:00
|
|
|
vput(nd.ni_dvp);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1998-05-07 04:58:58 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
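For comparison, the usual userland route into this code path is mkfifo(2); a minimal sketch (the path is an arbitrary example), with the requested mode still filtered through the process umask exactly as above:

#include <sys/types.h>
#include <sys/stat.h>

#include <err.h>

int
main(void)
{
	/* Create a FIFO node; opening it later gives the named-pipe semantics. */
	if (mkfifo("/tmp/example.fifo", 0660) == -1)
		err(1, "mkfifo");
	return (0);
}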
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make a hard file link.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct link_args {
|
|
|
|
char *path;
|
|
|
|
char *link;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
link(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct link_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(char *) link;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct vnode *vp;
|
|
|
|
struct mount *mp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct nameidata nd;
|
|
|
|
int error;
|
|
|
|
|
2000-01-10 00:08:53 +00:00
|
|
|
bwillwrite();
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW|NOOBJ, UIO_USERSPACE, SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vp = nd.ni_vp;
|
2000-07-11 22:07:57 +00:00
|
|
|
if (vp->v_type == VDIR) {
|
|
|
|
vrele(vp);
|
|
|
|
return (EPERM); /* POSIX */
|
|
|
|
}
|
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0) {
|
|
|
|
vrele(vp);
|
|
|
|
return (error);
|
|
|
|
}
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, CREATE, LOCKPARENT|NOOBJ, UIO_USERSPACE, SCARG(uap, link), td);
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = namei(&nd)) == 0) {
|
|
|
|
if (nd.ni_vp != NULL) {
|
|
|
|
vrele(nd.ni_vp);
|
|
|
|
error = EEXIST;
|
|
|
|
} else {
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
|
|
|
VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
2000-07-11 22:07:57 +00:00
|
|
|
error = VOP_LINK(nd.ni_dvp, vp, &nd.ni_cnd);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2000-07-11 22:07:57 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
|
|
|
vput(nd.ni_dvp);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
vrele(vp);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1997-04-04 17:46:21 +00:00
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_dvp, "link");
|
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_vp, "link");
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make a symbolic link.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct symlink_args {
|
|
|
|
char *path;
|
|
|
|
char *link;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
symlink(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct symlink_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(char *) link;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
char *path;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1997-09-21 04:24:27 +00:00
|
|
|
path = zalloc(namei_zone);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = copyinstr(SCARG(uap, path), path, MAXPATHLEN, NULL)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
goto out;
|
2000-07-11 22:07:57 +00:00
|
|
|
restart:
|
2000-01-10 00:08:53 +00:00
|
|
|
bwillwrite();
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, CREATE, LOCKPARENT|NOOBJ, UIO_USERSPACE, SCARG(uap, link), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
goto out;
|
|
|
|
if (nd.ni_vp) {
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vrele(nd.ni_vp);
|
2000-07-11 22:07:57 +00:00
|
|
|
vput(nd.ni_dvp);
|
1994-05-24 10:09:53 +00:00
|
|
|
error = EEXIST;
|
|
|
|
goto out;
|
|
|
|
}
|
2000-07-11 22:07:57 +00:00
|
|
|
if (vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
|
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
|
|
|
vput(nd.ni_dvp);
|
|
|
|
if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
|
|
|
|
return (error);
|
|
|
|
goto restart;
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
VATTR_NULL(&vattr);
|
2001-09-12 08:38:13 +00:00
|
|
|
vattr.va_mode = ACCESSPERMS &~ td->td_proc->p_fd->fd_cmask;
|
|
|
|
VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
1994-05-24 10:09:53 +00:00
|
|
|
error = VOP_SYMLINK(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr, path);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1999-11-13 20:58:17 +00:00
|
|
|
if (error == 0)
|
|
|
|
vput(nd.ni_vp);
|
1998-05-07 04:58:58 +00:00
|
|
|
vput(nd.ni_dvp);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1997-04-04 17:46:21 +00:00
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_dvp, "symlink");
|
1999-12-12 03:28:14 +00:00
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_vp, "symlink");
|
1994-05-24 10:09:53 +00:00
|
|
|
out:
|
1997-09-21 04:24:27 +00:00
|
|
|
zfree(namei_zone, path);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
/*
|
|
|
|
* Delete a whiteout from the filesystem.
|
|
|
|
*/
|
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
undelete(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct undelete_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
|
|
|
int error;
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1997-02-10 02:22:35 +00:00
|
|
|
struct nameidata nd;
|
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
restart:
|
2000-01-10 00:08:53 +00:00
|
|
|
bwillwrite();
|
1997-02-10 02:22:35 +00:00
|
|
|
NDINIT(&nd, DELETE, LOCKPARENT|DOWHITEOUT, UIO_USERSPACE,
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, path), td);
|
1997-02-10 02:22:35 +00:00
|
|
|
error = namei(&nd);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
if (nd.ni_vp != NULLVP || !(nd.ni_cnd.cn_flags & ISWHITEOUT)) {
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1997-02-10 02:22:35 +00:00
|
|
|
if (nd.ni_vp)
|
|
|
|
vrele(nd.ni_vp);
|
2000-07-11 22:07:57 +00:00
|
|
|
vput(nd.ni_dvp);
|
1997-02-10 02:22:35 +00:00
|
|
|
return (EEXIST);
|
|
|
|
}
|
2000-07-11 22:07:57 +00:00
|
|
|
if (vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
|
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
|
|
|
vput(nd.ni_dvp);
|
|
|
|
if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
|
|
|
|
return (error);
|
|
|
|
goto restart;
|
|
|
|
}
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
1999-12-15 23:02:35 +00:00
|
|
|
error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, DELETE);
|
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1997-02-10 02:22:35 +00:00
|
|
|
vput(nd.ni_dvp);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1997-04-04 17:46:21 +00:00
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_dvp, "undelete");
|
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_vp, "undelete");
|
1997-02-10 02:22:35 +00:00
|
|
|
return (error);
|
|
|
|
}
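The userland entry point for this is undelete(2); it is only meaningful on a union mount, where removing the whiteout lets the lower-layer object show through again. A minimal sketch (the path is an example):

#include <unistd.h>

#include <err.h>

int
main(void)
{
	/* Remove the whiteout covering the lower-layer file. */
	if (undelete("/mnt/union/file.txt") == -1)
		err(1, "undelete");
	return (0);
}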
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Delete a name from the filesystem.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct unlink_args {
|
|
|
|
char *path;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
unlink(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
struct unlink_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
|
|
|
struct vnode *vp;
|
1994-05-24 10:09:53 +00:00
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
restart:
|
2000-01-10 00:08:53 +00:00
|
|
|
bwillwrite();
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, DELETE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
vp = nd.ni_vp;
|
1996-05-24 16:19:23 +00:00
|
|
|
if (vp->v_type == VDIR)
|
|
|
|
error = EPERM; /* POSIX */
|
|
|
|
else {
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* The root of a mounted filesystem cannot be deleted.
|
1996-05-24 16:19:23 +00:00
|
|
|
*
|
|
|
|
* XXX: can this only be a VDIR case?
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
|
|
|
if (vp->v_flag & VROOT)
|
|
|
|
error = EBUSY;
|
|
|
|
}
|
2000-07-11 22:07:57 +00:00
|
|
|
if (vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
|
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
|
|
|
vrele(vp);
|
|
|
|
vput(nd.ni_dvp);
|
|
|
|
if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
|
|
|
|
return (error);
|
|
|
|
goto restart;
|
|
|
|
}
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (!error) {
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
Make our v_usecount vnode reference count work identically to the
original BSD code. The association between the vnode and the vm_object
no longer includes reference counts. The major difference is that
vm_objects are no longer freed gratuitously from the vnode, and so
once an object is created for the vnode, it will last as long as the
vnode does.
When a vnode object reference count is incremented, then the underlying
vnode reference count is incremented also. The two "objects" are now
more intimately related, and so the interactions are now much less
complex.
Vnodes are now normally placed onto the free queue with an object still
attached. The rundown of the object happens at vnode rundown time, and
happens with exactly the same filesystem semantics of the original VFS
code. There is absolutely no need for vnode_pager_uncache and other
travesties like that anymore.
A side-effect of these changes is that SMP locking should be much simpler,
the I/O copyin/copyout optimizations work, NFS should be more ponderable,
and further work on layered filesystems should be less frustrating, because
of the totally coherent management of the vnode objects and vnodes.
Please be careful with your system while running this code, but I would
greatly appreciate feedback as soon as reasonably possible.
1998-01-06 05:26:17 +00:00
|
|
|
error = VOP_REMOVE(nd.ni_dvp, vp, &nd.ni_cnd);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2000-07-11 22:07:57 +00:00
|
|
|
vput(nd.ni_dvp);
|
|
|
|
vput(vp);
|
|
|
|
vn_finished_write(mp);
|
1997-04-04 17:46:21 +00:00
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_dvp, "unlink");
|
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_vp, "unlink");
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Reposition read/write file offset.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct lseek_args {
|
|
|
|
int fd;
|
|
|
|
int pad;
|
|
|
|
off_t offset;
|
|
|
|
int whence;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
lseek(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct lseek_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(int) pad;
|
|
|
|
syscallarg(off_t) offset;
|
|
|
|
syscallarg(int) whence;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct ucred *cred = td->td_proc->p_ucred;
|
|
|
|
register struct filedesc *fdp = td->td_proc->p_fd;
|
1994-05-24 10:09:53 +00:00
|
|
|
register struct file *fp;
|
|
|
|
struct vattr vattr;
|
2001-08-21 21:20:42 +00:00
|
|
|
struct vnode *vp;
|
|
|
|
off_t offset;
|
|
|
|
int error, noneg;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
if ((u_int)SCARG(uap, fd) >= fdp->fd_nfiles ||
|
|
|
|
(fp = fdp->fd_ofiles[SCARG(uap, fd)]) == NULL)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (EBADF);
|
|
|
|
if (fp->f_type != DTYPE_VNODE)
|
|
|
|
return (ESPIPE);
|
2001-08-21 21:20:42 +00:00
|
|
|
vp = (struct vnode *)fp->f_data;
|
|
|
|
noneg = (vp->v_type != VCHR);
|
|
|
|
offset = SCARG(uap, offset);
|
1997-02-10 02:22:35 +00:00
|
|
|
switch (SCARG(uap, whence)) {
|
1994-05-24 10:09:53 +00:00
|
|
|
case L_INCR:
|
2001-08-21 21:20:42 +00:00
|
|
|
if (noneg &&
|
2001-08-29 18:35:53 +00:00
|
|
|
(fp->f_offset < 0 ||
|
|
|
|
(offset > 0 && fp->f_offset > OFF_MAX - offset)))
|
2001-08-21 21:20:42 +00:00
|
|
|
return (EOVERFLOW);
|
|
|
|
offset += fp->f_offset;
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
|
|
|
|
case L_XTND:
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VOP_GETATTR(vp, &vattr, cred, td);
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
2001-08-21 21:20:42 +00:00
|
|
|
if (noneg &&
|
2001-08-29 18:35:53 +00:00
|
|
|
(vattr.va_size > OFF_MAX ||
|
|
|
|
(offset > 0 && vattr.va_size > OFF_MAX - offset)))
|
2001-08-21 21:20:42 +00:00
|
|
|
return (EOVERFLOW);
|
|
|
|
offset += vattr.va_size;
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
|
|
|
|
case L_SET:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return (EINVAL);
|
|
|
|
}
|
2001-08-21 21:20:42 +00:00
|
|
|
if (noneg && offset < 0)
|
|
|
|
return (EINVAL);
|
|
|
|
fp->f_offset = offset;
|
2001-09-12 08:38:13 +00:00
|
|
|
*(off_t *)(td->td_retval) = fp->f_offset;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
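As a userland illustration of the whence handling above (L_SET, L_INCR and L_XTND correspond to SEEK_SET, SEEK_CUR and SEEK_END), seeking to the end with a zero offset is the classic way to learn a file's size from the returned offset:

#include <sys/types.h>

#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	off_t size;
	int fd;

	if (argc != 2)
		errx(1, "usage: filesize path");
	if ((fd = open(argv[1], O_RDONLY)) == -1)
		err(1, "%s", argv[1]);
	/* SEEK_END with offset 0: the resulting offset is the current file size. */
	if ((size = lseek(fd, 0, SEEK_END)) == -1)
		err(1, "lseek");
	printf("%jd bytes\n", (intmax_t)size);
	close(fd);
	return (0);
}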
|
|
|
|
|
|
|
|
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
|
|
|
|
/*
|
|
|
|
* Reposition read/write file offset.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct olseek_args {
|
|
|
|
int fd;
|
|
|
|
long offset;
|
|
|
|
int whence;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
olseek(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct olseek_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(long) offset;
|
|
|
|
syscallarg(int) whence;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
1997-02-10 02:22:35 +00:00
|
|
|
struct lseek_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(int) pad;
|
|
|
|
syscallarg(off_t) offset;
|
|
|
|
syscallarg(int) whence;
|
|
|
|
} */ nuap;
|
1994-05-24 10:09:53 +00:00
|
|
|
int error;
|
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
SCARG(&nuap, fd) = SCARG(uap, fd);
|
|
|
|
SCARG(&nuap, offset) = SCARG(uap, offset);
|
|
|
|
SCARG(&nuap, whence) = SCARG(uap, whence);
|
2001-09-12 08:38:13 +00:00
|
|
|
error = lseek(td, &nuap);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
#endif /* COMPAT_43 */
|
|
|
|
|
|
|
|
/*
|
2001-09-21 21:33:22 +00:00
|
|
|
* Check access permissions using passed credentials.
|
|
|
|
*/
|
|
|
|
static int
|
2001-09-22 03:07:41 +00:00
|
|
|
vn_access(vp, user_flags, cred, td)
|
2001-09-21 21:33:22 +00:00
|
|
|
struct vnode *vp;
|
|
|
|
int user_flags;
|
|
|
|
struct ucred *cred;
|
|
|
|
struct thread *td;
|
|
|
|
{
|
|
|
|
int error, flags;
|
|
|
|
|
|
|
|
/* Flags == 0 means only check for existence. */
|
|
|
|
error = 0;
|
|
|
|
if (user_flags) {
|
|
|
|
flags = 0;
|
|
|
|
if (user_flags & R_OK)
|
|
|
|
flags |= VREAD;
|
|
|
|
if (user_flags & W_OK)
|
|
|
|
flags |= VWRITE;
|
|
|
|
if (user_flags & X_OK)
|
|
|
|
flags |= VEXEC;
|
|
|
|
if ((flags & VWRITE) == 0 || (error = vn_writechk(vp)) == 0)
|
|
|
|
error = VOP_ACCESS(vp, flags, cred, td);
|
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check access permissions using "real" credentials.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct access_args {
|
|
|
|
char *path;
|
|
|
|
int flags;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
access(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct access_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) flags;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-09-02 12:31:55 +00:00
|
|
|
struct ucred *cred, *tmpcred;
|
1994-05-24 10:09:53 +00:00
|
|
|
register struct vnode *vp;
|
2001-09-21 21:33:22 +00:00
|
|
|
int error;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct nameidata nd;
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
cred = td->td_proc->p_ucred;
|
2000-09-02 12:31:55 +00:00
|
|
|
/*
|
|
|
|
* Create and modify a temporary credential instead of one that
|
|
|
|
* is potentially shared. This could also mess up socket
|
|
|
|
* buffer accounting which can run in an interrupt context.
|
|
|
|
*
|
|
|
|
* XXX - Depending on how "threads" are finally implemented, it
|
|
|
|
* may be better to explicitly pass the credential to namei()
|
|
|
|
* rather than to modify the potentially shared process structure.
|
|
|
|
*/
|
|
|
|
tmpcred = crdup(cred);
|
o Merge contents of struct pcred into struct ucred. Specifically, add the
real uid, saved uid, real gid, and saved gid to ucred, as well as the
pcred->pc_uidinfo, which was associated with the real uid, only rename
it to cr_ruidinfo so as not to conflict with cr_uidinfo, which
corresponds to the effective uid.
o Remove p_cred from struct proc; add p_ucred to struct proc, replacing
original macro that pointed p->p_ucred to p->p_cred->pc_ucred.
o Universally update code so that it makes use of ucred instead of pcred,
p->p_ucred instead of p->p_pcred, cr_ruidinfo instead of p_uidinfo,
cr_{r,sv}{u,g}id instead of p_*, etc.
o Remove pcred0 and its initialization from init_main.c; initialize
cr_ruidinfo there.
o Restructure many credential modification chunks to always crdup while
we figure out locking and optimizations; generally speaking, this
means moving to a structure like this:
newcred = crdup(oldcred);
...
p->p_ucred = newcred;
crfree(oldcred);
It's not race-free, but better than nothing. There are also races
in sys_process.c, all inter-process authorization, fork, exec, and
exit.
o Remove sigio->sio_ruid since sigio->sio_ucred now contains the ruid;
remove comments indicating that the old arrangement was a problem.
o Restructure exec1() a little to use newcred/oldcred arrangement, and
use improved uid management primitives.
o Clean up exit1() so as to do less work in credential cleanup due to
pcred removal.
o Clean up fork1() so as to do less work in credential cleanup and
allocation.
o Clean up ktrcanset() to take into account changes, and move to using
suser_xxx() instead of performing a direct uid==0 comparison.
o Improve commenting in various kern_prot.c credential modification
calls to better document current behavior. In a couple of places,
current behavior is a little questionable and we need to check
POSIX.1 to make sure it's "right". More commenting work still
remains to be done.
o Update credential management calls, such as crfree(), to take into
account new ruidinfo reference.
o Modify or add the following uid and gid helper routines:
change_euid()
change_egid()
change_ruid()
change_rgid()
change_svuid()
change_svgid()
In each case, the call now acts on a credential not a process, and as
such no longer requires more complicated process locking/etc. They
now assume the caller will do any necessary allocation of an
exclusive credential reference. Each is commented to document its
reference requirements.
o CANSIGIO() is simplified to require only credentials, not processes
and pcreds.
o Remove lots of (p_pcred==NULL) checks.
o Add an XXX to authorization code in nfs_lock.c, since it's
questionable, and needs to be considered carefully.
o Simplify posix4 authorization code to require only credentials, not
processes and pcreds. Note that this authorization, as well as
CANSIGIO(), needs to be updated to use the p_cansignal() and
p_cansched() centralized authorization routines, as they currently
do not take into account some desirable restrictions that are handled
by the centralized routines, as well as being inconsistent with other
similar authorization instances.
o Update libkvm to take these changes into account.
Obtained from: TrustedBSD Project
Reviewed by: green, bde, jhb, freebsd-arch, freebsd-audit
2001-05-25 16:59:11 +00:00
|
|
|
tmpcred->cr_uid = cred->cr_ruid;
|
|
|
|
tmpcred->cr_groups[0] = cred->cr_rgid;
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_proc->p_ucred = tmpcred;
|
1998-01-06 05:26:17 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
goto out1;
|
|
|
|
vp = nd.ni_vp;
|
|
|
|
|
2001-09-22 03:07:41 +00:00
|
|
|
error = vn_access(vp, SCARG(uap, flags), tmpcred, td);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vput(vp);
|
|
|
|
out1:
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_proc->p_ucred = cred;
|
2000-09-02 12:31:55 +00:00
|
|
|
crfree(tmpcred);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2001-09-21 21:33:22 +00:00
|
|
|
/*
|
|
|
|
* Check access permissions using "effective" credentials.
|
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct eaccess_args {
|
|
|
|
char *path;
|
|
|
|
int flags;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
int
|
|
|
|
eaccess(td, uap)
|
|
|
|
struct thread *td;
|
|
|
|
register struct eaccess_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) flags;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
|
|
|
struct nameidata nd;
|
|
|
|
struct vnode *vp;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
|
|
|
|
SCARG(uap, path), td);
|
|
|
|
if ((error = namei(&nd)) != 0)
|
|
|
|
return (error);
|
|
|
|
vp = nd.ni_vp;
|
|
|
|
|
2001-09-22 03:07:41 +00:00
|
|
|
error = vn_access(vp, SCARG(uap, flags), td->td_proc->p_ucred, td);
|
2001-09-21 21:33:22 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
|
|
|
vput(vp);
|
|
|
|
return (error);
|
|
|
|
}
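The difference between access() and the eaccess() call added above only shows up when the real and effective credentials differ, i.e. in a set-uid or set-gid program. A minimal userland sketch (the path is an example; eaccess() is the FreeBSD-specific call implemented here):

#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	const char *path = "/var/db/private.db";

	/* access() checks with the real uid/gid (the invoking user). */
	printf("real ids:      %s\n",
	    access(path, R_OK | W_OK) == 0 ? "ok" : "denied");
	/* eaccess() checks with the effective uid/gid (e.g. the set-uid owner). */
	printf("effective ids: %s\n",
	    eaccess(path, R_OK | W_OK) == 0 ? "ok" : "denied");
	return (0);
}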
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
|
|
|
|
/*
|
|
|
|
* Get file status; this version follows links.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct ostat_args {
|
|
|
|
char *path;
|
|
|
|
struct ostat *ub;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
ostat(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct ostat_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(struct ostat *) ub;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct stat sb;
|
|
|
|
struct ostat osb;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1998-01-06 05:26:17 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2001-09-12 08:38:13 +00:00
|
|
|
error = vn_stat(nd.ni_vp, &sb, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
vput(nd.ni_vp);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
cvtstat(&sb, &osb);
|
1997-02-10 02:22:35 +00:00
|
|
|
error = copyout((caddr_t)&osb, (caddr_t)SCARG(uap, ub), sizeof (osb));
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get file status; this version does not follow links.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct olstat_args {
|
|
|
|
char *path;
|
|
|
|
struct ostat *ub;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
olstat(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct olstat_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(struct ostat *) ub;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
1997-03-31 12:02:53 +00:00
|
|
|
struct vnode *vp;
|
|
|
|
struct stat sb;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct ostat osb;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1998-01-06 05:26:17 +00:00
|
|
|
NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1994-09-02 04:14:44 +00:00
|
|
|
vp = nd.ni_vp;
|
2001-09-12 08:38:13 +00:00
|
|
|
error = vn_stat(vp, &sb, td);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1997-03-31 12:02:53 +00:00
|
|
|
vput(vp);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
cvtstat(&sb, &osb);
|
1997-02-10 02:22:35 +00:00
|
|
|
error = copyout((caddr_t)&osb, (caddr_t)SCARG(uap, ub), sizeof (osb));
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Convert from an old to a new stat structure.
|
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
void
|
1994-05-24 10:09:53 +00:00
|
|
|
cvtstat(st, ost)
|
|
|
|
struct stat *st;
|
|
|
|
struct ostat *ost;
|
|
|
|
{
|
|
|
|
|
|
|
|
ost->st_dev = st->st_dev;
|
|
|
|
ost->st_ino = st->st_ino;
|
|
|
|
ost->st_mode = st->st_mode;
|
|
|
|
ost->st_nlink = st->st_nlink;
|
|
|
|
ost->st_uid = st->st_uid;
|
|
|
|
ost->st_gid = st->st_gid;
|
|
|
|
ost->st_rdev = st->st_rdev;
|
|
|
|
if (st->st_size < (quad_t)1 << 32)
|
|
|
|
ost->st_size = st->st_size;
|
|
|
|
else
|
|
|
|
ost->st_size = -2;
|
|
|
|
ost->st_atime = st->st_atime;
|
|
|
|
ost->st_mtime = st->st_mtime;
|
|
|
|
ost->st_ctime = st->st_ctime;
|
|
|
|
ost->st_blksize = st->st_blksize;
|
|
|
|
ost->st_blocks = st->st_blocks;
|
|
|
|
ost->st_flags = st->st_flags;
|
|
|
|
ost->st_gen = st->st_gen;
|
|
|
|
}
|
|
|
|
#endif /* COMPAT_43 || COMPAT_SUNOS */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get file status; this version follows links.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct stat_args {
|
|
|
|
char *path;
|
|
|
|
struct stat *ub;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
stat(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct stat_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(struct stat *) ub;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct stat sb;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1998-01-06 05:26:17 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
2001-09-12 08:38:13 +00:00
|
|
|
error = vn_stat(nd.ni_vp, &sb, td);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vput(nd.ni_vp);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
1997-02-10 02:22:35 +00:00
|
|
|
error = copyout((caddr_t)&sb, (caddr_t)SCARG(uap, ub), sizeof (sb));
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get file status; this version does not follow links.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct lstat_args {
|
|
|
|
char *path;
|
|
|
|
struct stat *ub;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
lstat(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct lstat_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(struct stat *) ub;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
int error;
|
1997-03-31 12:02:53 +00:00
|
|
|
struct vnode *vp;
|
|
|
|
struct stat sb;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct nameidata nd;
|
|
|
|
|
1998-01-06 05:26:17 +00:00
|
|
|
NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
vp = nd.ni_vp;
|
2001-09-12 08:38:13 +00:00
|
|
|
error = vn_stat(vp, &sb, td);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1997-03-31 12:02:53 +00:00
|
|
|
vput(vp);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
1997-02-10 02:22:35 +00:00
|
|
|
error = copyout((caddr_t)&sb, (caddr_t)SCARG(uap, ub), sizeof (sb));
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2000-09-14 19:13:59 +00:00
|
|
|
/*
|
|
|
|
* Implementation of the NetBSD stat() function.
|
|
|
|
* XXX This should probably be collapsed with the FreeBSD version,
|
|
|
|
* as the differences are only due to vn_stat() clearing spares at
|
|
|
|
* the end of the structures. vn_stat could be split to avoid this,
|
|
|
|
* and thus collapse the following to close to zero code.
|
|
|
|
*/
|
1998-05-11 03:55:28 +00:00
|
|
|
void
|
|
|
|
cvtnstat(sb, nsb)
|
|
|
|
struct stat *sb;
|
|
|
|
struct nstat *nsb;
|
|
|
|
{
|
|
|
|
nsb->st_dev = sb->st_dev;
|
|
|
|
nsb->st_ino = sb->st_ino;
|
|
|
|
nsb->st_mode = sb->st_mode;
|
|
|
|
nsb->st_nlink = sb->st_nlink;
|
|
|
|
nsb->st_uid = sb->st_uid;
|
|
|
|
nsb->st_gid = sb->st_gid;
|
|
|
|
nsb->st_rdev = sb->st_rdev;
|
|
|
|
nsb->st_atimespec = sb->st_atimespec;
|
|
|
|
nsb->st_mtimespec = sb->st_mtimespec;
|
|
|
|
nsb->st_ctimespec = sb->st_ctimespec;
|
|
|
|
nsb->st_size = sb->st_size;
|
|
|
|
nsb->st_blocks = sb->st_blocks;
|
|
|
|
nsb->st_blksize = sb->st_blksize;
|
|
|
|
nsb->st_flags = sb->st_flags;
|
|
|
|
nsb->st_gen = sb->st_gen;
|
1999-11-18 08:14:20 +00:00
|
|
|
nsb->st_qspare[0] = sb->st_qspare[0];
|
|
|
|
nsb->st_qspare[1] = sb->st_qspare[1];
|
1998-05-11 03:55:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct nstat_args {
|
|
|
|
char *path;
|
|
|
|
struct nstat *ub;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
nstat(td, uap)
|
|
|
|
struct thread *td;
|
1998-05-11 03:55:28 +00:00
|
|
|
register struct nstat_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(struct nstat *) ub;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
|
|
|
struct stat sb;
|
|
|
|
struct nstat nsb;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1998-05-11 03:55:28 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2001-09-12 08:38:13 +00:00
|
|
|
error = vn_stat(nd.ni_vp, &sb, td);
|
1998-05-11 03:55:28 +00:00
|
|
|
vput(nd.ni_vp);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
cvtnstat(&sb, &nsb);
|
|
|
|
error = copyout((caddr_t)&nsb, (caddr_t)SCARG(uap, ub), sizeof (nsb));
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-09-14 19:13:59 +00:00
|
|
|
* NetBSD lstat. Get file status; this version does not follow links.
|
1998-05-11 03:55:28 +00:00
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct lstat_args {
|
|
|
|
char *path;
|
|
|
|
struct stat *ub;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
nlstat(td, uap)
|
|
|
|
struct thread *td;
|
1998-05-11 03:55:28 +00:00
|
|
|
register struct nlstat_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(struct nstat *) ub;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct vnode *vp;
|
|
|
|
struct stat sb;
|
|
|
|
struct nstat nsb;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
|
|
|
NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1998-05-11 03:55:28 +00:00
|
|
|
return (error);
|
|
|
|
vp = nd.ni_vp;
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2001-09-12 08:38:13 +00:00
|
|
|
error = vn_stat(vp, &sb, td);
|
1998-05-11 03:55:28 +00:00
|
|
|
vput(vp);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
cvtnstat(&sb, &nsb);
|
|
|
|
error = copyout((caddr_t)&nsb, (caddr_t)SCARG(uap, ub), sizeof (nsb));
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Get configurable pathname variables.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct pathconf_args {
|
|
|
|
char *path;
|
|
|
|
int name;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
pathconf(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct pathconf_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) name;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1998-01-06 05:26:17 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VOP_PATHCONF(nd.ni_vp, SCARG(uap, name), td->td_retval);
|
1994-05-24 10:09:53 +00:00
|
|
|
vput(nd.ni_vp);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return target name of a symbolic link.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct readlink_args {
|
|
|
|
char *path;
|
|
|
|
char *buf;
|
|
|
|
int count;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
readlink(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct readlink_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(char *) buf;
|
|
|
|
syscallarg(int) count;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
register struct vnode *vp;
|
|
|
|
struct iovec aiov;
|
|
|
|
struct uio auio;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1998-01-06 05:26:17 +00:00
|
|
|
NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vp = nd.ni_vp;
|
|
|
|
if (vp->v_type != VLNK)
|
|
|
|
error = EINVAL;
|
|
|
|
else {
|
1997-02-10 02:22:35 +00:00
|
|
|
aiov.iov_base = SCARG(uap, buf);
|
|
|
|
aiov.iov_len = SCARG(uap, count);
|
1994-05-24 10:09:53 +00:00
|
|
|
auio.uio_iov = &aiov;
|
|
|
|
auio.uio_iovcnt = 1;
|
|
|
|
auio.uio_offset = 0;
|
|
|
|
auio.uio_rw = UIO_READ;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
2001-09-12 08:38:13 +00:00
|
|
|
auio.uio_td = td;
|
1997-02-10 02:22:35 +00:00
|
|
|
auio.uio_resid = SCARG(uap, count);
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VOP_READLINK(vp, &auio, td->td_proc->p_ucred);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
vput(vp);
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = SCARG(uap, count) - auio.uio_resid;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
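Two details of the implementation above matter to callers: the link contents copied out are not NUL-terminated, and the value returned via td_retval (count minus the unread residual) is the number of bytes actually placed in the buffer. A minimal userland sketch:

#include <sys/param.h>

#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	char buf[MAXPATHLEN];
	ssize_t n;

	if (argc != 2)
		errx(1, "usage: readtarget link");
	/* readlink(2) does not append a NUL; terminate the buffer ourselves. */
	if ((n = readlink(argv[1], buf, sizeof(buf) - 1)) == -1)
		err(1, "%s", argv[1]);
	buf[n] = '\0';
	printf("%s -> %s\n", argv[1], buf);
	return (0);
}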
|
|
|
|
|
2000-09-14 19:13:59 +00:00
|
|
|
/*
|
|
|
|
* Common implementation code for chflags() and fchflags().
|
|
|
|
*/
|
1998-05-11 03:55:28 +00:00
|
|
|
static int
|
2001-09-12 08:38:13 +00:00
|
|
|
setfflags(td, vp, flags)
|
|
|
|
struct thread *td;
|
1998-05-11 03:55:28 +00:00
|
|
|
struct vnode *vp;
|
|
|
|
int flags;
|
|
|
|
{
|
|
|
|
int error;
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1998-05-11 03:55:28 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
|
1999-08-02 21:34:46 +00:00
|
|
|
/*
|
1999-08-04 04:52:18 +00:00
|
|
|
* Prevent non-root users from setting flags on devices. When
|
|
|
|
* a device is reused, users can retain ownership of the device
|
|
|
|
* if they are allowed to set flags and programs assume that
|
|
|
|
* chown can't fail when done as root.
|
1999-08-02 21:34:46 +00:00
|
|
|
*/
|
2001-10-01 20:01:07 +00:00
|
|
|
if (vp->v_type == VCHR || vp->v_type == VBLK) {
|
|
|
|
error = suser_xxx(td->td_proc->p_ucred, td->td_proc,
|
|
|
|
PRISON_ROOT);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
}
|
1999-08-03 17:07:04 +00:00
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
|
|
|
|
return (error);
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
|
1998-05-11 03:55:28 +00:00
|
|
|
VATTR_NULL(&vattr);
|
|
|
|
vattr.va_flags = flags;
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VOP_SETATTR(vp, &vattr, td->td_proc->p_ucred, td);
|
|
|
|
VOP_UNLOCK(vp, 0, td);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1999-08-04 04:52:18 +00:00
|
|
|
return (error);
|
1998-05-11 03:55:28 +00:00
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Change flags of a file given a path name.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct chflags_args {
|
|
|
|
char *path;
|
|
|
|
int flags;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
chflags(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct chflags_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) flags;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2001-09-12 08:38:13 +00:00
|
|
|
error = setfflags(td, nd.ni_vp, SCARG(uap, flags));
|
1998-05-11 03:55:28 +00:00
|
|
|
vrele(nd.ni_vp);
|
|
|
|
return error;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
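From userland this path is reached through chflags(2) (and fchflags(2) below). A minimal sketch using one of the user-settable flags, which the file's owner may both set and clear (the path is an example):

#include <sys/types.h>
#include <sys/stat.h>

#include <err.h>
#include <unistd.h>

int
main(void)
{
	/* UF_APPEND: writes may only append; being a UF_ flag, the owner can clear it. */
	if (chflags("/tmp/logfile", UF_APPEND) == -1)
		err(1, "chflags");
	return (0);
}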
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Change flags of a file given a file descriptor.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct fchflags_args {
|
|
|
|
int fd;
|
|
|
|
int flags;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
fchflags(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct fchflags_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(int) flags;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
int error;
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
2001-09-12 08:38:13 +00:00
|
|
|
return setfflags(td, (struct vnode *) fp->f_data, SCARG(uap, flags));
|
1998-05-11 03:55:28 +00:00
|
|
|
}
|
|
|
|
|
2000-09-14 19:13:59 +00:00
|
|
|
/*
|
|
|
|
* Common implementation code for chmod(), lchmod() and fchmod().
|
|
|
|
*/
|
1998-05-11 03:55:28 +00:00
|
|
|
static int
|
2001-09-12 08:38:13 +00:00
|
|
|
setfmode(td, vp, mode)
|
|
|
|
struct thread *td;
|
1998-05-11 03:55:28 +00:00
|
|
|
struct vnode *vp;
|
|
|
|
int mode;
|
|
|
|
{
|
|
|
|
int error;
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1998-05-11 03:55:28 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
|
|
|
|
return (error);
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
|
1995-10-22 09:32:48 +00:00
|
|
|
VATTR_NULL(&vattr);
|
1998-05-11 03:55:28 +00:00
|
|
|
vattr.va_mode = mode & ALLPERMS;
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VOP_SETATTR(vp, &vattr, td->td_proc->p_ucred, td);
|
|
|
|
VOP_UNLOCK(vp, 0, td);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1998-05-11 03:55:28 +00:00
|
|
|
return error;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Change mode of a file given path name.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct chmod_args {
|
|
|
|
char *path;
|
|
|
|
int mode;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
chmod(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct chmod_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) mode;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2001-09-12 08:38:13 +00:00
|
|
|
error = setfmode(td, nd.ni_vp, SCARG(uap, mode));
|
1998-05-11 03:55:28 +00:00
|
|
|
vrele(nd.ni_vp);
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Change mode of a file given path name (don't follow links).
|
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct lchmod_args {
|
|
|
|
char *path;
|
|
|
|
int mode;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
lchmod(td, uap)
|
|
|
|
struct thread *td;
|
1998-05-11 03:55:28 +00:00
|
|
|
register struct lchmod_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) mode;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1998-05-11 03:55:28 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2001-09-12 08:38:13 +00:00
|
|
|
error = setfmode(td, nd.ni_vp, SCARG(uap, mode));
|
1998-05-11 03:55:28 +00:00
|
|
|
vrele(nd.ni_vp);
|
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Change mode of a file given a file descriptor.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct fchmod_args {
|
|
|
|
int fd;
|
|
|
|
int mode;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
fchmod(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct fchmod_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(int) mode;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
int error;
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
2001-09-12 08:38:13 +00:00
|
|
|
return (setfmode(td, (struct vnode *)fp->f_data, SCARG(uap, mode)));
|
1998-05-11 03:55:28 +00:00
|
|
|
}
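/*
 * Illustrative userland sketch (not part of this file): the three
 * front-ends above, chmod(2), lchmod(2) and fchmod(2), all funnel into
 * setfmode(); from user code they differ only in how the vnode is
 * named.  The 0640 mode and the path names are arbitrary examples.
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <err.h>

static void
set_modes(const char *path, const char *linkpath, int fd)
{
	if (chmod(path, 0640) == -1)		/* follows symlinks */
		err(1, "chmod %s", path);
	if (lchmod(linkpath, 0640) == -1)	/* operates on the link itself */
		err(1, "lchmod %s", linkpath);
	if (fchmod(fd, 0640) == -1)		/* by open descriptor */
		err(1, "fchmod");
}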
|
|
|
|
|
2000-09-14 19:13:59 +00:00
|
|
|
/*
|
|
|
|
* Common implementation for chown(), lchown(), and fchown()
|
|
|
|
*/
|
1998-05-11 03:55:28 +00:00
|
|
|
static int
|
2001-09-12 08:38:13 +00:00
|
|
|
setfown(td, vp, uid, gid)
|
|
|
|
struct thread *td;
|
1998-05-11 03:55:28 +00:00
|
|
|
struct vnode *vp;
|
|
|
|
uid_t uid;
|
|
|
|
gid_t gid;
|
|
|
|
{
|
|
|
|
int error;
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1998-05-11 03:55:28 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
|
|
|
|
return (error);
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
|
1995-10-22 09:32:48 +00:00
|
|
|
VATTR_NULL(&vattr);
|
1998-05-11 03:55:28 +00:00
|
|
|
vattr.va_uid = uid;
|
|
|
|
vattr.va_gid = gid;
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VOP_SETATTR(vp, &vattr, td->td_proc->p_ucred, td);
|
|
|
|
VOP_UNLOCK(vp, 0, td);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1998-05-11 03:55:28 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set ownership given a path name.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct chown_args {
|
|
|
|
char *path;
|
|
|
|
int uid;
|
|
|
|
int gid;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
chown(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct chown_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) uid;
|
|
|
|
syscallarg(int) gid;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2001-09-12 08:38:13 +00:00
|
|
|
error = setfown(td, nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid));
|
1998-05-11 03:55:28 +00:00
|
|
|
vrele(nd.ni_vp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1997-03-31 12:21:37 +00:00
|
|
|
/*
|
|
|
|
* Set ownership given a path name; do not cross symlinks.
|
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct lchown_args {
|
|
|
|
char *path;
|
|
|
|
int uid;
|
|
|
|
int gid;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
lchown(td, uap)
|
|
|
|
struct thread *td;
|
1997-03-31 12:21:37 +00:00
|
|
|
register struct lchown_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) uid;
|
|
|
|
syscallarg(int) gid;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1997-03-31 12:21:37 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2001-09-12 08:38:13 +00:00
|
|
|
error = setfown(td, nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid));
|
1998-05-11 03:55:28 +00:00
|
|
|
vrele(nd.ni_vp);
|
1997-03-31 12:21:37 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Set ownership given a file descriptor.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct fchown_args {
|
|
|
|
int fd;
|
|
|
|
int uid;
|
|
|
|
int gid;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
fchown(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct fchown_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(int) uid;
|
|
|
|
syscallarg(int) gid;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
int error;
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
2001-09-12 08:38:13 +00:00
|
|
|
return (setfown(td, (struct vnode *)fp->f_data,
|
1998-05-11 03:55:28 +00:00
|
|
|
SCARG(uap, uid), SCARG(uap, gid)));
|
|
|
|
}
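/*
 * Illustrative userland sketch (not part of this file): the common
 * setfown() helper above is reached through chown(2), lchown(2) or
 * fchown(2).  The conventional (uid_t)-1 / (gid_t)-1 "leave unchanged"
 * values and the path are arbitrary examples.
 */
#include <sys/types.h>
#include <unistd.h>
#include <err.h>

static void
give_away(const char *path, int fd, uid_t uid, gid_t gid)
{
	if (chown(path, uid, gid) == -1)	/* follows symlinks */
		err(1, "chown %s", path);
	if (lchown(path, uid, gid) == -1)	/* does not follow symlinks */
		err(1, "lchown %s", path);
	if (fchown(fd, uid, (gid_t)-1) == -1)	/* change owner only */
		err(1, "fchown");
}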
|
|
|
|
|
2000-09-14 19:13:59 +00:00
|
|
|
/*
|
|
|
|
* Common implementation code for utimes(), lutimes(), and futimes().
|
|
|
|
*/
|
1998-05-11 03:55:28 +00:00
|
|
|
static int
|
1999-08-22 01:46:57 +00:00
|
|
|
getutimes(usrtvp, tsp)
|
|
|
|
const struct timeval *usrtvp;
|
|
|
|
struct timespec *tsp;
|
|
|
|
{
|
|
|
|
struct timeval tv[2];
|
|
|
|
int error;
|
|
|
|
|
|
|
|
if (usrtvp == NULL) {
|
1999-08-22 16:50:30 +00:00
|
|
|
microtime(&tv[0]);
|
|
|
|
TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]);
|
1999-08-22 01:46:57 +00:00
|
|
|
tsp[1] = tsp[0];
|
|
|
|
} else {
|
|
|
|
if ((error = copyin(usrtvp, tv, sizeof (tv))) != 0)
|
|
|
|
return (error);
|
|
|
|
TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]);
|
|
|
|
TIMEVAL_TO_TIMESPEC(&tv[1], &tsp[1]);
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2000-09-14 19:13:59 +00:00
|
|
|
/*
|
|
|
|
* Common implementation code for utimes(), lutimes(), and futimes().
|
|
|
|
*/
|
1999-08-22 01:46:57 +00:00
|
|
|
static int
|
2001-09-12 08:38:13 +00:00
|
|
|
setutimes(td, vp, ts, nullflag)
|
|
|
|
struct thread *td;
|
1998-05-11 03:55:28 +00:00
|
|
|
struct vnode *vp;
|
1999-08-22 01:46:57 +00:00
|
|
|
const struct timespec *ts;
|
1998-05-11 03:55:28 +00:00
|
|
|
int nullflag;
|
|
|
|
{
|
|
|
|
int error;
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1998-05-11 03:55:28 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
|
|
|
|
return (error);
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
|
1995-10-22 09:32:48 +00:00
|
|
|
VATTR_NULL(&vattr);
|
1999-08-22 01:46:57 +00:00
|
|
|
vattr.va_atime = ts[0];
|
|
|
|
vattr.va_mtime = ts[1];
|
1998-05-11 03:55:28 +00:00
|
|
|
if (nullflag)
|
|
|
|
vattr.va_vaflags |= VA_UTIMES_NULL;
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VOP_SETATTR(vp, &vattr, td->td_proc->p_ucred, td);
|
|
|
|
VOP_UNLOCK(vp, 0, td);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1998-05-11 03:55:28 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set the access and modification times of a file.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct utimes_args {
|
|
|
|
char *path;
|
|
|
|
struct timeval *tptr;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
utimes(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct utimes_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(struct timeval *) tptr;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
1999-08-22 01:46:57 +00:00
|
|
|
struct timespec ts[2];
|
|
|
|
struct timeval *usrtvp;
|
1994-05-24 10:09:53 +00:00
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1999-08-22 01:46:57 +00:00
|
|
|
usrtvp = SCARG(uap, tptr);
|
|
|
|
if ((error = getutimes(usrtvp, ts)) != 0)
|
|
|
|
return (error);
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2001-09-12 08:38:13 +00:00
|
|
|
error = setutimes(td, nd.ni_vp, ts, usrtvp == NULL);
|
1998-05-11 03:55:28 +00:00
|
|
|
vrele(nd.ni_vp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1998-05-11 03:55:28 +00:00
|
|
|
/*
|
|
|
|
* Set the access and modification times of a file.
|
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct lutimes_args {
|
|
|
|
char *path;
|
|
|
|
struct timeval *tptr;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
lutimes(td, uap)
|
|
|
|
struct thread *td;
|
1998-05-11 03:55:28 +00:00
|
|
|
register struct lutimes_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(struct timeval *) tptr;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
1999-08-22 01:46:57 +00:00
|
|
|
struct timespec ts[2];
|
|
|
|
struct timeval *usrtvp;
|
1998-05-11 03:55:28 +00:00
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1999-08-22 01:46:57 +00:00
|
|
|
usrtvp = SCARG(uap, tptr);
|
|
|
|
if ((error = getutimes(usrtvp, ts)) != 0)
|
|
|
|
return (error);
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1998-05-11 03:55:28 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2001-09-12 08:38:13 +00:00
|
|
|
error = setutimes(td, nd.ni_vp, ts, usrtvp == NULL);
|
1998-05-11 03:55:28 +00:00
|
|
|
vrele(nd.ni_vp);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set the access and modification times of a file.
|
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct futimes_args {
|
|
|
|
int fd;
|
|
|
|
struct timeval *tptr;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
futimes(td, uap)
|
|
|
|
struct thread *td;
|
1998-05-11 03:55:28 +00:00
|
|
|
register struct futimes_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(struct timeval *) tptr;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
1999-08-22 01:46:57 +00:00
|
|
|
struct timespec ts[2];
|
1998-05-11 03:55:28 +00:00
|
|
|
struct file *fp;
|
1999-08-22 01:46:57 +00:00
|
|
|
struct timeval *usrtvp;
|
1998-05-11 03:55:28 +00:00
|
|
|
int error;
|
|
|
|
|
1999-08-22 01:46:57 +00:00
|
|
|
usrtvp = SCARG(uap, tptr);
|
|
|
|
if ((error = getutimes(usrtvp, ts)) != 0)
|
|
|
|
return (error);
|
2001-09-12 08:38:13 +00:00
|
|
|
if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
|
1998-05-11 03:55:28 +00:00
|
|
|
return (error);
|
2001-09-12 08:38:13 +00:00
|
|
|
return (setutimes(td, (struct vnode *)fp->f_data, ts, usrtvp == NULL));
|
1998-05-11 03:55:28 +00:00
|
|
|
}
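/*
 * Illustrative userland sketch (not part of this file): utimes(2),
 * lutimes(2) and futimes(2) share getutimes()/setutimes() above.  A
 * NULL times pointer means "set both stamps to the current time", just
 * as getutimes() does with microtime().  The path is an arbitrary
 * example.
 */
#include <sys/time.h>
#include <err.h>

static void
touch_times(const char *path, int fd)
{
	struct timeval tv[2];

	/* Explicit timestamps: access time in tv[0], modification in tv[1]. */
	if (gettimeofday(&tv[0], NULL) == -1)
		err(1, "gettimeofday");
	tv[1] = tv[0];
	if (utimes(path, tv) == -1)
		err(1, "utimes %s", path);

	/* A NULL pointer asks the kernel for "now". */
	if (futimes(fd, NULL) == -1)
		err(1, "futimes");
}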
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Truncate a file given its path name.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct truncate_args {
|
|
|
|
char *path;
|
|
|
|
int pad;
|
|
|
|
off_t length;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
truncate(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct truncate_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) pad;
|
|
|
|
syscallarg(off_t) length;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
|
|
|
struct vnode *vp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1994-09-02 10:23:43 +00:00
|
|
|
if (uap->length < 0)
|
|
|
|
return (EINVAL);
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
vp = nd.ni_vp;
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0) {
|
|
|
|
vrele(vp);
|
|
|
|
return (error);
|
|
|
|
}
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (vp->v_type == VDIR)
|
|
|
|
error = EISDIR;
|
|
|
|
else if ((error = vn_writechk(vp)) == 0 &&
|
2001-09-12 08:38:13 +00:00
|
|
|
(error = VOP_ACCESS(vp, VWRITE, td->td_proc->p_ucred, td)) == 0) {
|
1994-05-24 10:09:53 +00:00
|
|
|
VATTR_NULL(&vattr);
|
1997-02-10 02:22:35 +00:00
|
|
|
vattr.va_size = SCARG(uap, length);
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VOP_SETATTR(vp, &vattr, td->td_proc->p_ucred, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
vput(vp);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Truncate a file given a file descriptor.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct ftruncate_args {
|
|
|
|
int fd;
|
|
|
|
int pad;
|
|
|
|
off_t length;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
ftruncate(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct ftruncate_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(int) pad;
|
|
|
|
syscallarg(off_t) length;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
struct vnode *vp;
|
|
|
|
struct file *fp;
|
|
|
|
int error;
|
|
|
|
|
1994-09-02 10:23:43 +00:00
|
|
|
if (uap->length < 0)
|
|
|
|
return (EINVAL);
|
2001-09-12 08:38:13 +00:00
|
|
|
if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
if ((fp->f_flag & FWRITE) == 0)
|
|
|
|
return (EINVAL);
|
|
|
|
vp = (struct vnode *)fp->f_data;
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
|
|
|
|
return (error);
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (vp->v_type == VDIR)
|
|
|
|
error = EISDIR;
|
|
|
|
else if ((error = vn_writechk(vp)) == 0) {
|
|
|
|
VATTR_NULL(&vattr);
|
1997-02-10 02:22:35 +00:00
|
|
|
vattr.va_size = SCARG(uap, length);
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VOP_SETATTR(vp, &vattr, fp->f_cred, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_UNLOCK(vp, 0, td);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
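/*
 * Illustrative userland sketch (not part of this file): truncate(2) and
 * ftruncate(2) as implemented above.  The kernel rejects negative
 * lengths with EINVAL and directories with EISDIR, and ftruncate()
 * needs a writable descriptor.  The path and length are arbitrary
 * examples.
 */
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <err.h>

static void
shrink(const char *path, off_t length)
{
	int fd;

	if (truncate(path, length) == -1)
		err(1, "truncate %s", path);

	/* Same operation through an open, writable descriptor. */
	if ((fd = open(path, O_WRONLY)) == -1)
		err(1, "open %s", path);
	if (ftruncate(fd, length) == -1)
		err(1, "ftruncate %s", path);
	close(fd);
}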
|
|
|
|
|
|
|
|
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
|
|
|
|
/*
|
|
|
|
* Truncate a file given its path name.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct otruncate_args {
|
|
|
|
char *path;
|
|
|
|
long length;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
otruncate(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct otruncate_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(long) length;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
1997-02-10 02:22:35 +00:00
|
|
|
struct truncate_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) pad;
|
|
|
|
syscallarg(off_t) length;
|
|
|
|
} */ nuap;
|
|
|
|
|
|
|
|
SCARG(&nuap, path) = SCARG(uap, path);
|
|
|
|
SCARG(&nuap, length) = SCARG(uap, length);
|
2001-09-12 08:38:13 +00:00
|
|
|
return (truncate(td, &nuap));
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Truncate a file given a file descriptor.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct oftruncate_args {
|
|
|
|
int fd;
|
|
|
|
long length;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
oftruncate(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct oftruncate_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(long) length;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
1997-02-10 02:22:35 +00:00
|
|
|
struct ftruncate_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(int) pad;
|
|
|
|
syscallarg(off_t) length;
|
|
|
|
} */ nuap;
|
|
|
|
|
|
|
|
SCARG(&nuap, fd) = SCARG(uap, fd);
|
|
|
|
SCARG(&nuap, length) = SCARG(uap, length);
|
2001-09-12 08:38:13 +00:00
|
|
|
return (ftruncate(td, &nuap));
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
#endif /* COMPAT_43 || COMPAT_SUNOS */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Sync an open file.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct fsync_args {
|
|
|
|
int fd;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
fsync(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
struct fsync_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct vnode *vp;
|
|
|
|
struct mount *mp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct file *fp;
|
2000-09-12 09:49:08 +00:00
|
|
|
vm_object_t obj;
|
1994-05-24 10:09:53 +00:00
|
|
|
int error;
|
|
|
|
|
2001-07-04 16:20:28 +00:00
|
|
|
GIANT_REQUIRED;
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
vp = (struct vnode *)fp->f_data;
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
|
|
|
|
return (error);
|
2001-09-12 08:38:13 +00:00
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
|
2001-05-19 01:28:09 +00:00
|
|
|
if (VOP_GETVOBJECT(vp, &obj) == 0) {
|
2000-09-12 09:49:08 +00:00
|
|
|
vm_object_page_clean(obj, 0, 0, 0);
|
2001-05-19 01:28:09 +00:00
|
|
|
}
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, td);
|
2000-07-03 13:26:54 +00:00
|
|
|
#ifdef SOFTUPDATES
|
2000-06-16 14:32:13 +00:00
|
|
|
if (error == 0 && vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
|
|
|
|
error = softdep_fsync(vp);
|
2000-07-03 13:26:54 +00:00
|
|
|
#endif
|
2000-06-16 14:32:13 +00:00
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_UNLOCK(vp, 0, td);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
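/*
 * Illustrative userland sketch (not part of this file): fsync(2) pushes
 * the dirty pages and buffers of one open file to stable storage, which
 * is what the VOP_FSYNC(..., MNT_WAIT, ...) call above does
 * synchronously.  The write pattern is an arbitrary example.
 */
#include <unistd.h>
#include <err.h>

static void
write_and_sync(int fd, const void *buf, size_t len)
{
	if (write(fd, buf, len) == -1)
		err(1, "write");
	/* Block until the data has been committed to disk. */
	if (fsync(fd) == -1)
		err(1, "fsync");
}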
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Rename files. Source and destination must either both be directories,
|
|
|
|
* or both not be directories. If target is a directory, it must be empty.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct rename_args {
|
|
|
|
char *from;
|
|
|
|
char *to;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
rename(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct rename_args /* {
|
|
|
|
syscallarg(char *) from;
|
|
|
|
syscallarg(char *) to;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
|
|
|
struct vnode *tvp, *fvp, *tdvp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct nameidata fromnd, tond;
|
|
|
|
int error;
|
|
|
|
|
2000-01-10 00:08:53 +00:00
|
|
|
bwillwrite();
|
1994-05-24 10:09:53 +00:00
|
|
|
NDINIT(&fromnd, DELETE, WANTPARENT | SAVESTART, UIO_USERSPACE,
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, from), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&fromnd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
fvp = fromnd.ni_vp;
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(fvp, &mp, V_WAIT | PCATCH)) != 0) {
|
|
|
|
NDFREE(&fromnd, NDF_ONLY_PNBUF);
|
|
|
|
vrele(fromnd.ni_dvp);
|
|
|
|
vrele(fvp);
|
|
|
|
goto out1;
|
|
|
|
}
|
Make our v_usecount vnode reference count work identically to the
original BSD code. The association between the vnode and the vm_object
no longer includes reference counts. The major difference is that
vm_objects are no longer freed gratuitously from the vnode, and so
once an object is created for the vnode, it will last as long as the
vnode does.
When a vnode object reference count is incremented, the underlying
vnode reference count is incremented as well. The two "objects" are now
more intimately related, and so the interactions are now much less
complex.
Vnodes are now normally placed onto the free queue with an object still
attached. The rundown of the object happens at vnode rundown time, and
happens with exactly the same filesystem semantics as the original VFS
code. There is absolutely no need for vnode_pager_uncache and other
travesties like that anymore.
A side-effect of these changes is that SMP locking should be much simpler,
the I/O copyin/copyout optimizations work, NFS should be more ponderable,
and further work on layered filesystems should be less frustrating, because
of the totally coherent management of the vnode objects and vnodes.
Please be careful with your system while running this code, but I would
greatly appreciate feedback as soon as reasonably possible.
1998-01-06 05:26:17 +00:00
|
|
|
NDINIT(&tond, RENAME, LOCKPARENT | LOCKLEAF | NOCACHE | SAVESTART | NOOBJ,
|
2001-09-12 08:38:13 +00:00
|
|
|
UIO_USERSPACE, SCARG(uap, to), td);
|
1995-07-31 00:35:58 +00:00
|
|
|
if (fromnd.ni_vp->v_type == VDIR)
|
|
|
|
tond.ni_cnd.cn_flags |= WILLBEDIR;
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&tond)) != 0) {
|
1995-11-18 11:35:05 +00:00
|
|
|
/* Translate error code for rename("dir1", "dir2/."). */
|
|
|
|
if (error == EISDIR && fvp->v_type == VDIR)
|
|
|
|
error = EINVAL;
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&fromnd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vrele(fromnd.ni_dvp);
|
|
|
|
vrele(fvp);
|
|
|
|
goto out1;
|
|
|
|
}
|
|
|
|
tdvp = tond.ni_dvp;
|
|
|
|
tvp = tond.ni_vp;
|
|
|
|
if (tvp != NULL) {
|
|
|
|
if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
|
|
|
|
error = ENOTDIR;
|
|
|
|
goto out;
|
|
|
|
} else if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
|
|
|
|
error = EISDIR;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (fvp == tdvp)
|
|
|
|
error = EINVAL;
|
|
|
|
/*
|
|
|
|
* If source is the same as the destination (that is the
|
|
|
|
* same inode number with the same name in the same directory),
|
|
|
|
* then there is nothing to do.
|
|
|
|
*/
|
|
|
|
if (fvp == tvp && fromnd.ni_dvp == tdvp &&
|
|
|
|
fromnd.ni_cnd.cn_namelen == tond.ni_cnd.cn_namelen &&
|
|
|
|
!bcmp(fromnd.ni_cnd.cn_nameptr, tond.ni_cnd.cn_nameptr,
|
|
|
|
fromnd.ni_cnd.cn_namelen))
|
|
|
|
error = -1;
|
|
|
|
out:
|
|
|
|
if (!error) {
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_LEASE(tdvp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
1998-01-06 05:26:17 +00:00
|
|
|
if (fromnd.ni_dvp != tdvp) {
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_LEASE(fromnd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
1998-01-06 05:26:17 +00:00
|
|
|
}
|
1995-03-19 11:16:58 +00:00
|
|
|
if (tvp) {
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_LEASE(tvp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
1995-03-19 11:16:58 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
error = VOP_RENAME(fromnd.ni_dvp, fromnd.ni_vp, &fromnd.ni_cnd,
|
|
|
|
tond.ni_dvp, tond.ni_vp, &tond.ni_cnd);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&fromnd, NDF_ONLY_PNBUF);
|
|
|
|
NDFREE(&tond, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
} else {
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&fromnd, NDF_ONLY_PNBUF);
|
|
|
|
NDFREE(&tond, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (tdvp == tvp)
|
|
|
|
vrele(tdvp);
|
|
|
|
else
|
|
|
|
vput(tdvp);
|
|
|
|
if (tvp)
|
|
|
|
vput(tvp);
|
|
|
|
vrele(fromnd.ni_dvp);
|
|
|
|
vrele(fvp);
|
|
|
|
}
|
|
|
|
vrele(tond.ni_startdir);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1997-04-04 17:46:21 +00:00
|
|
|
ASSERT_VOP_UNLOCKED(fromnd.ni_dvp, "rename");
|
|
|
|
ASSERT_VOP_UNLOCKED(fromnd.ni_vp, "rename");
|
|
|
|
ASSERT_VOP_UNLOCKED(tond.ni_dvp, "rename");
|
|
|
|
ASSERT_VOP_UNLOCKED(tond.ni_vp, "rename");
|
1994-05-24 10:09:53 +00:00
|
|
|
out1:
|
|
|
|
if (fromnd.ni_startdir)
|
|
|
|
vrele(fromnd.ni_startdir);
|
|
|
|
if (error == -1)
|
|
|
|
return (0);
|
|
|
|
return (error);
|
|
|
|
}
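/*
 * Illustrative userland sketch (not part of this file): rename(2) as
 * implemented above replaces an existing "to" name, and renaming a file
 * onto itself succeeds (the error = -1 / return 0 case).  The temporary
 * and final path names are arbitrary examples.
 */
#include <stdio.h>
#include <err.h>

static void
commit_tempfile(const char *tmppath, const char *finalpath)
{
	/*
	 * Classic pattern: build the new contents under a temporary
	 * name, then move it into place in a single rename.
	 */
	if (rename(tmppath, finalpath) == -1)
		err(1, "rename %s -> %s", tmppath, finalpath);
}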
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make a directory file.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct mkdir_args {
|
|
|
|
char *path;
|
|
|
|
int mode;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
mkdir(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct mkdir_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) mode;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
|
|
|
|
return (vn_mkdir(uap->path, uap->mode, UIO_USERSPACE, td));
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
vn_mkdir(path, mode, segflg, td)
|
|
|
|
char *path;
|
|
|
|
int mode;
|
|
|
|
enum uio_seg segflg;
|
|
|
|
struct thread *td;
|
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
|
|
|
struct vnode *vp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
restart:
|
2000-01-10 00:08:53 +00:00
|
|
|
bwillwrite();
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, CREATE, LOCKPARENT, segflg, path, td);
|
1995-07-31 00:35:58 +00:00
|
|
|
nd.ni_cnd.cn_flags |= WILLBEDIR;
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
vp = nd.ni_vp;
|
|
|
|
if (vp != NULL) {
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vrele(vp);
|
2000-07-11 22:07:57 +00:00
|
|
|
vput(nd.ni_dvp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (EEXIST);
|
|
|
|
}
|
2000-07-11 22:07:57 +00:00
|
|
|
if (vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
|
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
|
|
|
vput(nd.ni_dvp);
|
|
|
|
if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
|
|
|
|
return (error);
|
|
|
|
goto restart;
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
VATTR_NULL(&vattr);
|
|
|
|
vattr.va_type = VDIR;
|
2001-09-12 08:38:13 +00:00
|
|
|
vattr.va_mode = (mode & ACCESSPERMS) &~ td->td_proc->p_fd->fd_cmask;
|
|
|
|
VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
1994-05-24 10:09:53 +00:00
|
|
|
error = VOP_MKDIR(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1998-05-07 04:58:58 +00:00
|
|
|
vput(nd.ni_dvp);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (!error)
|
|
|
|
vput(nd.ni_vp);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1997-04-04 17:46:21 +00:00
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_dvp, "mkdir");
|
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_vp, "mkdir");
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
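/*
 * Illustrative userland sketch (not part of this file): mkdir(2) is a
 * thin wrapper around vn_mkdir() above; the requested mode is masked
 * with the process umask (fd_cmask) before VOP_MKDIR() runs.  The path
 * and 0755 mode are arbitrary examples.
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
#include <err.h>

static void
ensure_dir(const char *path)
{
	if (mkdir(path, 0755) == -1 && errno != EEXIST)
		err(1, "mkdir %s", path);
}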
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Remove a directory file.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct rmdir_args {
|
|
|
|
char *path;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
rmdir(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
struct rmdir_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
|
|
|
struct vnode *vp;
|
1994-05-24 10:09:53 +00:00
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
restart:
|
2000-01-10 00:08:53 +00:00
|
|
|
bwillwrite();
|
1997-02-10 02:22:35 +00:00
|
|
|
NDINIT(&nd, DELETE, LOCKPARENT | LOCKLEAF, UIO_USERSPACE,
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
vp = nd.ni_vp;
|
|
|
|
if (vp->v_type != VDIR) {
|
|
|
|
error = ENOTDIR;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* No rmdir "." please.
|
|
|
|
*/
|
|
|
|
if (nd.ni_dvp == vp) {
|
|
|
|
error = EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* The root of a mounted filesystem cannot be deleted.
|
|
|
|
*/
|
2000-07-11 22:07:57 +00:00
|
|
|
if (vp->v_flag & VROOT) {
|
1994-05-24 10:09:53 +00:00
|
|
|
error = EBUSY;
|
2000-07-11 22:07:57 +00:00
|
|
|
goto out;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2000-07-11 22:07:57 +00:00
|
|
|
if (vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
|
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
|
|
|
if (nd.ni_dvp == vp)
|
|
|
|
vrele(nd.ni_dvp);
|
|
|
|
else
|
|
|
|
vput(nd.ni_dvp);
|
|
|
|
vput(vp);
|
|
|
|
if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
|
|
|
|
return (error);
|
|
|
|
goto restart;
|
|
|
|
}
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
|
|
|
VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
2000-07-11 22:07:57 +00:00
|
|
|
error = VOP_RMDIR(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd);
|
|
|
|
vn_finished_write(mp);
|
1999-12-15 23:02:35 +00:00
|
|
|
out:
|
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1998-05-07 04:58:58 +00:00
|
|
|
if (nd.ni_dvp == vp)
|
|
|
|
vrele(nd.ni_dvp);
|
|
|
|
else
|
|
|
|
vput(nd.ni_dvp);
|
2000-07-11 22:07:57 +00:00
|
|
|
vput(vp);
|
1997-04-04 17:46:21 +00:00
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_dvp, "rmdir");
|
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_vp, "rmdir");
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
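/*
 * Illustrative userland sketch (not part of this file): rmdir(2) as
 * implemented above refuses "." (EINVAL), non-directories (ENOTDIR)
 * and the root of a mounted filesystem (EBUSY).  The path is an
 * arbitrary example.
 */
#include <unistd.h>
#include <errno.h>
#include <err.h>

static void
remove_dir_if_present(const char *path)
{
	if (rmdir(path) == -1 && errno != ENOENT)
		err(1, "rmdir %s", path);
}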
|
|
|
|
|
|
|
|
#ifdef COMPAT_43
|
|
|
|
/*
|
|
|
|
* Read a block of directory entries in a file system independent format.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct ogetdirentries_args {
|
|
|
|
int fd;
|
|
|
|
char *buf;
|
|
|
|
u_int count;
|
|
|
|
long *basep;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
ogetdirentries(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct ogetdirentries_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(char *) buf;
|
|
|
|
syscallarg(u_int) count;
|
|
|
|
syscallarg(long *) basep;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
1998-11-03 08:01:48 +00:00
|
|
|
struct vnode *vp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct file *fp;
|
|
|
|
struct uio auio, kuio;
|
|
|
|
struct iovec aiov, kiov;
|
|
|
|
struct dirent *dp, *edp;
|
|
|
|
caddr_t dirbuf;
|
1997-02-10 02:22:35 +00:00
|
|
|
int error, eofflag, readcnt;
|
1994-05-24 10:09:53 +00:00
|
|
|
long loff;
|
|
|
|
|
2001-08-10 22:14:18 +00:00
|
|
|
/* XXX arbitrary sanity limit on `count'. */
|
|
|
|
if (SCARG(uap, count) > 64 * 1024)
|
|
|
|
return (EINVAL);
|
2001-09-12 08:38:13 +00:00
|
|
|
if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
if ((fp->f_flag & FREAD) == 0)
|
|
|
|
return (EBADF);
|
|
|
|
vp = (struct vnode *)fp->f_data;
|
1997-02-10 02:22:35 +00:00
|
|
|
unionread:
|
1994-05-24 10:09:53 +00:00
|
|
|
if (vp->v_type != VDIR)
|
|
|
|
return (EINVAL);
|
1997-02-10 02:22:35 +00:00
|
|
|
aiov.iov_base = SCARG(uap, buf);
|
|
|
|
aiov.iov_len = SCARG(uap, count);
|
1994-05-24 10:09:53 +00:00
|
|
|
auio.uio_iov = &aiov;
|
|
|
|
auio.uio_iovcnt = 1;
|
|
|
|
auio.uio_rw = UIO_READ;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
2001-09-12 08:38:13 +00:00
|
|
|
auio.uio_td = td;
|
1997-02-10 02:22:35 +00:00
|
|
|
auio.uio_resid = SCARG(uap, count);
|
2001-09-12 08:38:13 +00:00
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
loff = auio.uio_offset = fp->f_offset;
|
|
|
|
# if (BYTE_ORDER != LITTLE_ENDIAN)
|
|
|
|
if (vp->v_mount->mnt_maxsymlinklen <= 0) {
|
1997-02-10 02:22:35 +00:00
|
|
|
error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag,
|
|
|
|
NULL, NULL);
|
1994-05-24 10:09:53 +00:00
|
|
|
fp->f_offset = auio.uio_offset;
|
|
|
|
} else
|
|
|
|
# endif
|
|
|
|
{
|
|
|
|
kuio = auio;
|
|
|
|
kuio.uio_iov = &kiov;
|
|
|
|
kuio.uio_segflg = UIO_SYSSPACE;
|
1997-02-10 02:22:35 +00:00
|
|
|
kiov.iov_len = SCARG(uap, count);
|
|
|
|
MALLOC(dirbuf, caddr_t, SCARG(uap, count), M_TEMP, M_WAITOK);
|
1994-05-24 10:09:53 +00:00
|
|
|
kiov.iov_base = dirbuf;
|
1997-02-10 02:22:35 +00:00
|
|
|
error = VOP_READDIR(vp, &kuio, fp->f_cred, &eofflag,
|
|
|
|
NULL, NULL);
|
1994-05-24 10:09:53 +00:00
|
|
|
fp->f_offset = kuio.uio_offset;
|
|
|
|
if (error == 0) {
|
1997-02-10 02:22:35 +00:00
|
|
|
readcnt = SCARG(uap, count) - kuio.uio_resid;
|
1994-05-24 10:09:53 +00:00
|
|
|
edp = (struct dirent *)&dirbuf[readcnt];
|
|
|
|
for (dp = (struct dirent *)dirbuf; dp < edp; ) {
|
|
|
|
# if (BYTE_ORDER == LITTLE_ENDIAN)
|
|
|
|
/*
|
|
|
|
* The expected low byte of
|
|
|
|
* dp->d_namlen is our dp->d_type.
|
|
|
|
* The high MBZ byte of dp->d_namlen
|
|
|
|
* is our dp->d_namlen.
|
|
|
|
*/
|
|
|
|
dp->d_type = dp->d_namlen;
|
|
|
|
dp->d_namlen = 0;
|
|
|
|
# else
|
|
|
|
/*
|
|
|
|
* The dp->d_type is the high byte
|
|
|
|
* of the expected dp->d_namlen,
|
|
|
|
* so must be zero'ed.
|
|
|
|
*/
|
|
|
|
dp->d_type = 0;
|
|
|
|
# endif
|
|
|
|
if (dp->d_reclen > 0) {
|
|
|
|
dp = (struct dirent *)
|
|
|
|
((char *)dp + dp->d_reclen);
|
|
|
|
} else {
|
|
|
|
error = EIO;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (dp >= edp)
|
|
|
|
error = uiomove(dirbuf, readcnt, &auio);
|
|
|
|
}
|
|
|
|
FREE(dirbuf, M_TEMP);
|
|
|
|
}
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_UNLOCK(vp, 0, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
1999-03-03 02:35:51 +00:00
|
|
|
if (SCARG(uap, count) == auio.uio_resid) {
|
|
|
|
if (union_dircheckp) {
|
2001-09-12 08:38:13 +00:00
|
|
|
error = union_dircheckp(td, &vp, fp);
|
1999-03-03 02:35:51 +00:00
|
|
|
if (error == -1)
|
|
|
|
goto unionread;
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
if ((vp->v_flag & VROOT) &&
|
|
|
|
(vp->v_mount->mnt_flag & MNT_UNION)) {
|
|
|
|
struct vnode *tvp = vp;
|
|
|
|
vp = vp->v_mount->mnt_vnodecovered;
|
|
|
|
VREF(vp);
|
|
|
|
fp->f_data = (caddr_t) vp;
|
|
|
|
fp->f_offset = 0;
|
|
|
|
vrele(tvp);
|
1997-02-10 02:22:35 +00:00
|
|
|
goto unionread;
|
1999-03-03 02:35:51 +00:00
|
|
|
}
|
1999-02-27 07:06:05 +00:00
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
error = copyout((caddr_t)&loff, (caddr_t)SCARG(uap, basep),
|
|
|
|
sizeof(long));
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = SCARG(uap, count) - auio.uio_resid;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
#endif /* COMPAT_43 */
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Read a block of directory entries in a file system independent format.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct getdirentries_args {
|
|
|
|
int fd;
|
|
|
|
char *buf;
|
|
|
|
u_int count;
|
|
|
|
long *basep;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
getdirentries(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct getdirentries_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(char *) buf;
|
|
|
|
syscallarg(u_int) count;
|
|
|
|
syscallarg(long *) basep;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
1998-11-03 08:01:48 +00:00
|
|
|
struct vnode *vp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct file *fp;
|
|
|
|
struct uio auio;
|
|
|
|
struct iovec aiov;
|
|
|
|
long loff;
|
1997-02-10 02:22:35 +00:00
|
|
|
int error, eofflag;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
if ((fp->f_flag & FREAD) == 0)
|
|
|
|
return (EBADF);
|
|
|
|
vp = (struct vnode *)fp->f_data;
|
|
|
|
unionread:
|
|
|
|
if (vp->v_type != VDIR)
|
|
|
|
return (EINVAL);
|
1997-02-10 02:22:35 +00:00
|
|
|
aiov.iov_base = SCARG(uap, buf);
|
|
|
|
aiov.iov_len = SCARG(uap, count);
|
1994-05-24 10:09:53 +00:00
|
|
|
auio.uio_iov = &aiov;
|
|
|
|
auio.uio_iovcnt = 1;
|
|
|
|
auio.uio_rw = UIO_READ;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
2001-09-12 08:38:13 +00:00
|
|
|
auio.uio_td = td;
|
1997-02-10 02:22:35 +00:00
|
|
|
auio.uio_resid = SCARG(uap, count);
|
2001-09-12 08:38:13 +00:00
|
|
|
/* vn_lock(vp, LK_SHARED | LK_RETRY, td); */
|
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
loff = auio.uio_offset = fp->f_offset;
|
1997-02-10 02:22:35 +00:00
|
|
|
error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, NULL, NULL);
|
1994-05-24 10:09:53 +00:00
|
|
|
fp->f_offset = auio.uio_offset;
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_UNLOCK(vp, 0, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
1999-03-03 02:35:51 +00:00
|
|
|
if (SCARG(uap, count) == auio.uio_resid) {
|
|
|
|
if (union_dircheckp) {
|
2001-09-12 08:38:13 +00:00
|
|
|
error = union_dircheckp(td, &vp, fp);
|
1999-03-03 02:35:51 +00:00
|
|
|
if (error == -1)
|
|
|
|
goto unionread;
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
if ((vp->v_flag & VROOT) &&
|
|
|
|
(vp->v_mount->mnt_flag & MNT_UNION)) {
|
|
|
|
struct vnode *tvp = vp;
|
|
|
|
vp = vp->v_mount->mnt_vnodecovered;
|
|
|
|
VREF(vp);
|
|
|
|
fp->f_data = (caddr_t) vp;
|
|
|
|
fp->f_offset = 0;
|
|
|
|
vrele(tvp);
|
1994-05-24 10:09:53 +00:00
|
|
|
goto unionread;
|
1999-03-03 02:35:51 +00:00
|
|
|
}
|
1999-02-27 07:06:05 +00:00
|
|
|
}
|
1998-05-11 03:55:28 +00:00
|
|
|
if (SCARG(uap, basep) != NULL) {
|
|
|
|
error = copyout((caddr_t)&loff, (caddr_t)SCARG(uap, basep),
|
|
|
|
sizeof(long));
|
|
|
|
}
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = SCARG(uap, count) - auio.uio_resid;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
1998-05-11 03:55:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct getdents_args {
|
|
|
|
int fd;
|
|
|
|
char *buf;
|
|
|
|
size_t count;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
getdents(td, uap)
|
|
|
|
struct thread *td;
|
1998-05-11 03:55:28 +00:00
|
|
|
register struct getdents_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(char *) buf;
|
|
|
|
syscallarg(u_int) count;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
|
|
|
struct getdirentries_args ap;
|
|
|
|
ap.fd = uap->fd;
|
|
|
|
ap.buf = uap->buf;
|
|
|
|
ap.count = uap->count;
|
|
|
|
ap.basep = NULL;
|
2001-09-12 08:38:13 +00:00
|
|
|
return (getdirentries(td, &ap));
|
1998-05-11 03:55:28 +00:00
|
|
|
}
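/*
 * Illustrative userland sketch (not part of this file): walking a
 * directory with getdirentries(2), advancing by d_reclen exactly as the
 * compatibility code above does.  The long *basep prototype of the libc
 * of this era is assumed, and the buffer size is an arbitrary example.
 */
#include <sys/types.h>
#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <err.h>

static void
list_dir(const char *path)
{
	char buf[4096], *bp;
	struct dirent *dp;
	long base;
	int fd, n;

	if ((fd = open(path, O_RDONLY)) == -1)
		err(1, "open %s", path);
	while ((n = getdirentries(fd, buf, sizeof(buf), &base)) > 0) {
		/* Entries are variable length; step by d_reclen. */
		bp = buf;
		while (bp < buf + n) {
			dp = (struct dirent *)bp;
			printf("%s\n", dp->d_name);
			bp += dp->d_reclen;
		}
	}
	if (n == -1)
		err(1, "getdirentries %s", path);
	close(fd);
}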
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Set the mode mask for creation of filesystem nodes.
|
2000-03-28 07:16:37 +00:00
|
|
|
*
|
|
|
|
* MP SAFE
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct umask_args {
|
|
|
|
int newmask;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1997-02-10 02:22:35 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
umask(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
struct umask_args /* {
|
|
|
|
syscallarg(int) newmask;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
register struct filedesc *fdp;
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
fdp = td->td_proc->p_fd;
|
|
|
|
td->td_retval[0] = fdp->fd_cmask;
|
1997-02-10 02:22:35 +00:00
|
|
|
fdp->fd_cmask = SCARG(uap, newmask) & ALLPERMS;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
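/*
 * Illustrative userland sketch (not part of this file): umask(2) above
 * simply swaps fd_cmask and returns the old value, so the usual idiom
 * is to set a new mask and later restore the previous one.  The 022
 * value is an arbitrary example.
 */
#include <sys/types.h>
#include <sys/stat.h>

static mode_t
set_tight_umask(void)
{
	mode_t old;

	old = umask(022);	/* new files get at most 0755/0644 */
	return (old);		/* caller may umask(old) to restore */
}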
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Void all references to file by ripping underlying filesystem
|
|
|
|
* away from vnode.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct revoke_args {
|
|
|
|
char *path;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
revoke(td, uap)
|
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct revoke_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
|
|
|
struct vnode *vp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
vp = nd.ni_vp;
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2000-11-02 21:14:13 +00:00
|
|
|
if (vp->v_type != VCHR) {
|
1999-01-24 06:28:37 +00:00
|
|
|
error = EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
2001-10-01 20:01:07 +00:00
|
|
|
error = VOP_GETATTR(vp, &vattr, td->td_proc->p_ucred, td);
|
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
goto out;
|
2001-10-01 20:01:07 +00:00
|
|
|
if (td->td_proc->p_ucred->cr_uid != vattr.va_uid) {
|
|
|
|
error = suser_xxx(0, td->td_proc, PRISON_ROOT);
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
}
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
|
|
|
|
goto out;
|
1999-08-26 14:53:31 +00:00
|
|
|
if (vcount(vp) > 1)
|
1997-02-10 02:22:35 +00:00
|
|
|
VOP_REVOKE(vp, REVOKEALL);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1994-05-24 10:09:53 +00:00
|
|
|
out:
|
|
|
|
vrele(vp);
|
|
|
|
return (error);
|
|
|
|
}
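/*
 * Illustrative userland sketch (not part of this file): revoke(2) is
 * traditionally used by getty/init-style programs to detach any stale
 * opens of a terminal device before reusing it; only character devices
 * are accepted (the VCHR check above).  The device path is an arbitrary
 * example.
 */
#include <unistd.h>
#include <err.h>

static void
reclaim_tty(const char *ttypath)
{
	if (revoke(ttypath) == -1)
		err(1, "revoke %s", ttypath);
}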
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Convert a user file descriptor to a kernel file entry.
|
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1994-05-24 10:09:53 +00:00
|
|
|
getvnode(fdp, fd, fpp)
|
|
|
|
struct filedesc *fdp;
|
|
|
|
int fd;
|
1995-11-13 08:22:21 +00:00
|
|
|
struct file **fpp;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
|
|
|
|
if ((u_int)fd >= fdp->fd_nfiles ||
|
|
|
|
(fp = fdp->fd_ofiles[fd]) == NULL)
|
|
|
|
return (EBADF);
|
1996-12-19 19:42:37 +00:00
|
|
|
if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (EINVAL);
|
|
|
|
*fpp = fp;
|
|
|
|
return (0);
|
|
|
|
}
|
1999-09-11 00:46:08 +00:00
|
|
|
/*
|
|
|
|
* Get (NFS) file handle
|
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct getfh_args {
|
|
|
|
char *fname;
|
|
|
|
fhandle_t *fhp;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
getfh(td, uap)
|
|
|
|
struct thread *td;
|
1999-09-11 00:46:08 +00:00
|
|
|
register struct getfh_args *uap;
|
|
|
|
{
|
|
|
|
struct nameidata nd;
|
|
|
|
fhandle_t fh;
|
|
|
|
register struct vnode *vp;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Must be super user
|
|
|
|
*/
|
2001-09-12 08:38:13 +00:00
|
|
|
error = suser_td(td);
|
1999-09-11 00:46:08 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, uap->fname, td);
|
1999-09-11 00:46:08 +00:00
|
|
|
error = namei(&nd);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1999-09-11 00:46:08 +00:00
|
|
|
vp = nd.ni_vp;
|
|
|
|
bzero(&fh, sizeof(fh));
|
|
|
|
fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid;
|
|
|
|
error = VFS_VPTOFH(vp, &fh.fh_fid);
|
|
|
|
vput(vp);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
error = copyout(&fh, uap->fhp, sizeof (fh));
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* syscall for the rpc.lockd to use to translate a NFS file handle into
|
|
|
|
* an open descriptor.
|
|
|
|
*
|
|
|
|
* warning: do not remove the suser() call or this becomes one giant
|
|
|
|
* security hole.
|
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct fhopen_args {
|
|
|
|
const struct fhandle *u_fhp;
|
|
|
|
int flags;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
fhopen(td, uap)
|
|
|
|
struct thread *td;
|
1999-09-11 00:46:08 +00:00
|
|
|
struct fhopen_args /* {
|
|
|
|
syscallarg(const struct fhandle *) u_fhp;
|
|
|
|
syscallarg(int) flags;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
1999-09-11 00:46:08 +00:00
|
|
|
struct mount *mp;
|
|
|
|
struct vnode *vp;
|
|
|
|
struct fhandle fhp;
|
|
|
|
struct vattr vat;
|
|
|
|
struct vattr *vap = &vat;
|
|
|
|
struct flock lf;
|
|
|
|
struct file *fp;
|
|
|
|
register struct filedesc *fdp = p->p_fd;
|
|
|
|
int fmode, mode, error, type;
|
|
|
|
struct file *nfp;
|
|
|
|
int indx;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Must be super user
|
|
|
|
*/
|
2001-09-12 08:38:13 +00:00
|
|
|
error = suser_td(td);
|
1999-09-11 00:46:08 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
fmode = FFLAGS(SCARG(uap, flags));
|
|
|
|
/* why not allow a non-read/write open for our lockd? */
|
|
|
|
if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT))
|
|
|
|
return (EINVAL);
|
|
|
|
error = copyin(SCARG(uap,u_fhp), &fhp, sizeof(fhp));
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
/* find the mount point */
|
|
|
|
mp = vfs_getvfs(&fhp.fh_fsid);
|
|
|
|
if (mp == NULL)
|
|
|
|
return (ESTALE);
|
|
|
|
/* now give me my vnode, it gets returned to me locked */
|
|
|
|
error = VFS_FHTOVP(mp, &fhp.fh_fid, &vp);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
/*
|
|
|
|
* from now on we have to make sure not
|
|
|
|
* to forget about the vnode
|
|
|
|
* any error that causes an abort must vput(vp)
|
|
|
|
* just set error = err and 'goto bad;'.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* from vn_open
|
|
|
|
*/
|
|
|
|
if (vp->v_type == VLNK) {
|
|
|
|
error = EMLINK;
|
|
|
|
goto bad;
|
|
|
|
}
|
|
|
|
if (vp->v_type == VSOCK) {
|
|
|
|
error = EOPNOTSUPP;
|
|
|
|
goto bad;
|
|
|
|
}
|
|
|
|
mode = 0;
|
|
|
|
if (fmode & (FWRITE | O_TRUNC)) {
|
|
|
|
if (vp->v_type == VDIR) {
|
|
|
|
error = EISDIR;
|
|
|
|
goto bad;
|
|
|
|
}
|
|
|
|
error = vn_writechk(vp);
|
|
|
|
if (error)
|
|
|
|
goto bad;
|
|
|
|
mode |= VWRITE;
|
|
|
|
}
|
|
|
|
if (fmode & FREAD)
|
|
|
|
mode |= VREAD;
|
|
|
|
if (mode) {
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VOP_ACCESS(vp, mode, p->p_ucred, td);
|
1999-09-11 00:46:08 +00:00
|
|
|
if (error)
|
|
|
|
goto bad;
|
|
|
|
}
|
|
|
|
if (fmode & O_TRUNC) {
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_UNLOCK(vp, 0, td); /* XXX */
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(NULL, &mp, V_WAIT | PCATCH)) != 0) {
|
|
|
|
vrele(vp);
|
|
|
|
return (error);
|
|
|
|
}
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
|
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); /* XXX */
|
1999-09-11 00:46:08 +00:00
|
|
|
VATTR_NULL(vap);
|
|
|
|
vap->va_size = 0;
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VOP_SETATTR(vp, vap, p->p_ucred, td);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1999-09-11 00:46:08 +00:00
|
|
|
if (error)
|
|
|
|
goto bad;
|
|
|
|
}
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VOP_OPEN(vp, fmode, p->p_ucred, td);
|
1999-09-11 00:46:08 +00:00
|
|
|
if (error)
|
|
|
|
goto bad;
|
|
|
|
/*
|
|
|
|
* Make sure that a VM object is created for VMIO support.
|
|
|
|
*/
|
|
|
|
if (vn_canvmio(vp) == TRUE) {
|
2001-09-12 08:38:13 +00:00
|
|
|
if ((error = vfs_object_create(vp, td, p->p_ucred)) != 0)
|
1999-09-11 00:46:08 +00:00
|
|
|
goto bad;
|
|
|
|
}
|
|
|
|
if (fmode & FWRITE)
|
|
|
|
vp->v_writecount++;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* end of vn_open code
|
|
|
|
*/
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
if ((error = falloc(td, &nfp, &indx)) != 0)
|
1999-09-11 00:46:08 +00:00
|
|
|
goto bad;
|
|
|
|
fp = nfp;
|
2000-11-18 21:01:04 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Hold an extra reference to avoid having fp ripped out
|
|
|
|
* from under us while we block in the lock op
|
|
|
|
*/
|
|
|
|
fhold(fp);
|
1999-09-11 00:46:08 +00:00
|
|
|
nfp->f_data = (caddr_t)vp;
|
|
|
|
nfp->f_flag = fmode & FMASK;
|
|
|
|
nfp->f_ops = &vnops;
|
|
|
|
nfp->f_type = DTYPE_VNODE;
|
|
|
|
if (fmode & (O_EXLOCK | O_SHLOCK)) {
|
|
|
|
lf.l_whence = SEEK_SET;
|
|
|
|
lf.l_start = 0;
|
|
|
|
lf.l_len = 0;
|
|
|
|
if (fmode & O_EXLOCK)
|
|
|
|
lf.l_type = F_WRLCK;
|
|
|
|
else
|
|
|
|
lf.l_type = F_RDLCK;
|
|
|
|
type = F_FLOCK;
|
|
|
|
if ((fmode & FNONBLOCK) == 0)
|
|
|
|
type |= F_WAIT;
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_UNLOCK(vp, 0, td);
|
1999-09-11 00:46:08 +00:00
|
|
|
if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
|
2000-11-18 21:01:04 +00:00
|
|
|
/*
|
|
|
|
* The lock request failed. Normally close the
|
|
|
|
* descriptor but handle the case where someone might
|
|
|
|
* have dup()d or close()d it when we weren't looking.
|
|
|
|
*/
|
|
|
|
if (fdp->fd_ofiles[indx] == fp) {
|
|
|
|
fdp->fd_ofiles[indx] = NULL;
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(fp, td);
|
2000-11-18 21:01:04 +00:00
|
|
|
}
|
|
|
|
/*
|
|
|
|
* release our private reference
|
|
|
|
*/
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(fp, td);
|
2000-11-18 21:01:04 +00:00
|
|
|
return (error);
|
1999-09-11 00:46:08 +00:00
|
|
|
}
|
2001-09-12 08:38:13 +00:00
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
|
1999-09-11 00:46:08 +00:00
|
|
|
fp->f_flag |= FHASLOCK;
|
|
|
|
}
|
2000-09-12 09:49:08 +00:00
|
|
|
if ((vp->v_type == VREG) && (VOP_GETVOBJECT(vp, NULL) != 0))
|
2001-09-12 08:38:13 +00:00
|
|
|
vfs_object_create(vp, td, p->p_ucred);
|
1999-09-11 00:46:08 +00:00
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_UNLOCK(vp, 0, td);
|
|
|
|
fdrop(fp, td);
|
|
|
|
td->td_retval[0] = indx;
|
1999-09-11 00:46:08 +00:00
|
|
|
return (0);
|
|
|
|
|
|
|
|
bad:
|
|
|
|
vput(vp);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2000-09-14 19:13:59 +00:00
|
|
|
/*
|
|
|
|
* Stat an (NFS) file handle.
|
|
|
|
*/
|
1999-09-11 00:46:08 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct fhstat_args {
|
|
|
|
struct fhandle *u_fhp;
|
|
|
|
struct stat *sb;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
fhstat(td, uap)
|
|
|
|
struct thread *td;
|
1999-09-11 00:46:08 +00:00
|
|
|
register struct fhstat_args /* {
|
|
|
|
syscallarg(struct fhandle *) u_fhp;
|
|
|
|
syscallarg(struct stat *) sb;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
|
|
|
struct stat sb;
|
|
|
|
fhandle_t fh;
|
|
|
|
struct mount *mp;
|
|
|
|
struct vnode *vp;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Must be super user
|
|
|
|
*/
|
2001-09-12 08:38:13 +00:00
|
|
|
error = suser_td(td);
|
1999-09-11 00:46:08 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
error = copyin(SCARG(uap, u_fhp), &fh, sizeof(fhandle_t));
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
|
|
|
|
return (ESTALE);
|
|
|
|
if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
|
|
|
|
return (error);
|
2001-09-12 08:38:13 +00:00
|
|
|
error = vn_stat(vp, &sb, td);
|
1999-09-11 00:46:08 +00:00
|
|
|
vput(vp);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
error = copyout(&sb, SCARG(uap, sb), sizeof(sb));
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2000-09-14 19:13:59 +00:00
|
|
|
/*
|
|
|
|
* Implement fstatfs() for (NFS) file handles.
|
|
|
|
*/
|
1999-09-11 00:46:08 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct fhstatfs_args {
|
|
|
|
struct fhandle *u_fhp;
|
|
|
|
struct statfs *buf;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
fhstatfs(td, uap)
|
|
|
|
struct thread *td;
|
1999-09-11 00:46:08 +00:00
|
|
|
struct fhstatfs_args /* {
|
|
|
|
		syscallarg(struct fhandle *) u_fhp;
|
|
|
|
		syscallarg(struct statfs *) buf;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
|
|
|
struct statfs *sp;
|
|
|
|
struct mount *mp;
|
|
|
|
struct vnode *vp;
|
|
|
|
struct statfs sb;
|
|
|
|
fhandle_t fh;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Must be super user
|
|
|
|
*/
|
2001-10-01 20:01:07 +00:00
|
|
|
error = suser_td(td);
|
|
|
|
if (error)
|
1999-09-11 00:46:08 +00:00
|
|
|
return (error);
|
|
|
|
|
|
|
|
if ((error = copyin(SCARG(uap, u_fhp), &fh, sizeof(fhandle_t))) != 0)
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
|
|
|
|
return (ESTALE);
|
|
|
|
if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
|
|
|
|
return (error);
|
|
|
|
mp = vp->v_mount;
|
|
|
|
sp = &mp->mnt_stat;
|
|
|
|
vput(vp);
|
2001-09-12 08:38:13 +00:00
|
|
|
if ((error = VFS_STATFS(mp, sp, td)) != 0)
|
1999-09-11 00:46:08 +00:00
|
|
|
return (error);
|
|
|
|
sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
|
2001-09-12 08:38:13 +00:00
|
|
|
if (suser_xxx(td->td_proc->p_ucred, 0, 0)) {
|
1999-09-11 00:46:08 +00:00
|
|
|
bcopy((caddr_t)sp, (caddr_t)&sb, sizeof(sb));
|
|
|
|
sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
|
|
|
|
sp = &sb;
|
|
|
|
}
|
|
|
|
return (copyout(sp, SCARG(uap, buf), sizeof(*sp)));
|
|
|
|
}
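/*
 * Editor's illustration, not part of the original source: the matching
 * fhstatfs(2) usage, which mirrors the kernel path above that resolves
 * the handle to its mount point and runs VFS_STATFS().  Kept under #if 0.
 */
#if 0
#include <sys/param.h>
#include <sys/mount.h>
#include <err.h>
#include <stdio.h>

void
print_fs_by_handle(fhandle_t *fhp)
{
	struct statfs sfs;

	if (fhstatfs(fhp, &sfs) == -1)
		err(1, "fhstatfs");
	/* f_mntonname and f_blocks come straight from the mount's statfs. */
	printf("%s: %lu blocks\n", sfs.f_mntonname,
	    (unsigned long)sfs.f_blocks);
}
#endif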
|
1999-12-19 06:08:07 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Syscall to push extended attribute configuration information into the
|
|
|
|
* VFS. Accepts a path, which it converts to a mountpoint, as well as
|
|
|
|
 * a command (int cmd), an attribute name, and misc data.  The attribute
 * name, when supplied, is copied into a kernel buffer by the syscall
 * before being handed to the VFS_op, so nothing below the VFS layer has
 * to reach into userspace; a NULL name is passed through unchanged.
|
|
|
|
*
|
|
|
|
* Currently this is used only by UFS Extended Attributes.
|
|
|
|
*/
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
extattrctl(td, uap)
|
|
|
|
struct thread *td;
|
1999-12-19 06:08:07 +00:00
|
|
|
struct extattrctl_args *uap;
|
|
|
|
{
|
2001-03-15 02:54:29 +00:00
|
|
|
struct vnode *filename_vp;
|
1999-12-19 06:08:07 +00:00
|
|
|
struct nameidata nd;
|
|
|
|
struct mount *mp;
|
2001-03-15 02:54:29 +00:00
|
|
|
char attrname[EXTATTR_MAXNAMELEN];
|
1999-12-19 06:08:07 +00:00
|
|
|
int error;
|
|
|
|
|
2001-03-15 02:54:29 +00:00
|
|
|
/*
|
|
|
|
* SCARG(uap, attrname) not always defined. We check again later
|
|
|
|
* when we invoke the VFS call so as to pass in NULL there if needed.
|
|
|
|
*/
|
|
|
|
if (SCARG(uap, attrname) != NULL) {
|
|
|
|
error = copyinstr(SCARG(uap, attrname), attrname,
|
|
|
|
EXTATTR_MAXNAMELEN, NULL);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SCARG(uap, filename) not always defined. If it is, grab
|
|
|
|
* a vnode lock, which VFS_EXTATTRCTL() will later release.
|
|
|
|
*/
|
|
|
|
filename_vp = NULL;
|
|
|
|
if (SCARG(uap, filename) != NULL) {
|
2001-06-06 23:34:38 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, filename), td);
|
2001-03-15 02:54:29 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
|
|
|
return (error);
|
|
|
|
filename_vp = nd.ni_vp;
|
|
|
|
NDFREE(&nd, NDF_NO_VP_RELE | NDF_NO_VP_UNLOCK);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* SCARG(uap, path) always defined. */
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
|
1999-12-19 06:08:07 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
|
|
|
return (error);
|
2000-07-11 22:07:57 +00:00
|
|
|
error = vn_start_write(nd.ni_vp, &mp, V_WAIT | PCATCH);
|
1999-12-19 06:08:07 +00:00
|
|
|
NDFREE(&nd, 0);
|
2001-03-15 02:54:29 +00:00
|
|
|
if (error) {
|
|
|
|
if (filename_vp)
|
|
|
|
vrele(filename_vp);
|
2000-07-11 22:07:57 +00:00
|
|
|
return (error);
|
2001-03-15 02:54:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (SCARG(uap, attrname) != NULL) {
|
|
|
|
error = VFS_EXTATTRCTL(mp, SCARG(uap, cmd), filename_vp,
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, attrnamespace), attrname, td);
|
2001-03-15 02:54:29 +00:00
|
|
|
} else {
|
|
|
|
error = VFS_EXTATTRCTL(mp, SCARG(uap, cmd), filename_vp,
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, attrnamespace), NULL, td);
|
2001-03-15 02:54:29 +00:00
|
|
|
}
|
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
2001-03-15 02:54:29 +00:00
|
|
|
/*
|
|
|
|
* VFS_EXTATTRCTL will have unlocked, but not de-ref'd,
|
|
|
|
* filename_vp, so vrele it if it is defined.
|
|
|
|
*/
|
|
|
|
if (filename_vp != NULL)
|
|
|
|
vrele(filename_vp);
|
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
return (error);
|
1999-12-19 06:08:07 +00:00
|
|
|
}
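/*
 * Editor's illustration, not part of the original source: a hedged
 * userland sketch of pushing EA configuration into UFS through this
 * syscall.  The extattrctl(2) prototype, the UFS_EXTATTR_CMD_* commands
 * from <ufs/ufs/extattr.h>, and the EXTATTR_NAMESPACE_SYSTEM constant
 * are assumed from their documentation; the mount point and backing-file
 * path are purely hypothetical.  Kept under #if 0.
 */
#if 0
#include <sys/param.h>
#include <sys/extattr.h>
#include <ufs/ufs/extattr.h>
#include <err.h>

void
start_system_eas(void)
{
	/* Start the EA subsystem on the file system mounted on /var. */
	if (extattrctl("/var", UFS_EXTATTR_CMD_START, NULL,
	    EXTATTR_NAMESPACE_SYSTEM, NULL) == -1)
		err(1, "extattrctl: start");
	/* Enable one attribute, backed by a hypothetical backing file. */
	if (extattrctl("/var", UFS_EXTATTR_CMD_ENABLE,
	    "/var/.attribute/system/posix1e.acl_access",
	    EXTATTR_NAMESPACE_SYSTEM, "posix1e.acl_access") == -1)
		err(1, "extattrctl: enable");
}
#endif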
|
|
|
|
|
|
|
|
/*
|
2001-03-31 16:20:05 +00:00
|
|
|
* extattr_set_vp(): Set a named extended attribute on a file or directory
|
|
|
|
*
|
|
|
|
* Arguments: unlocked vnode "vp", attribute namespace "attrnamespace",
|
|
|
|
* kernelspace string pointer "attrname",
|
|
|
|
 * userspace iovec array pointer "iovp", unsigned int iovcnt,
 * thread "td"
|
|
|
|
* Returns: 0 on success, an error number otherwise
|
|
|
|
* Locks: none
|
|
|
|
* References: vp must be a valid reference for the duration of the call
|
1999-12-19 06:08:07 +00:00
|
|
|
*/
|
2001-03-31 16:20:05 +00:00
|
|
|
static int
|
|
|
|
extattr_set_vp(struct vnode *vp, int attrnamespace, const char *attrname,
|
2001-09-12 08:38:13 +00:00
|
|
|
struct iovec *iovp, unsigned iovcnt, struct thread *td)
|
1999-12-19 06:08:07 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1999-12-19 06:08:07 +00:00
|
|
|
struct uio auio;
|
|
|
|
struct iovec *iov, *needfree = NULL, aiov[UIO_SMALLIOV];
|
|
|
|
u_int iovlen, cnt;
|
|
|
|
int error, i;
|
|
|
|
|
2001-03-31 16:20:05 +00:00
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
|
2000-07-26 20:29:20 +00:00
|
|
|
return (error);
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
|
2001-03-31 16:20:05 +00:00
|
|
|
|
|
|
|
iovlen = iovcnt * sizeof(struct iovec);
|
|
|
|
if (iovcnt > UIO_SMALLIOV) {
|
|
|
|
if (iovcnt > UIO_MAXIOV) {
|
1999-12-19 06:08:07 +00:00
|
|
|
error = EINVAL;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
|
|
|
|
needfree = iov;
|
|
|
|
} else
|
|
|
|
iov = aiov;
|
|
|
|
auio.uio_iov = iov;
|
2001-03-31 16:20:05 +00:00
|
|
|
auio.uio_iovcnt = iovcnt;
|
1999-12-19 06:08:07 +00:00
|
|
|
auio.uio_rw = UIO_WRITE;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
2001-09-12 08:38:13 +00:00
|
|
|
auio.uio_td = td;
|
1999-12-19 06:08:07 +00:00
|
|
|
auio.uio_offset = 0;
|
2001-03-31 16:20:05 +00:00
|
|
|
if ((error = copyin((caddr_t)iovp, (caddr_t)iov, iovlen)))
|
1999-12-19 06:08:07 +00:00
|
|
|
goto done;
|
|
|
|
auio.uio_resid = 0;
|
2001-03-31 16:20:05 +00:00
|
|
|
for (i = 0; i < iovcnt; i++) {
|
1999-12-19 06:08:07 +00:00
|
|
|
if (iov->iov_len > INT_MAX - auio.uio_resid) {
|
|
|
|
error = EINVAL;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
auio.uio_resid += iov->iov_len;
|
|
|
|
iov++;
|
|
|
|
}
|
|
|
|
cnt = auio.uio_resid;
|
2001-03-31 16:20:05 +00:00
|
|
|
error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio,
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_proc->p_ucred, td);
|
1999-12-19 06:08:07 +00:00
|
|
|
cnt -= auio.uio_resid;
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = cnt;
|
1999-12-19 06:08:07 +00:00
|
|
|
done:
|
|
|
|
if (needfree)
|
|
|
|
FREE(needfree, M_IOV);
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_UNLOCK(vp, 0, td);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1999-12-19 06:08:07 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
extattr_set_file(td, uap)
|
|
|
|
struct thread *td;
|
2001-03-31 16:20:05 +00:00
|
|
|
struct extattr_set_file_args *uap;
|
1999-12-19 06:08:07 +00:00
|
|
|
{
|
|
|
|
struct nameidata nd;
|
|
|
|
char attrname[EXTATTR_MAXNAMELEN];
|
2001-03-31 16:20:05 +00:00
|
|
|
int error;
|
1999-12-19 06:08:07 +00:00
|
|
|
|
2001-03-31 16:20:05 +00:00
|
|
|
error = copyinstr(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN,
|
|
|
|
NULL);
|
1999-12-19 06:08:07 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
2001-03-31 16:20:05 +00:00
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
|
1999-12-19 06:08:07 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
|
|
|
return (error);
|
2001-03-31 16:20:05 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
|
|
|
|
|
|
|
error = extattr_set_vp(nd.ni_vp, SCARG(uap, attrnamespace), attrname,
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, iovp), SCARG(uap, iovcnt), td);
|
2001-03-31 16:20:05 +00:00
|
|
|
|
|
|
|
vrele(nd.ni_vp);
|
|
|
|
return (error);
|
|
|
|
}
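/*
 * Editor's illustration, not part of the original source: setting a
 * user-namespace attribute through the iovec-based extattr_set_file()
 * interface implemented above.  The attribute name "backup" and the
 * target path are hypothetical; kept under #if 0.
 */
#if 0
#include <sys/param.h>
#include <sys/uio.h>
#include <sys/extattr.h>
#include <string.h>
#include <err.h>

void
tag_file(const char *path)
{
	struct iovec iov;
	char value[] = "yes";

	iov.iov_base = value;
	iov.iov_len = strlen(value);
	/* One iovec is enough; the kernel sums iov_len into uio_resid. */
	if (extattr_set_file(path, EXTATTR_NAMESPACE_USER, "backup",
	    &iov, 1) == -1)
		err(1, "extattr_set_file");
}
#endif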
|
|
|
|
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
extattr_set_fd(td, uap)
|
|
|
|
struct thread *td;
|
2001-03-31 16:20:05 +00:00
|
|
|
struct extattr_set_fd_args *uap;
|
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
char attrname[EXTATTR_MAXNAMELEN];
|
|
|
|
int error;
|
|
|
|
|
|
|
|
error = copyinstr(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN,
|
|
|
|
NULL);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
|
2001-03-31 16:20:05 +00:00
|
|
|
return (error);
|
|
|
|
|
|
|
|
error = extattr_set_vp((struct vnode *)fp->f_data,
|
|
|
|
SCARG(uap, attrnamespace), attrname, SCARG(uap, iovp),
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, iovcnt), td);
|
2001-03-31 16:20:05 +00:00
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* extattr_get_vp(): Get a named extended attribute on a file or directory
|
|
|
|
*
|
|
|
|
* Arguments: unlocked vnode "vp", attribute namespace "attrnamespace",
|
|
|
|
* kernelspace string pointer "attrname",
|
|
|
|
* userspace iovec array pointer "iovp", unsigned int iovcnt,
|
|
|
|
 * thread "td"
|
|
|
|
* Returns: 0 on success, an error number otherwise
|
|
|
|
* Locks: none
|
|
|
|
* References: vp must be a valid reference for the duration of the call
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
extattr_get_vp(struct vnode *vp, int attrnamespace, const char *attrname,
|
2001-09-12 08:38:13 +00:00
|
|
|
struct iovec *iovp, unsigned iovcnt, struct thread *td)
|
2001-03-31 16:20:05 +00:00
|
|
|
{
|
|
|
|
struct uio auio;
|
|
|
|
struct iovec *iov, *needfree = NULL, aiov[UIO_SMALLIOV];
|
|
|
|
u_int iovlen, cnt;
|
|
|
|
int error, i;
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_READ);
|
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
|
2001-03-31 16:20:05 +00:00
|
|
|
|
|
|
|
iovlen = iovcnt * sizeof (struct iovec);
|
|
|
|
if (iovcnt > UIO_SMALLIOV) {
|
|
|
|
if (iovcnt > UIO_MAXIOV) {
|
|
|
|
error = EINVAL;
|
|
|
|
goto done;
|
1999-12-19 06:08:07 +00:00
|
|
|
}
|
|
|
|
MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
|
|
|
|
needfree = iov;
|
2001-03-31 16:20:05 +00:00
|
|
|
} else
|
1999-12-19 06:08:07 +00:00
|
|
|
iov = aiov;
|
|
|
|
auio.uio_iov = iov;
|
2001-03-31 16:20:05 +00:00
|
|
|
auio.uio_iovcnt = iovcnt;
|
1999-12-19 06:08:07 +00:00
|
|
|
auio.uio_rw = UIO_READ;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
2001-09-12 08:38:13 +00:00
|
|
|
auio.uio_td = td;
|
1999-12-19 06:08:07 +00:00
|
|
|
auio.uio_offset = 0;
|
2001-03-31 16:20:05 +00:00
|
|
|
if ((error = copyin((caddr_t)iovp, (caddr_t)iov, iovlen)))
|
1999-12-19 06:08:07 +00:00
|
|
|
goto done;
|
|
|
|
auio.uio_resid = 0;
|
2001-03-31 16:20:05 +00:00
|
|
|
for (i = 0; i < iovcnt; i++) {
|
1999-12-19 06:08:07 +00:00
|
|
|
if (iov->iov_len > INT_MAX - auio.uio_resid) {
|
|
|
|
error = EINVAL;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
auio.uio_resid += iov->iov_len;
|
|
|
|
iov++;
|
|
|
|
}
|
|
|
|
cnt = auio.uio_resid;
|
2001-03-31 16:20:05 +00:00
|
|
|
error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio,
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_proc->p_ucred, td);
|
1999-12-19 06:08:07 +00:00
|
|
|
cnt -= auio.uio_resid;
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = cnt;
|
1999-12-19 06:08:07 +00:00
|
|
|
done:
|
|
|
|
if (needfree)
|
|
|
|
FREE(needfree, M_IOV);
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_UNLOCK(vp, 0, td);
|
2001-03-31 16:20:05 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
extattr_get_file(td, uap)
|
|
|
|
struct thread *td;
|
2001-03-31 16:20:05 +00:00
|
|
|
struct extattr_get_file_args *uap;
|
|
|
|
{
|
|
|
|
struct nameidata nd;
|
|
|
|
char attrname[EXTATTR_MAXNAMELEN];
|
|
|
|
int error;
|
|
|
|
|
|
|
|
error = copyinstr(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN,
|
|
|
|
NULL);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
|
2001-03-31 16:20:05 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
|
|
|
return (error);
|
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
|
|
|
|
|
|
|
error = extattr_get_vp(nd.ni_vp, SCARG(uap, attrnamespace), attrname,
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, iovp), SCARG(uap, iovcnt), td);
|
2001-03-31 16:20:05 +00:00
|
|
|
|
|
|
|
vrele(nd.ni_vp);
|
|
|
|
return (error);
|
|
|
|
}
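/*
 * Editor's illustration, not part of the original source: reading the
 * attribute back with the iovec-based extattr_get_file() shown above.
 * The syscall's return value is the byte count placed in the iovecs,
 * matching td_retval[0] in the kernel code.  Kept under #if 0.
 */
#if 0
#include <sys/param.h>
#include <sys/uio.h>
#include <sys/extattr.h>
#include <stdio.h>
#include <err.h>

void
show_tag(const char *path)
{
	struct iovec iov;
	char buf[64];
	int n;

	iov.iov_base = buf;
	iov.iov_len = sizeof(buf);
	if ((n = extattr_get_file(path, EXTATTR_NAMESPACE_USER, "backup",
	    &iov, 1)) == -1)
		err(1, "extattr_get_file");
	printf("%s: %.*s\n", path, n, buf);
}
#endif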
|
|
|
|
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
extattr_get_fd(td, uap)
|
|
|
|
struct thread *td;
|
2001-03-31 16:20:05 +00:00
|
|
|
struct extattr_get_fd_args *uap;
|
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
char attrname[EXTATTR_MAXNAMELEN];
|
|
|
|
int error;
|
|
|
|
|
|
|
|
error = copyinstr(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN,
|
|
|
|
NULL);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
|
2001-03-31 16:20:05 +00:00
|
|
|
return (error);
|
|
|
|
|
|
|
|
error = extattr_get_vp((struct vnode *)fp->f_data,
|
|
|
|
SCARG(uap, attrnamespace), attrname, SCARG(uap, iovp),
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, iovcnt), td);
|
2001-03-31 16:20:05 +00:00
|
|
|
|
|
|
|
return (error);
|
1999-12-19 06:08:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2001-03-31 16:20:05 +00:00
|
|
|
* extattr_delete_vp(): Delete a named extended attribute on a file or
|
|
|
|
* directory
|
|
|
|
*
|
|
|
|
* Arguments: unlocked vnode "vp", attribute namespace "attrnamespace",
|
|
|
|
 * kernelspace string pointer "attrname", thread "td"
|
|
|
|
* Returns: 0 on success, an error number otherwise
|
|
|
|
* Locks: none
|
|
|
|
* References: vp must be a valid reference for the duration of the call
|
1999-12-19 06:08:07 +00:00
|
|
|
*/
|
2001-03-31 16:20:05 +00:00
|
|
|
static int
|
|
|
|
extattr_delete_vp(struct vnode *vp, int attrnamespace, const char *attrname,
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td)
|
2001-03-31 16:20:05 +00:00
|
|
|
{
|
|
|
|
struct mount *mp;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
|
|
|
|
return (error);
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
|
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
|
2001-03-31 16:20:05 +00:00
|
|
|
|
|
|
|
error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_proc->p_ucred, td);
|
2001-03-31 16:20:05 +00:00
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_UNLOCK(vp, 0, td);
|
2001-03-31 16:20:05 +00:00
|
|
|
vn_finished_write(mp);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1999-12-19 06:08:07 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
extattr_delete_file(td, uap)
|
|
|
|
struct thread *td;
|
1999-12-19 06:08:07 +00:00
|
|
|
struct extattr_delete_file_args *uap;
|
|
|
|
{
|
|
|
|
struct nameidata nd;
|
|
|
|
char attrname[EXTATTR_MAXNAMELEN];
|
2001-03-31 16:20:05 +00:00
|
|
|
int error;
|
1999-12-19 06:08:07 +00:00
|
|
|
|
2001-03-31 16:20:05 +00:00
|
|
|
error = copyinstr(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN,
|
|
|
|
NULL);
|
1999-12-19 06:08:07 +00:00
|
|
|
if (error)
|
|
|
|
return(error);
|
2001-03-31 16:20:05 +00:00
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
|
1999-12-19 06:08:07 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
|
|
|
return(error);
|
2001-03-31 16:20:05 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
|
|
|
|
|
|
|
error = extattr_delete_vp(nd.ni_vp, SCARG(uap, attrnamespace),
|
2001-09-12 08:38:13 +00:00
|
|
|
attrname, td);
|
2001-03-31 16:20:05 +00:00
|
|
|
|
|
|
|
vrele(nd.ni_vp);
|
1999-12-19 06:08:07 +00:00
|
|
|
return(error);
|
|
|
|
}
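/*
 * Editor's illustration, not part of the original source: deleting an
 * attribute via extattr_delete_file(), which the kernel implements above
 * as a VOP_SETEXTATTR() call with a NULL uio.  Kept under #if 0.
 */
#if 0
#include <sys/param.h>
#include <sys/extattr.h>
#include <err.h>

void
untag_file(const char *path)
{
	if (extattr_delete_file(path, EXTATTR_NAMESPACE_USER,
	    "backup") == -1)
		err(1, "extattr_delete_file");
}
#endif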
|
2001-03-31 16:20:05 +00:00
|
|
|
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
extattr_delete_fd(td, uap)
|
|
|
|
struct thread *td;
|
2001-03-31 16:20:05 +00:00
|
|
|
struct extattr_delete_fd_args *uap;
|
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
char attrname[EXTATTR_MAXNAMELEN];
|
|
|
|
int error;
|
|
|
|
|
|
|
|
error = copyinstr(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN,
|
|
|
|
NULL);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
|
2001-03-31 16:20:05 +00:00
|
|
|
return (error);
|
|
|
|
|
|
|
|
error = extattr_delete_vp((struct vnode *)fp->f_data,
|
2001-09-12 08:38:13 +00:00
|
|
|
SCARG(uap, attrnamespace), attrname, td);
|
2001-03-31 16:20:05 +00:00
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|