/*
* Copyright (c) 1982, 1986, 1989, 1993
* The Regents of the University of California. All rights reserved.
* (c) UNIX System Laboratories, Inc.
* All or some portions of this file are derived from material licensed
* to the University of California by American Telephone and Telegraph
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
* the permission of UNIX System Laboratories, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)vfs_vnops.c 8.2 (Berkeley) 1/21/94
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <machine/limits.h>
static int vn_closefile __P((struct file *fp, struct thread *td));
static int vn_ioctl __P((struct file *fp, u_long com, caddr_t data,
struct thread *td));
static int vn_read __P((struct file *fp, struct uio *uio,
struct ucred *cred, int flags, struct thread *td));
static int vn_poll __P((struct file *fp, int events, struct ucred *cred,
struct thread *td));
static int vn_kqfilter __P((struct file *fp, struct knote *kn));
static int vn_statfile __P((struct file *fp, struct stat *sb, struct thread *td));
static int vn_write __P((struct file *fp, struct uio *uio,
struct ucred *cred, int flags, struct thread *td));
struct fileops vnops = {
vn_read, vn_write, vn_ioctl, vn_poll, vn_kqfilter,
vn_statfile, vn_closefile
};
int
vn_open(ndp, flagp, cmode)
register struct nameidata *ndp;
int *flagp, cmode;
{
struct thread *td = ndp->ni_cnd.cn_thread;
return (vn_open_cred(ndp, flagp, cmode, td->td_proc->p_ucred));
}
/*
* Common code for vnode open operations.
* Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
*
* Note that this does NOT free nameidata for the successful case,
* due to the NDINIT being done elsewhere.
*/
int
vn_open_cred(ndp, flagp, cmode, cred)
register struct nameidata *ndp;
int *flagp, cmode;
struct ucred *cred;
{
struct vnode *vp;
struct mount *mp;
struct thread *td = ndp->ni_cnd.cn_thread;
struct vattr vat;
struct vattr *vap = &vat;
int mode, fmode, error;
restart:
fmode = *flagp;
if (fmode & O_CREAT) {
ndp->ni_cnd.cn_nameiop = CREATE;
ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
ndp->ni_cnd.cn_flags |= FOLLOW;
bwillwrite();
if ((error = namei(ndp)) != 0)
return (error);
if (ndp->ni_vp == NULL) {
VATTR_NULL(vap);
vap->va_type = VREG;
vap->va_mode = cmode;
if (fmode & O_EXCL)
vap->va_vaflags |= VA_EXCLUSIVE;
if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
NDFREE(ndp, NDF_ONLY_PNBUF);
vput(ndp->ni_dvp);
if ((error = vn_start_write(NULL, &mp,
V_XSLEEP | PCATCH)) != 0)
return (error);
goto restart;
}
VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
&ndp->ni_cnd, vap);
vput(ndp->ni_dvp);
vn_finished_write(mp);
if (error) {
NDFREE(ndp, NDF_ONLY_PNBUF);
return (error);
}
ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
fmode &= ~O_TRUNC;
vp = ndp->ni_vp;
} else {
if (ndp->ni_dvp == ndp->ni_vp)
vrele(ndp->ni_dvp);
else
vput(ndp->ni_dvp);
ndp->ni_dvp = NULL;
vp = ndp->ni_vp;
if (fmode & O_EXCL) {
error = EEXIST;
goto bad;
}
fmode &= ~O_CREAT;
}
} else {
ndp->ni_cnd.cn_nameiop = LOOKUP;
ndp->ni_cnd.cn_flags =
((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
if ((error = namei(ndp)) != 0)
return (error);
vp = ndp->ni_vp;
}
if (vp->v_type == VLNK) {
error = EMLINK;
goto bad;
}
if (vp->v_type == VSOCK) {
error = EOPNOTSUPP;
goto bad;
}
if ((fmode & O_CREAT) == 0) {
mode = 0;
if (fmode & (FWRITE | O_TRUNC)) {
if (vp->v_type == VDIR) {
error = EISDIR;
goto bad;
}
error = vn_writechk(vp);
if (error)
goto bad;
mode |= VWRITE;
}
if (fmode & FREAD)
mode |= VREAD;
if (mode) {
error = VOP_ACCESS(vp, mode, cred, td);
if (error)
goto bad;
}
}
if ((error = VOP_OPEN(vp, fmode, cred, td)) != 0)
goto bad;
/*
* Make sure that a VM object is created for VMIO support.
*/
if (vn_canvmio(vp) == TRUE) {
if ((error = vfs_object_create(vp, td, cred)) != 0)
/* XXX: Should VOP_CLOSE() again here. */
goto bad;
}
if (fmode & FWRITE)
vp->v_writecount++;
*flagp = fmode;
return (0);
bad:
NDFREE(ndp, NDF_ONLY_PNBUF);
vput(vp);
*flagp = fmode;
return (error);
}
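/*
 * Illustrative only, not compiled: a minimal sketch of how a kernel
 * consumer might drive vn_open()/vn_close() directly.  The NDINIT()
 * arguments and local names are assumptions of this sketch, not part
 * of this file.  vn_open() returns with the vnode locked, so it is
 * unlocked before use and vn_close() is called on the unlocked vnode.
 *
 *	struct nameidata nd;
 *	int flags, error;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, td);
 *	flags = FREAD;
 *	if ((error = vn_open(&nd, &flags, 0)) != 0)
 *		return (error);
 *	NDFREE(&nd, NDF_ONLY_PNBUF);
 *	VOP_UNLOCK(nd.ni_vp, 0, td);
 *	... read from nd.ni_vp, e.g. via vn_rdwr() ...
 *	error = vn_close(nd.ni_vp, FREAD, td->td_proc->p_ucred, td);
 */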
/*
* Check for write permissions on the specified vnode.
* Prototype text segments cannot be written.
*/
int
vn_writechk(vp)
register struct vnode *vp;
{
/*
* If there's shared text associated with
* the vnode, try to free it up once. If
* we fail, we can't allow writing.
*/
if (vp->v_flag & VTEXT)
return (ETXTBSY);
return (0);
}
/*
* Vnode close call
*/
int
vn_close(vp, flags, cred, td)
register struct vnode *vp;
int flags;
struct ucred *cred;
struct thread *td;
{
int error;
if (flags & FWRITE)
vp->v_writecount--;
error = VOP_CLOSE(vp, flags, cred, td);
/*
* XXX - In certain instances VOP_CLOSE has to do the vrele
* itself. If the vrele has been done, it will return EAGAIN
* to indicate that the vrele should not be done again. When
* this happens, we just return success. The correct thing to
* do would be to have all VOP_CLOSE instances do the vrele.
*/
if (error == EAGAIN)
return (0);
vrele(vp);
return (error);
}
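/*
 * Estimate how sequential recent I/O on this file has been.  The
 * heuristic returns f_seqcount shifted into the upper 16 bits of the
 * ioflag word, where filesystems can use it as a read-ahead and
 * clustering hint.  For example, assuming the default 16K BKVASIZE, a
 * single 64K transfer that continues at f_nextoff raises f_seqcount
 * by 4.
 */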
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
/*
* Sequential heuristic - detect sequential operation
*/
if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
uio->uio_offset == fp->f_nextoff) {
/*
* XXX we assume that the filesystem block size is
* the default. Not true, but still gives us a pretty
* good indicator of how sequential the read operations
* are.
*/
fp->f_seqcount += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
if (fp->f_seqcount >= 127)
fp->f_seqcount = 127;
return(fp->f_seqcount << 16);
}
/*
* Not sequential, quick draw-down of seqcount
*/
if (fp->f_seqcount > 1)
fp->f_seqcount = 1;
else
fp->f_seqcount = 0;
return(0);
}
/*
* Package up an I/O request on a vnode into a uio and do it.
*/
int
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
enum uio_rw rw;
struct vnode *vp;
caddr_t base;
int len;
off_t offset;
enum uio_seg segflg;
int ioflg;
struct ucred *cred;
int *aresid;
struct thread *td;
1994-05-24 10:09:53 +00:00
{
struct uio auio;
struct iovec aiov;
struct mount *mp;
int error;
if ((ioflg & IO_NODELOCKED) == 0) {
mp = NULL;
if (rw == UIO_WRITE &&
vp->v_type != VCHR &&
(error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
}
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
aiov.iov_base = base;
aiov.iov_len = len;
auio.uio_resid = len;
auio.uio_offset = offset;
auio.uio_segflg = segflg;
auio.uio_rw = rw;
auio.uio_td = td;
if (rw == UIO_READ) {
error = VOP_READ(vp, &auio, ioflg, cred);
} else {
error = VOP_WRITE(vp, &auio, ioflg, cred);
}
if (aresid)
*aresid = auio.uio_resid;
else
if (auio.uio_resid && error == 0)
error = EIO;
if ((ioflg & IO_NODELOCKED) == 0) {
vn_finished_write(mp);
VOP_UNLOCK(vp, 0, td);
}
return (error);
}
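/*
 * Illustrative only, not compiled: a sketch of reading the first len
 * bytes of an already-referenced (but unlocked) vnode into a kernel
 * buffer with vn_rdwr().  The buffer and length names are assumptions.
 *
 *	int resid, error;
 *
 *	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, len, (off_t)0,
 *	    UIO_SYSSPACE, 0, td->td_proc->p_ucred, &resid, td);
 *	if (error == 0 && resid != 0)
 *		error = EIO;		(treat a short read as an error)
 */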
/*
* Package up an I/O request on a vnode into a uio and do it. The I/O
* request is split up into smaller chunks and we try to avoid saturating
* the buffer cache while potentially holding a vnode locked, so we
* call bwillwrite() before calling vn_rdwr(). We also call uio_yield()
* to give other processes a chance to lock the vnode (either other processes
* core'ing the same binary, or unrelated processes scanning the directory).
*/
int
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
enum uio_rw rw;
struct vnode *vp;
caddr_t base;
int len;
off_t offset;
enum uio_seg segflg;
int ioflg;
struct ucred *cred;
int *aresid;
struct thread *td;
{
int error = 0;
do {
int chunk = (len > MAXBSIZE) ? MAXBSIZE : len;
if (rw != UIO_READ && vp->v_type == VREG)
bwillwrite();
error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
ioflg, cred, aresid, td);
len -= chunk; /* aresid calc already includes length */
if (error)
break;
offset += chunk;
base += chunk;
uio_yield();
} while (len);
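/*
 * If we stopped early on an error, *aresid already holds the residual
 * of the failing chunk; add back the chunks that were never issued so
 * the caller sees the total amount left undone.
 */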
if (aresid)
*aresid += len;
return (error);
}
/*
* File table vnode read routine.
*/
static int
vn_read(fp, uio, cred, flags, td)
struct file *fp;
struct uio *uio;
struct ucred *cred;
struct thread *td;
int flags;
{
struct vnode *vp;
int error, ioflag;
KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
uio->uio_td, td));
vp = (struct vnode *)fp->f_data;
ioflag = 0;
if (fp->f_flag & FNONBLOCK)
ioflag |= IO_NDELAY;
if (fp->f_flag & O_DIRECT)
ioflag |= IO_DIRECT;
VOP_LEASE(vp, td, cred, LEASE_READ);
vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
if ((flags & FOF_OFFSET) == 0)
uio->uio_offset = fp->f_offset;
ioflag |= sequential_heuristic(uio, fp);
error = VOP_READ(vp, uio, ioflag, cred);
if ((flags & FOF_OFFSET) == 0)
fp->f_offset = uio->uio_offset;
fp->f_nextoff = uio->uio_offset;
VOP_UNLOCK(vp, 0, td);
return (error);
}
/*
* File table vnode write routine.
*/
static int
vn_write(fp, uio, cred, flags, td)
struct file *fp;
struct uio *uio;
struct ucred *cred;
struct thread *td;
int flags;
{
struct vnode *vp;
struct mount *mp;
int error, ioflag;
KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
uio->uio_td, td));
vp = (struct vnode *)fp->f_data;
if (vp->v_type == VREG)
bwillwrite();
vp = (struct vnode *)fp->f_data; /* XXX needed? */
ioflag = IO_UNIT;
if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
ioflag |= IO_APPEND;
if (fp->f_flag & FNONBLOCK)
ioflag |= IO_NDELAY;
if (fp->f_flag & O_DIRECT)
ioflag |= IO_DIRECT;
if ((fp->f_flag & O_FSYNC) ||
(vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
ioflag |= IO_SYNC;
mp = NULL;
if (vp->v_type != VCHR &&
(error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
VOP_LEASE(vp, td, cred, LEASE_WRITE);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
if ((flags & FOF_OFFSET) == 0)
uio->uio_offset = fp->f_offset;
ioflag |= sequential_heuristic(uio, fp);
error = VOP_WRITE(vp, uio, ioflag, cred);
if ((flags & FOF_OFFSET) == 0)
fp->f_offset = uio->uio_offset;
fp->f_nextoff = uio->uio_offset;
VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
return (error);
}
/*
* File table vnode stat routine.
*/
static int
vn_statfile(fp, sb, td)
struct file *fp;
struct stat *sb;
struct thread *td;
{
struct vnode *vp = (struct vnode *)fp->f_data;
return vn_stat(vp, sb, td);
}
int
vn_stat(vp, sb, td)
struct vnode *vp;
register struct stat *sb;
struct thread *td;
{
struct vattr vattr;
register struct vattr *vap;
int error;
u_short mode;
vap = &vattr;
error = VOP_GETATTR(vp, vap, td->td_proc->p_ucred, td);
if (error)
return (error);
/*
* Zero the spare stat fields
*/
sb->st_lspare = 0;
sb->st_qspare[0] = 0;
sb->st_qspare[1] = 0;
/*
* Copy from vattr table
*/
if (vap->va_fsid != VNOVAL)
sb->st_dev = vap->va_fsid;
else
sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
sb->st_ino = vap->va_fileid;
mode = vap->va_mode;
switch (vap->va_type) {
case VREG:
mode |= S_IFREG;
break;
case VDIR:
mode |= S_IFDIR;
break;
case VBLK:
mode |= S_IFBLK;
break;
case VCHR:
mode |= S_IFCHR;
break;
case VLNK:
mode |= S_IFLNK;
/* This is a cosmetic change, symlinks do not have a mode. */
if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
mode &= ~ACCESSPERMS; /* 0000 */
else
mode |= ACCESSPERMS; /* 0777 */
break;
case VSOCK:
mode |= S_IFSOCK;
break;
case VFIFO:
mode |= S_IFIFO;
break;
default:
return (EBADF);
};
sb->st_mode = mode;
sb->st_nlink = vap->va_nlink;
sb->st_uid = vap->va_uid;
sb->st_gid = vap->va_gid;
sb->st_rdev = vap->va_rdev;
if (vap->va_size > OFF_MAX)
return (EOVERFLOW);
sb->st_size = vap->va_size;
sb->st_atimespec = vap->va_atime;
sb->st_mtimespec = vap->va_mtime;
sb->st_ctimespec = vap->va_ctime;
/*
* According to www.opengroup.org, the meaning of st_blksize is
* "a filesystem-specific preferred I/O block size for this
* object. In some filesystem types, this may vary from file
* to file"
* Default to zero to catch bogus uses of this field.
*/
if (vap->va_type == VREG) {
sb->st_blksize = vap->va_blocksize;
} else if (vn_isdisk(vp, NULL)) {
sb->st_blksize = vp->v_rdev->si_bsize_best;
if (sb->st_blksize < vp->v_rdev->si_bsize_phys)
sb->st_blksize = vp->v_rdev->si_bsize_phys;
if (sb->st_blksize < BLKDEV_IOSIZE)
sb->st_blksize = BLKDEV_IOSIZE;
} else {
sb->st_blksize = 0;
}
sb->st_flags = vap->va_flags;
if (suser_xxx(td->td_proc->p_ucred, 0, 0))
sb->st_gen = 0;
else
sb->st_gen = vap->va_gen;
#if (S_BLKSIZE == 512)
/* Optimize this case */
sb->st_blocks = vap->va_bytes >> 9;
#else
sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
return (0);
}
/*
* File table vnode ioctl routine.
*/
static int
vn_ioctl(fp, com, data, td)
struct file *fp;
u_long com;
caddr_t data;
struct thread *td;
{
register struct vnode *vp = ((struct vnode *)fp->f_data);
struct vattr vattr;
int error;
switch (vp->v_type) {
case VREG:
case VDIR:
if (com == FIONREAD) {
error = VOP_GETATTR(vp, &vattr, td->td_proc->p_ucred, td);
if (error)
return (error);
*(int *)data = vattr.va_size - fp->f_offset;
return (0);
}
if (com == FIONBIO || com == FIOASYNC) /* XXX */
return (0); /* XXX */
/* fall into ... */
default:
#if 0
return (ENOTTY);
#endif
case VFIFO:
case VCHR:
case VBLK:
if (com == FIODTYPE) {
if (vp->v_type != VCHR && vp->v_type != VBLK)
return (ENOTTY);
*(int *)data = devsw(vp->v_rdev)->d_flags & D_TYPEMASK;
return (0);
}
error = VOP_IOCTL(vp, com, data, fp->f_flag, td->td_proc->p_ucred, td);
if (error == 0 && com == TIOCSCTTY) {
/* Do nothing if reassigning same control tty */
if (td->td_proc->p_session->s_ttyvp == vp)
return (0);
/* Get rid of reference to old control tty */
if (td->td_proc->p_session->s_ttyvp)
vrele(td->td_proc->p_session->s_ttyvp);
td->td_proc->p_session->s_ttyvp = vp;
VREF(vp);
}
return (error);
}
}
/*
* File table vnode poll routine.
*/
static int
vn_poll(fp, events, cred, td)
struct file *fp;
int events;
struct ucred *cred;
struct thread *td;
{
return (VOP_POLL(((struct vnode *)fp->f_data), events, cred, td));
}
/*
* Check that the vnode is still valid, and if so
* acquire requested lock.
*/
int
#ifndef DEBUG_LOCKS
vn_lock(vp, flags, td)
#else
debug_vn_lock(vp, flags, td, filename, line)
#endif
struct vnode *vp;
int flags;
struct thread *td;
#ifdef DEBUG_LOCKS
const char *filename;
int line;
#endif
{
int error;
do {
if ((flags & LK_INTERLOCK) == 0)
mtx_lock(&vp->v_interlock);
if ((vp->v_flag & VXLOCK) && vp->v_vxproc != curthread) {
vp->v_flag |= VXWANT;
msleep(vp, &vp->v_interlock, PINOD | PDROP,
"vn_lock", 0);
error = ENOENT;
} else {
if (vp->v_vxproc != NULL)
printf("VXLOCK interlock avoided in vn_lock\n");
#ifdef DEBUG_LOCKS
vp->filename = filename;
vp->line = line;
#endif
error = VOP_LOCK(vp,
flags | LK_NOPAUSE | LK_INTERLOCK, td);
if (error == 0)
return (error);
}
flags &= ~LK_INTERLOCK;
} while (flags & LK_RETRY);
return (error);
}
/*
* File table vnode close routine.
*/
static int
vn_closefile(fp, td)
struct file *fp;
struct thread *td;
{
fp->f_ops = &badfileops;
return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
fp->f_cred, td));
}
/*
* Preparing to start a filesystem write operation. If the operation is
* permitted, then we bump the count of operations in progress and
* proceed. If a suspend request is in progress, we wait until the
* suspension is over, and then proceed.
*/
int
vn_start_write(vp, mpp, flags)
struct vnode *vp;
struct mount **mpp;
int flags;
{
struct mount *mp;
int error;
/*
* If a vnode is provided, get and return the mount point to which
* it will write.
*/
if (vp != NULL) {
if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
*mpp = NULL;
if (error != EOPNOTSUPP)
return (error);
return (0);
}
}
if ((mp = *mpp) == NULL)
return (0);
/*
* Check on status of suspension.
*/
while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
if (flags & V_NOWAIT)
return (EWOULDBLOCK);
error = tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
"suspfs", 0);
if (error)
return (error);
}
if (flags & V_XSLEEP)
return (0);
mp->mnt_writeopcount++;
return (0);
}
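/*
 * Illustrative only, not compiled: the usual bracket a caller places
 * around a filesystem-modifying vnode operation, mirroring what
 * vn_rdwr() does above.  The VOP_SETATTR() call stands in for the real
 * work; vattr and cred are assumed to be set up by the caller.
 *
 *	struct mount *mp;
 *	int error;
 *
 *	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 *	error = VOP_SETATTR(vp, &vattr, cred, td);
 *	VOP_UNLOCK(vp, 0, td);
 *	vn_finished_write(mp);
 */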
/*
* Secondary suspension. Used by operations such as vop_inactive
* routines that are needed by the higher level functions. These
* are allowed to proceed until all the higher level functions have
* completed (indicated by mnt_writeopcount dropping to zero). At that
* time, these operations are halted until the suspension is over.
*/
int
vn_write_suspend_wait(vp, mp, flags)
struct vnode *vp;
struct mount *mp;
int flags;
{
int error;
if (vp != NULL) {
if ((error = VOP_GETWRITEMOUNT(vp, &mp)) != 0) {
if (error != EOPNOTSUPP)
return (error);
return (0);
}
}
/*
* If we are not suspended or have not yet reached suspended
* mode, then let the operation proceed.
*/
if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0)
return (0);
if (flags & V_NOWAIT)
return (EWOULDBLOCK);
/*
* Wait for the suspension to finish.
*/
return (tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
"suspfs", 0));
}
/*
* Filesystem write operation has completed. If we are suspending and this
* operation is the last one, notify the suspender that the suspension is
* now in effect.
*/
void
vn_finished_write(mp)
struct mount *mp;
{
if (mp == NULL)
return;
mp->mnt_writeopcount--;
if (mp->mnt_writeopcount < 0)
panic("vn_finished_write: neg cnt");
if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
mp->mnt_writeopcount <= 0)
wakeup(&mp->mnt_writeopcount);
}
/*
* Request a filesystem to suspend write operations.
*/
void
vfs_write_suspend(mp)
struct mount *mp;
{
struct thread *td = curthread;
if (mp->mnt_kern_flag & MNTK_SUSPEND)
return;
mp->mnt_kern_flag |= MNTK_SUSPEND;
if (mp->mnt_writeopcount > 0)
(void) tsleep(&mp->mnt_writeopcount, PUSER - 1, "suspwt", 0);
VFS_SYNC(mp, MNT_WAIT, td->td_proc->p_ucred, td);
mp->mnt_kern_flag |= MNTK_SUSPENDED;
}
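/*
 * Illustrative only, not compiled: a suspender (snapshot code, for
 * instance) brackets its work with the calls below.  Writers that went
 * through vn_start_write() drain and the filesystem is synced before
 * vfs_write_suspend() returns.
 *
 *	vfs_write_suspend(mp);
 *	... operate on the now-quiescent filesystem ...
 *	vfs_write_resume(mp);
 */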
/*
* Request a filesystem to resume write operations.
*/
void
vfs_write_resume(mp)
struct mount *mp;
{
if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0)
return;
mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPENDED);
wakeup(&mp->mnt_writeopcount);
wakeup(&mp->mnt_flag);
}
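/*
 * File table vnode kqueue filter routine.
 */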
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
return (VOP_KQFILTER(((struct vnode *)fp->f_data), kn));
}
/*
* Simplified in-kernel wrapper calls for extended attribute access.
* Both calls pass in a NULL credential, authorizing as "kernel" access.
* Set IO_NODELOCKED in ioflg if the vnode is already locked.
*/
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
const char *attrname, int *buflen, char *buf, struct thread *td)
{
struct uio auio;
struct iovec iov;
int error;
iov.iov_len = *buflen;
iov.iov_base = buf;
auio.uio_iov = &iov;
auio.uio_iovcnt = 1;
auio.uio_rw = UIO_READ;
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_td = td;
auio.uio_offset = 0;
auio.uio_resid = *buflen;
if ((ioflg & IO_NODELOCKED) == 0)
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
/* authorize attribute retrieval as kernel */
error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);
if ((ioflg & IO_NODELOCKED) == 0)
VOP_UNLOCK(vp, 0, td);
if (error == 0) {
*buflen = *buflen - auio.uio_resid;
}
return (error);
}
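/*
 * Illustrative only, not compiled: fetching an extended attribute into
 * a caller-supplied buffer.  The namespace constant and attribute name
 * are placeholders for the example; on success buflen is reduced to
 * the number of bytes actually returned.
 *
 *	char buf[128];
 *	int buflen = sizeof(buf), error;
 *
 *	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
 *	    "example", &buflen, buf, td);
 */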
/*
* XXX failure mode if partially written?
*/
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
const char *attrname, int buflen, char *buf, struct thread *td)
{
struct uio auio;
struct iovec iov;
struct mount *mp;
int error;
iov.iov_len = buflen;
iov.iov_base = buf;
auio.uio_iov = &iov;
auio.uio_iovcnt = 1;
auio.uio_rw = UIO_WRITE;
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_td = td;
auio.uio_offset = 0;
auio.uio_resid = buflen;
if ((ioflg & IO_NODELOCKED) == 0) {
if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
return (error);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
}
/* authorize attribute setting as kernel */
error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);
if ((ioflg & IO_NODELOCKED) == 0) {
vn_finished_write(mp);
VOP_UNLOCK(vp, 0, td);
}
return (error);
}
int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
const char *attrname, struct thread *td)
{
struct mount *mp;
int error;
if ((ioflg & IO_NODELOCKED) == 0) {
if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
return (error);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
}
/* authorize attribute removal as kernel */
error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL, NULL, td);
if ((ioflg & IO_NODELOCKED) == 0) {
vn_finished_write(mp);
VOP_UNLOCK(vp, 0, td);
}
return (error);
}