mirror of
https://git.FreeBSD.org/src.git
synced 2024-12-20 11:11:24 +00:00
5673e3cb08
Assert this. In the reported panic, vdestroy() fired the assertion
"vp has namecache for ..", because pseudofs may end up doing
cache_enter() with a reclaimed dvp, after a dotdot lookup temporarily
unlocked dvp. A similar problem exists in ufs_lookup() for "." lookup,
when the vnode lock needs to be upgraded.

Verify that dvp is not reclaimed before calling cache_enter().

Reported and tested by:	pho
Reviewed by:	kan
MFC after:	2 weeks
1229 lines
32 KiB
C
/*-
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdtrace.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/uma.h>

SDT_PROVIDER_DECLARE(vfs);
SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, "struct vnode *",
    "char *", "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, "int", "struct vnode *",
    "char *");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit_negative, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE1(vfs, namecache, purge, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, "struct mount *");
SDT_PROBE_DEFINE3(vfs, namecache, zap, done, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, zap_negative, done, "struct vnode *",
    "char *");

/*
 * This structure describes the elements in the cache of recent
 * names looked up by namei.
 */

struct namecache {
	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
	struct vnode *nc_dvp;		/* vnode of parent of name */
	struct vnode *nc_vp;		/* vnode the name refers to */
	u_char nc_flag;			/* flag bits */
	u_char nc_nlen;			/* length of name */
	char nc_name[0];		/* segment name + nul */
};

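/*
 * Editor's note (an illustrative sketch, not part of the original file):
 * nc_name is a zero-length trailing array, so an entry storing a name of
 * length len occupies sizeof(struct namecache) + len + 1 bytes, the +1
 * being the terminating nul copied in by cache_enter().  cache_alloc()
 * below uses this to pick one of two fixed-size UMA zones rather than
 * doing a variable-size allocation per entry.
 */
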
/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  Cache is indexed by hash value
 * obtained from (vp, name) where vp refers to the directory
 * containing name.
 *
 * If it is a "negative" entry, (i.e. for a name that is known NOT to
 * exist) the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 */

/*
 * Structures associated with name caching.
 */
#define	NCHHASH(hash) \
	(&nchashtbl[(hash) & nchash])
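/*
 * Editor's sketch of how hash keys are formed (compare cache_lookup()
 * and cache_enter() below): the name is hashed with 32-bit FNV-1, the
 * result is folded over the directory vnode pointer, and NCHHASH()
 * masks it into the table:
 *
 *	hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
 *	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
 *	ncpp = NCHHASH(hash);
 */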
static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static TAILQ_HEAD(, namecache) ncneg;	/* LRU queue of negative entries */
static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");
static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
static u_long	numneg;			/* number of negative entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
static u_long	numcache;		/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
static u_long	numcachehv;		/* number of cache entries with vnodes held */
SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0, "");
#if 0
static u_long	numcachepl;		/* number of cache purge for leaf entries */
SYSCTL_ULONG(_debug, OID_AUTO, numcachepl, CTLFLAG_RD, &numcachepl, 0, "");
#endif
struct nchstats	nchstats;		/* cache effectiveness statistics */

static struct rwlock cache_lock;
RW_SYSINIT(vfscache, &cache_lock, "Name Cache");

#define	CACHE_UPGRADE_LOCK()	rw_try_upgrade(&cache_lock)
#define	CACHE_RLOCK()		rw_rlock(&cache_lock)
#define	CACHE_RUNLOCK()		rw_runlock(&cache_lock)
#define	CACHE_WLOCK()		rw_wlock(&cache_lock)
#define	CACHE_WUNLOCK()		rw_wunlock(&cache_lock)

/*
 * UMA zones for the VFS cache.
 *
 * The small cache is used for entries with short names, which are the
 * most common.  The large cache is used for entries which are too big to
 * fit in the small cache.
 */
static uma_zone_t cache_zone_small;
static uma_zone_t cache_zone_large;

#define	CACHE_PATH_CUTOFF	35
#define	CACHE_ZONE_SMALL	(sizeof(struct namecache) + CACHE_PATH_CUTOFF \
				    + 1)
#define	CACHE_ZONE_LARGE	(sizeof(struct namecache) + NAME_MAX + 1)

#define	cache_alloc(len)	uma_zalloc(((len) <= CACHE_PATH_CUTOFF) ? \
	cache_zone_small : cache_zone_large, M_WAITOK)
#define	cache_free(ncp)	do { \
	if (ncp != NULL) \
		uma_zfree(((ncp)->nc_nlen <= CACHE_PATH_CUTOFF) ? \
		    cache_zone_small : cache_zone_large, (ncp)); \
} while (0)
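/*
 * Editor's worked example (not from the original file): with
 * CACHE_PATH_CUTOFF at 35, an entry for "libexec" (7 bytes) is taken
 * from cache_zone_small, whose items are sizeof(struct namecache) +
 * 35 + 1 bytes, while any name longer than 35 characters falls into
 * cache_zone_large, sized for NAME_MAX (255) plus the nul.  Two fixed
 * item sizes keep the UMA zones simple at the cost of some internal
 * padding in each entry.
 */
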
static int	doingcache = 1;		/* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, "");

/* Export size information to userland */
SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, 0,
    sizeof(struct namecache), "");

/*
 * The new name cache statistics
 */
static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0,
    "Name cache statistics");
#define	STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);
static u_long numupgrades; STATNODE(CTLFLAG_RD, numupgrades, &numupgrades);

SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD | CTLFLAG_MPSAFE,
    &nchstats, sizeof(nchstats), "LU", "VFS cache effectiveness statistics");


static void cache_zap(struct namecache *ncp);
static int vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf,
    u_int *buflen);
static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen);

static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

/*
 * Flags in namecache.nc_flag
 */
#define	NCF_WHITE	0x01
#define	NCF_ISDOTDOT	0x02

#ifdef DIAGNOSTIC
/*
 * Grab an atomic snapshot of the name cache hash chain lengths
 */
SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL, "hash table stats");

static int
sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int n_nchash;
	int count;

	n_nchash = nchash + 1;	/* nchash is max index, not count */
	if (!req->oldptr)
		return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));

	/* Scan hash tables for applicable entries */
	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
		CACHE_RLOCK();
		count = 0;
		LIST_FOREACH(ncp, ncpp, nc_hash) {
			count++;
		}
		CACHE_RUNLOCK();
		error = SYSCTL_OUT(req, &count, sizeof(count));
		if (error)
			return (error);
	}
	return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD|
    CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int",
    "nchash chain lengths");

static int
sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int n_nchash;
	int count, maxlength, used, pct;

	if (!req->oldptr)
		return SYSCTL_OUT(req, 0, 4 * sizeof(int));

	n_nchash = nchash + 1;	/* nchash is max index, not count */
	used = 0;
	maxlength = 0;

	/* Scan hash tables for applicable entries */
	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
		count = 0;
		CACHE_RLOCK();
		LIST_FOREACH(ncp, ncpp, nc_hash) {
			count++;
		}
		CACHE_RUNLOCK();
		if (count)
			used++;
		if (maxlength < count)
			maxlength = count;
	}
	n_nchash = nchash + 1;
	pct = (used * 100 * 100) / n_nchash;
	error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &used, sizeof(used));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &pct, sizeof(pct));
	if (error)
		return (error);
	return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD|
    CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I",
    "nchash chain lengths");
#endif

/*
 * cache_zap():
 *
 *   Removes a namecache entry from cache, whether it contains an actual
 *   pointer to a vnode or is just a negative cache entry.
 */
static void
cache_zap(ncp)
	struct namecache *ncp;
{
	struct vnode *vp;

	rw_assert(&cache_lock, RA_WLOCKED);
	CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp, ncp->nc_vp);
#ifdef KDTRACE_HOOKS
	if (ncp->nc_vp != NULL) {
		SDT_PROBE(vfs, namecache, zap, done, ncp->nc_dvp,
		    ncp->nc_name, ncp->nc_vp, 0, 0);
	} else {
		SDT_PROBE(vfs, namecache, zap_negative, done, ncp->nc_dvp,
		    ncp->nc_name, 0, 0, 0);
	}
#endif
	vp = NULL;
	LIST_REMOVE(ncp, nc_hash);
	if (ncp->nc_flag & NCF_ISDOTDOT) {
		if (ncp == ncp->nc_dvp->v_cache_dd)
			ncp->nc_dvp->v_cache_dd = NULL;
	} else {
		LIST_REMOVE(ncp, nc_src);
		if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
			vp = ncp->nc_dvp;
			numcachehv--;
		}
	}
	if (ncp->nc_vp) {
		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
		if (ncp == ncp->nc_vp->v_cache_dd)
			ncp->nc_vp->v_cache_dd = NULL;
	} else {
		TAILQ_REMOVE(&ncneg, ncp, nc_dst);
		numneg--;
	}
	numcache--;
	cache_free(ncp);
	if (vp)
		vdrop(vp);
}

/*
 * Lookup an entry in the cache
 *
 * Lookup is called with dvp pointing to the directory to search,
 * cnp pointing to the name of the entry being sought. If the lookup
 * succeeds, the vnode is returned in *vpp, and a status of -1 is
 * returned. If the lookup determines that the name does not exist
 * (negative caching), a status of ENOENT is returned. If the lookup
 * fails, a status of zero is returned.  If the directory vnode is
 * recycled out from under us due to a forced unmount, a status of
 * ENOENT is returned.
 *
 * vpp is locked and ref'd on return.  If we're looking up DOTDOT, dvp is
 * unlocked.  If we're looking up . an extra ref is taken, but the lock is
 * not recursively acquired.
 */

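/*
 * Editor's sketch (not part of the original file): a caller such as a
 * filesystem lookup VOP typically dispatches on the return value as
 * follows; compare vfs_cache_lookup() later in this file:
 *
 *	error = cache_lookup(dvp, vpp, cnp);
 *	if (error == 0)			(miss: ask the filesystem)
 *		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
 *	if (error == -1)		(positive hit: *vpp locked + ref'd)
 *		return (0);
 *	return (error);			(ENOENT from a negative entry)
 */
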
int
cache_lookup(dvp, vpp, cnp)
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	struct namecache *ncp;
	u_int32_t hash;
	int error, ltype, wlocked;

	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		return (0);
	}
retry:
	CACHE_RLOCK();
	wlocked = 0;
	numcalls++;
	error = 0;

retry_wlocked:
	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			*vpp = dvp;
			CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
			    dvp, cnp->cn_nameptr);
			dothits++;
			SDT_PROBE(vfs, namecache, lookup, hit, dvp, ".",
			    *vpp, 0, 0);
			goto success;
		}
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			dotdothits++;
			if (dvp->v_cache_dd == NULL) {
				SDT_PROBE(vfs, namecache, lookup, miss, dvp,
				    "..", NULL, 0, 0);
				goto unlock;
			}
			if ((cnp->cn_flags & MAKEENTRY) == 0) {
				if (!wlocked && !CACHE_UPGRADE_LOCK())
					goto wlock;
				if (dvp->v_cache_dd->nc_flag & NCF_ISDOTDOT)
					cache_zap(dvp->v_cache_dd);
				dvp->v_cache_dd = NULL;
				CACHE_WUNLOCK();
				return (0);
			}
			if (dvp->v_cache_dd->nc_flag & NCF_ISDOTDOT)
				*vpp = dvp->v_cache_dd->nc_vp;
			else
				*vpp = dvp->v_cache_dd->nc_dvp;
			/* Return failure if negative entry was found. */
			if (*vpp == NULL) {
				ncp = dvp->v_cache_dd;
				goto negative_success;
			}
			CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
			    dvp, cnp->cn_nameptr, *vpp);
			SDT_PROBE(vfs, namecache, lookup, hit, dvp, "..",
			    *vpp, 0, 0);
			goto success;
		}
	}

	hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		numchecks++;
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	/* We failed to find an entry */
	if (ncp == NULL) {
		SDT_PROBE(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr,
		    NULL, 0, 0);
		if ((cnp->cn_flags & MAKEENTRY) == 0) {
			nummisszap++;
		} else {
			nummiss++;
		}
		nchstats.ncs_miss++;
		goto unlock;
	}

	/* We don't want to have an entry, so dump it */
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		numposzaps++;
		nchstats.ncs_badhits++;
		if (!wlocked && !CACHE_UPGRADE_LOCK())
			goto wlock;
		cache_zap(ncp);
		CACHE_WUNLOCK();
		return (0);
	}

	/* We found a "positive" match, return the vnode */
	if (ncp->nc_vp) {
		numposhits++;
		nchstats.ncs_goodhits++;
		*vpp = ncp->nc_vp;
		CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
		    dvp, cnp->cn_nameptr, *vpp, ncp);
		SDT_PROBE(vfs, namecache, lookup, hit, dvp, ncp->nc_name,
		    *vpp, 0, 0);
		goto success;
	}

negative_success:
	/* We found a negative match, and want to create it, so purge */
	if (cnp->cn_nameiop == CREATE) {
		numnegzaps++;
		nchstats.ncs_badhits++;
		if (!wlocked && !CACHE_UPGRADE_LOCK())
			goto wlock;
		cache_zap(ncp);
		CACHE_WUNLOCK();
		return (0);
	}

	if (!wlocked && !CACHE_UPGRADE_LOCK())
		goto wlock;
	numneghits++;
	/*
	 * We found a "negative" match, so we shift it to the end of
	 * the "negative" cache entries queue to satisfy LRU.  Also,
	 * check to see if the entry is a whiteout; indicate this to
	 * the componentname, if so.
	 */
	TAILQ_REMOVE(&ncneg, ncp, nc_dst);
	TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	nchstats.ncs_neghits++;
	if (ncp->nc_flag & NCF_WHITE)
		cnp->cn_flags |= ISWHITEOUT;
	SDT_PROBE(vfs, namecache, lookup, hit_negative, dvp, ncp->nc_name,
	    0, 0, 0);
	CACHE_WUNLOCK();
	return (ENOENT);

wlock:
	/*
	 * We need to update the cache after our lookup, so upgrade to
	 * a write lock and retry the operation.
	 */
	CACHE_RUNLOCK();
	CACHE_WLOCK();
	numupgrades++;
	wlocked = 1;
	goto retry_wlocked;

success:
	/*
	 * On success we return a locked and ref'd vnode as per the lookup
	 * protocol.
	 */
	if (dvp == *vpp) {   /* lookup on "." */
		VREF(*vpp);
		if (wlocked)
			CACHE_WUNLOCK();
		else
			CACHE_RUNLOCK();
		/*
		 * When we lookup "." we still can be asked to lock it
		 * differently...
		 */
		ltype = cnp->cn_lkflags & LK_TYPE_MASK;
		if (ltype != VOP_ISLOCKED(*vpp)) {
			if (ltype == LK_EXCLUSIVE) {
				vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
				if ((*vpp)->v_iflag & VI_DOOMED) {
					/* forced unmount */
					vrele(*vpp);
					*vpp = NULL;
					return (ENOENT);
				}
			} else
				vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY);
		}
		return (-1);
	}
	ltype = 0;	/* silence gcc warning */
	if (cnp->cn_flags & ISDOTDOT) {
		ltype = VOP_ISLOCKED(dvp);
		VOP_UNLOCK(dvp, 0);
	}
	VI_LOCK(*vpp);
	if (wlocked)
		CACHE_WUNLOCK();
	else
		CACHE_RUNLOCK();
	error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, cnp->cn_thread);
	if (cnp->cn_flags & ISDOTDOT) {
		vn_lock(dvp, ltype | LK_RETRY);
		if (dvp->v_iflag & VI_DOOMED) {
			if (error == 0)
				vput(*vpp);
			*vpp = NULL;
			return (ENOENT);
		}
	}
	if (error) {
		*vpp = NULL;
		goto retry;
	}
	if ((cnp->cn_flags & ISLASTCN) &&
	    (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
		ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
	}
	return (-1);

unlock:
	if (wlocked)
		CACHE_WUNLOCK();
	else
		CACHE_RUNLOCK();
	return (0);
}

/*
 * Add an entry to the cache.
 */
void
cache_enter(dvp, vp, cnp)
	struct vnode *dvp;
	struct vnode *vp;
	struct componentname *cnp;
{
	struct namecache *ncp, *n2;
	struct nchashhead *ncpp;
	u_int32_t hash;
	int flag;
	int hold;
	int zap;
	int len;

	CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
	VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp,
	    ("cache_enter: Adding a doomed vnode"));
	VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp,
	    ("cache_enter: Doomed vnode used as src"));

	if (!doingcache)
		return;

	/*
	 * Avoid blowout in namecache entries.
	 */
	if (numcache >= desiredvnodes * 2)
		return;

	flag = 0;
	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1)
			return;
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			CACHE_WLOCK();
			/*
			 * If dotdot entry already exists, just retarget it
			 * to new parent vnode, otherwise continue with new
			 * namecache entry allocation.
			 */
			if ((ncp = dvp->v_cache_dd) != NULL &&
			    ncp->nc_flag & NCF_ISDOTDOT) {
				KASSERT(ncp->nc_dvp == dvp,
				    ("wrong isdotdot parent"));
				if (ncp->nc_vp != NULL)
					TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst,
					    ncp, nc_dst);
				else
					TAILQ_REMOVE(&ncneg, ncp, nc_dst);
				if (vp != NULL)
					TAILQ_INSERT_HEAD(&vp->v_cache_dst,
					    ncp, nc_dst);
				else
					TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
				ncp->nc_vp = vp;
				CACHE_WUNLOCK();
				return;
			}
			dvp->v_cache_dd = NULL;
			SDT_PROBE(vfs, namecache, enter, done, dvp, "..", vp,
			    0, 0);
			CACHE_WUNLOCK();
			flag = NCF_ISDOTDOT;
		}
	}

	hold = 0;
	zap = 0;

	/*
	 * Calculate the hash key and setup as much of the new
	 * namecache entry as possible before acquiring the lock.
	 */
	ncp = cache_alloc(cnp->cn_namelen);
	ncp->nc_vp = vp;
	ncp->nc_dvp = dvp;
	ncp->nc_flag = flag;
	len = ncp->nc_nlen = cnp->cn_namelen;
	hash = fnv_32_buf(cnp->cn_nameptr, len, FNV1_32_INIT);
	strlcpy(ncp->nc_name, cnp->cn_nameptr, len + 1);
	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
	CACHE_WLOCK();

	/*
	 * See if this vnode or negative entry is already in the cache
	 * with this name.  This can happen with concurrent lookups of
	 * the same path name.
	 */
	ncpp = NCHHASH(hash);
	LIST_FOREACH(n2, ncpp, nc_hash) {
		if (n2->nc_dvp == dvp &&
		    n2->nc_nlen == cnp->cn_namelen &&
		    !bcmp(n2->nc_name, cnp->cn_nameptr, n2->nc_nlen)) {
			CACHE_WUNLOCK();
			cache_free(ncp);
			return;
		}
	}

	if (flag == NCF_ISDOTDOT) {
		/*
		 * See if we are trying to add .. entry, but some other lookup
		 * has populated v_cache_dd pointer already.
		 */
		if (dvp->v_cache_dd != NULL) {
			CACHE_WUNLOCK();
			cache_free(ncp);
			return;
		}
		KASSERT(vp == NULL || vp->v_type == VDIR,
		    ("wrong vnode type %p", vp));
		dvp->v_cache_dd = ncp;
	}

	numcache++;
	if (!vp) {
		numneg++;
		if (cnp->cn_flags & ISWHITEOUT)
			ncp->nc_flag |= NCF_WHITE;
	} else if (vp->v_type == VDIR) {
		if (flag != NCF_ISDOTDOT) {
			if ((n2 = vp->v_cache_dd) != NULL &&
			    (n2->nc_flag & NCF_ISDOTDOT) != 0)
				cache_zap(n2);
			vp->v_cache_dd = ncp;
		}
	} else {
		vp->v_cache_dd = NULL;
	}

	/*
	 * Insert the new namecache entry into the appropriate chain
	 * within the cache entries table.
	 */
	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
	if (flag != NCF_ISDOTDOT) {
		if (LIST_EMPTY(&dvp->v_cache_src)) {
			hold = 1;
			numcachehv++;
		}
		LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
	}

	/*
	 * If the entry is "negative", we place it into the
	 * "negative" cache queue, otherwise, we place it into the
	 * destination vnode's cache entries queue.
	 */
	if (vp) {
		TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
		SDT_PROBE(vfs, namecache, enter, done, dvp, ncp->nc_name, vp,
		    0, 0);
	} else {
		TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
		SDT_PROBE(vfs, namecache, enter_negative, done, dvp,
		    ncp->nc_name, 0, 0, 0);
	}
	if (numneg * ncnegfactor > numcache) {
		ncp = TAILQ_FIRST(&ncneg);
		zap = 1;
	}
	if (hold)
		vhold(dvp);
	if (zap)
		cache_zap(ncp);
	CACHE_WUNLOCK();
}

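/*
 * Editor's sketch of a hypothetical call site (not from this file): a
 * filesystem lookup routine adds an entry only when MAKEENTRY is
 * requested and, per the VNASSERTs above and the commit this revision
 * ships with, must re-check that dvp was not reclaimed while its lock
 * was dropped (as can happen after a dotdot lookup):
 *
 *	if ((cnp->cn_flags & MAKEENTRY) != 0 &&
 *	    (dvp->v_iflag & VI_DOOMED) == 0)
 *		cache_enter(dvp, *vpp, cnp);
 */
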
/*
 * Name cache initialization, from vfs_init() when we are booting
 */
static void
nchinit(void *dummy __unused)
{

	TAILQ_INIT(&ncneg);

	cache_zone_small = uma_zcreate("S VFS Cache", CACHE_ZONE_SMALL, NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cache_zone_large = uma_zcreate("L VFS Cache", CACHE_ZONE_LARGE, NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL);


/*
 * Invalidate all entries to a particular vnode.
 */
void
cache_purge(vp)
	struct vnode *vp;
{

	CTR1(KTR_VFS, "cache_purge(%p)", vp);
	SDT_PROBE(vfs, namecache, purge, done, vp, 0, 0, 0, 0);
	CACHE_WLOCK();
	while (!LIST_EMPTY(&vp->v_cache_src))
		cache_zap(LIST_FIRST(&vp->v_cache_src));
	while (!TAILQ_EMPTY(&vp->v_cache_dst))
		cache_zap(TAILQ_FIRST(&vp->v_cache_dst));
	if (vp->v_cache_dd != NULL) {
		KASSERT(vp->v_cache_dd->nc_flag & NCF_ISDOTDOT,
		    ("lost dotdot link"));
		cache_zap(vp->v_cache_dd);
	}
	KASSERT(vp->v_cache_dd == NULL, ("incomplete purge"));
	CACHE_WUNLOCK();
}

/*
 * Invalidate all negative entries for a particular directory vnode.
 */
void
cache_purge_negative(vp)
	struct vnode *vp;
{
	struct namecache *cp, *ncp;

	CTR1(KTR_VFS, "cache_purge_negative(%p)", vp);
	SDT_PROBE(vfs, namecache, purge_negative, done, vp, 0, 0, 0, 0);
	CACHE_WLOCK();
	LIST_FOREACH_SAFE(cp, &vp->v_cache_src, nc_src, ncp) {
		if (cp->nc_vp == NULL)
			cache_zap(cp);
	}
	CACHE_WUNLOCK();
}

/*
 * Flush all entries referencing a particular filesystem.
 */
void
cache_purgevfs(mp)
	struct mount *mp;
{
	struct nchashhead *ncpp;
	struct namecache *ncp, *nnp;

	/* Scan hash tables for applicable entries */
	SDT_PROBE(vfs, namecache, purgevfs, done, mp, 0, 0, 0, 0);
	CACHE_WLOCK();
	for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) {
		LIST_FOREACH_SAFE(ncp, ncpp, nc_hash, nnp) {
			if (ncp->nc_dvp->v_mount == mp)
				cache_zap(ncp);
		}
	}
	CACHE_WUNLOCK();
}

/*
 * Perform canonical checks and cache lookup; pass on to the filesystem
 * through VOP_CACHEDLOOKUP only if needed.
 */
int
vfs_cache_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct vnode *dvp;
	int error;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct ucred *cred = cnp->cn_cred;
	int flags = cnp->cn_flags;
	struct thread *td = cnp->cn_thread;

	*vpp = NULL;
	dvp = ap->a_dvp;

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	error = VOP_ACCESS(dvp, VEXEC, cred, td);
	if (error)
		return (error);

	error = cache_lookup(dvp, vpp, cnp);
	if (error == 0)
		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
	if (error == -1)
		return (0);
	return (error);
}


#ifndef _SYS_SYSPROTO_H_
struct __getcwd_args {
	u_char	*buf;
	u_int	buflen;
};
#endif

/*
 * XXX All of these sysctls would probably be more productive dead.
 */
static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
    "Disable the getcwd syscall");

/* Implementation of the getcwd syscall. */
int
__getcwd(td, uap)
	struct thread *td;
	struct __getcwd_args *uap;
{

	return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen));
}

int
kern___getcwd(struct thread *td, u_char *buf, enum uio_seg bufseg, u_int buflen)
{
	char *bp, *tmpbuf;
	struct filedesc *fdp;
	struct vnode *cdir, *rdir;
	int error, vfslocked;

	if (disablecwd)
		return (ENODEV);
	if (buflen < 2)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	tmpbuf = malloc(buflen, M_TEMP, M_WAITOK);
	fdp = td->td_proc->p_fd;
	FILEDESC_SLOCK(fdp);
	cdir = fdp->fd_cdir;
	VREF(cdir);
	rdir = fdp->fd_rdir;
	VREF(rdir);
	FILEDESC_SUNLOCK(fdp);
	error = vn_fullpath1(td, cdir, rdir, tmpbuf, &bp, buflen);
	vfslocked = VFS_LOCK_GIANT(rdir->v_mount);
	vrele(rdir);
	VFS_UNLOCK_GIANT(vfslocked);
	vfslocked = VFS_LOCK_GIANT(cdir->v_mount);
	vrele(cdir);
	VFS_UNLOCK_GIANT(vfslocked);

	if (!error) {
		if (bufseg == UIO_SYSSPACE)
			bcopy(bp, buf, strlen(bp) + 1);
		else
			error = copyout(bp, buf, strlen(bp) + 1);
#ifdef KTRACE
		if (KTRPOINT(curthread, KTR_NAMEI))
			ktrnamei(bp);
#endif
	}
	free(tmpbuf, M_TEMP);
	return (error);
}

/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define	STATNODE(name) \
	static u_int name; \
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")
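/*
 * Editor's note: STATNODE(numfullpathcalls) below, for example, expands
 * to a static counter plus its read-only sysctl:
 *
 *	static u_int numfullpathcalls;
 *	SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathcalls, CTLFLAG_RD,
 *	    &numfullpathcalls, 0, "");
 */
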
static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
    "Disable the vn_fullpath function");

/* These count for kern___getcwd(), too. */
STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);

/*
 * Retrieve the full filesystem path that corresponds to a vnode from the name
 * cache (if available)
 */
int
vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
{
	char *buf;
	struct filedesc *fdp;
	struct vnode *rdir;
	int error, vfslocked;

	if (disablefullpath)
		return (ENODEV);
	if (vn == NULL)
		return (EINVAL);

	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	fdp = td->td_proc->p_fd;
	FILEDESC_SLOCK(fdp);
	rdir = fdp->fd_rdir;
	VREF(rdir);
	FILEDESC_SUNLOCK(fdp);
	error = vn_fullpath1(td, vn, rdir, buf, retbuf, MAXPATHLEN);
	vfslocked = VFS_LOCK_GIANT(rdir->v_mount);
	vrele(rdir);
	VFS_UNLOCK_GIANT(vfslocked);

	if (!error)
		*freebuf = buf;
	else
		free(buf, M_TEMP);
	return (error);
}

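/*
 * Editor's sketch of the caller protocol (not from this file): on
 * success *retbuf points into a MAXPATHLEN buffer that vn_fullpath()
 * allocated from M_TEMP, and the caller releases it via *freebuf:
 *
 *	char *fullpath, *freepath;
 *
 *	if (vn_fullpath(td, vp, &fullpath, &freepath) == 0) {
 *		printf("path: %s\n", fullpath);
 *		free(freepath, M_TEMP);
 *	}
 */
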
/*
 * This function is similar to vn_fullpath, but it attempts to look up the
 * pathname relative to the global root mount point.  This is required for the
 * auditing sub-system, as audited pathnames must be absolute, relative to the
 * global root mount point.
 */
int
vn_fullpath_global(struct thread *td, struct vnode *vn,
    char **retbuf, char **freebuf)
{
	char *buf;
	int error;

	if (disablefullpath)
		return (ENODEV);
	if (vn == NULL)
		return (EINVAL);
	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN);
	if (!error)
		*freebuf = buf;
	else
		free(buf, M_TEMP);
	return (error);
}

int
vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen)
{
	int error;

	CACHE_RLOCK();
	error = vn_vptocnp_locked(vp, cred, buf, buflen);
	if (error == 0) {
		/*
		 * vn_vptocnp_locked() dropped hold acquired by
		 * VOP_VPTOCNP immediately after locking the
		 * cache.  Since we are going to drop the cache rlock,
		 * re-hold the result.
		 */
		vhold(*vp);
		CACHE_RUNLOCK();
	}
	return (error);
}

static int
vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf,
    u_int *buflen)
{
	struct vnode *dvp;
	struct namecache *ncp;
	int error, vfslocked;

	TAILQ_FOREACH(ncp, &((*vp)->v_cache_dst), nc_dst) {
		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
			break;
	}
	if (ncp != NULL) {
		if (*buflen < ncp->nc_nlen) {
			CACHE_RUNLOCK();
			numfullpathfail4++;
			error = ENOMEM;
			SDT_PROBE(vfs, namecache, fullpath, return, error,
			    vp, NULL, 0, 0);
			return (error);
		}
		*buflen -= ncp->nc_nlen;
		memcpy(buf + *buflen, ncp->nc_name, ncp->nc_nlen);
		SDT_PROBE(vfs, namecache, fullpath, hit, ncp->nc_dvp,
		    ncp->nc_name, vp, 0, 0);
		*vp = ncp->nc_dvp;
		return (0);
	}
	SDT_PROBE(vfs, namecache, fullpath, miss, vp, 0, 0, 0, 0);

	vhold(*vp);
	CACHE_RUNLOCK();
	vfslocked = VFS_LOCK_GIANT((*vp)->v_mount);
	vn_lock(*vp, LK_SHARED | LK_RETRY);
	error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen);
	VOP_UNLOCK(*vp, 0);
	vdrop(*vp);
	VFS_UNLOCK_GIANT(vfslocked);
	if (error) {
		numfullpathfail2++;
		SDT_PROBE(vfs, namecache, fullpath, return, error, vp,
		    NULL, 0, 0);
		return (error);
	}

	*vp = dvp;
	CACHE_RLOCK();
	if ((*vp)->v_iflag & VI_DOOMED) {
		/* forced unmount */
		CACHE_RUNLOCK();
		vdrop(*vp);
		error = ENOENT;
		SDT_PROBE(vfs, namecache, fullpath, return, error, vp,
		    NULL, 0, 0);
		return (error);
	}
	vdrop(*vp);

	return (0);
}

/*
 * The magic behind kern___getcwd() and vn_fullpath().
 */
static int
vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen)
{
	int error, slash_prefixed;
#ifdef KDTRACE_HOOKS
	struct vnode *startvp = vp;
#endif

	buflen--;
	buf[buflen] = '\0';
	error = 0;
	slash_prefixed = 0;

	SDT_PROBE(vfs, namecache, fullpath, entry, vp, 0, 0, 0, 0);
	numfullpathcalls++;
	CACHE_RLOCK();
	if (vp->v_type != VDIR) {
		error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen);
		if (error)
			return (error);
		if (buflen == 0) {
			CACHE_RUNLOCK();
			return (ENOMEM);
		}
		buf[--buflen] = '/';
		slash_prefixed = 1;
	}
	while (vp != rdir && vp != rootvnode) {
		if (vp->v_vflag & VV_ROOT) {
			if (vp->v_iflag & VI_DOOMED) {	/* forced unmount */
				CACHE_RUNLOCK();
				error = ENOENT;
				SDT_PROBE(vfs, namecache, fullpath, return,
				    error, vp, NULL, 0, 0);
				break;
			}
			vp = vp->v_mount->mnt_vnodecovered;
			continue;
		}
		if (vp->v_type != VDIR) {
			CACHE_RUNLOCK();
			numfullpathfail1++;
			error = ENOTDIR;
			SDT_PROBE(vfs, namecache, fullpath, return,
			    error, vp, NULL, 0, 0);
			break;
		}
		error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen);
		if (error)
			break;
		if (buflen == 0) {
			CACHE_RUNLOCK();
			error = ENOMEM;
			SDT_PROBE(vfs, namecache, fullpath, return, error,
			    startvp, NULL, 0, 0);
			break;
		}
		buf[--buflen] = '/';
		slash_prefixed = 1;
	}
	if (error)
		return (error);
	if (!slash_prefixed) {
		if (buflen == 0) {
			CACHE_RUNLOCK();
			numfullpathfail4++;
			SDT_PROBE(vfs, namecache, fullpath, return, ENOMEM,
			    startvp, NULL, 0, 0);
			return (ENOMEM);
		}
		buf[--buflen] = '/';
	}
	numfullpathfound++;
	CACHE_RUNLOCK();

	SDT_PROBE(vfs, namecache, fullpath, return, 0, startvp, buf + buflen,
	    0, 0);
	*retbuf = buf + buflen;
	return (0);
}

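/*
 * Editor's sketch of the buffer convention above (not from this file):
 * vn_fullpath1() fills buf from the end toward the front, prepending
 * "/<name>" for each level walked, so the finished path begins at
 * buf + buflen rather than at buf:
 *
 *	[ unused ............. '/' 'u' 's' 'r' '/' 'b' 'i' 'n' '\0' ]
 *	  ^ buf                 ^ *retbuf = buf + buflen
 */
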
int
vn_commname(struct vnode *vp, char *buf, u_int buflen)
{
	struct namecache *ncp;
	int l;

	CACHE_RLOCK();
	TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst)
		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
			break;
	if (ncp == NULL) {
		CACHE_RUNLOCK();
		return (ENOENT);
	}
	l = min(ncp->nc_nlen, buflen - 1);
	memcpy(buf, ncp->nc_name, l);
	CACHE_RUNLOCK();
	buf[l] = '\0';
	return (0);
}