MFp4:

- Plug memory leak.
- Respect the underlying vnode's properties rather than assuming that the
  user wants root:wheel + 0755.  Useful when using tmpfs(5) for /tmp.
- Use the roundup2 and howmany macros instead of rolling our own versions.
- Try to fix the `fsx -W -R foo' case.
- Instead of blindly zeroing a page, determine whether we need a pagein, in
  order to prevent data corruption.
- Fix several bugs reported by Coverity.

Submitted by:	Mingyan Guo <guomingyan gmail com>, Howard Su, delphij
Coverity ID:	CID 2550, 2551, 2552, 2557
Approved by:	re (tmpfs blanket)
commit 1df86a323d
parent ead41a8810

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
	svn path=/head/; revision=171308
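
For readers unfamiliar with the macros mentioned above: howmany() and
roundup2() come from <sys/param.h>.  Below is a small standalone sketch (not
part of this commit; the sample size value is made up) of how howmany()
replaces the hand-rolled page-count arithmetic that tmpfs_mount() used before:

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE	4096
    /* Same definitions as the <sys/param.h> macros used by the diff below. */
    #define howmany(x, y)	(((x) + ((y) - 1)) / (y))
    #define roundup2(x, y)	(((x) + ((y) - 1)) & (~((y) - 1)))	/* y must be a power of 2 */

    int
    main(void)
    {
    	size_t size_max = 123456;	/* hypothetical "size" mount option, in bytes */

    	/* Old, hand-rolled rounding (see the tmpfs_mount() hunk below). */
    	size_t pages_old = size_max / PAGE_SIZE +
    	    (size_max % PAGE_SIZE == 0 ? 0 : 1);

    	/* New style, using the standard macro. */
    	size_t pages_new = howmany(size_max, PAGE_SIZE);

    	printf("old=%zu new=%zu roundup2=%zu\n",
    	    pages_old, pages_new, (size_t)roundup2(size_max, PAGE_SIZE));
    	return (0);
    }

Both expressions round a byte count up to a whole number of pages; the macro
form is what the diff switches to.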
--- sys/fs/tmpfs/tmpfs.h
+++ sys/fs/tmpfs/tmpfs.h
@@ -467,8 +467,8 @@ TMPFS_PAGES_MAX(struct tmpfs_mount *tmp)
 }
 
 /* Returns the available space for the given file system. */
-#define TMPFS_META_PAGES(tmp) ((tmp)->tm_nodes_inuse * (sizeof(struct tmpfs_node) \
-				+ sizeof(struct tmpfs_dirent))/PAGE_SIZE + 1)
+#define TMPFS_META_PAGES(tmp) (howmany((tmp)->tm_nodes_inuse * (sizeof(struct tmpfs_node) \
+				+ sizeof(struct tmpfs_dirent)), PAGE_SIZE))
 #define TMPFS_FILE_PAGES(tmp) ((tmp)->tm_pages_used)
 
 #define TMPFS_PAGES_AVAIL(tmp) (TMPFS_PAGES_MAX(tmp) > \
@@ -518,26 +518,4 @@ VP_TO_TMPFS_DIR(struct vnode *vp)
 	return node;
 }
 
-/* ---------------------------------------------------------------------
- * USER AND KERNEL DEFINITIONS
- * --------------------------------------------------------------------- */
-
-/*
- * This structure is used to communicate mount parameters between userland
- * and kernel space.
- */
-#define TMPFS_ARGS_VERSION	1
-struct tmpfs_args {
-	int	ta_version;
-
-	/* Size counters. */
-	ino_t	ta_nodes_max;
-	off_t	ta_size_max;
-
-	/* Root node attributes. */
-	uid_t	ta_root_uid;
-	gid_t	ta_root_gid;
-	mode_t	ta_root_mode;
-};
-
 #endif /* _FS_TMPFS_TMPFS_H_ */
--- sys/fs/tmpfs/tmpfs_subr.c
+++ sys/fs/tmpfs/tmpfs_subr.c
@@ -312,7 +312,9 @@ tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, struct vnode **vpp,
 loop:
 	if (node->tn_vnode != NULL) {
 		vp = node->tn_vnode;
-		vget(vp, LK_EXCLUSIVE | LK_RETRY, td);
+		error = vget(vp, LK_EXCLUSIVE | LK_RETRY, td);
+		if (error)
+			return error;
 
 		/*
 		 * Make sure the vnode is still there after
@@ -323,7 +325,6 @@ tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, struct vnode **vpp,
 			goto loop;
 		}
 
-		error = 0;
 		goto out;
 	}
 
@@ -385,9 +386,17 @@ tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, struct vnode **vpp,
 	}
 
 	vnode_pager_setsize(vp, node->tn_size);
-	insmntque(vp, mp);
-
-	error = 0;
+	error = insmntque(vp, mp);
+	if (error) {
+		node->tn_vnode = NULL;
+		if (node->tn_vpstate & TMPFS_VNODE_WANT) {
+			node->tn_vpstate &= ~TMPFS_VNODE_WANT;
+			TMPFS_NODE_UNLOCK(node);
+			wakeup((caddr_t) &node->tn_vpstate);
+		} else
+			TMPFS_NODE_UNLOCK(node);
+		return error;
+	}
 	node->tn_vnode = vp;
 
 unlock:
@@ -850,32 +859,33 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize)
 	node->tn_size = newsize;
 	vnode_pager_setsize(vp, newsize);
 	if (newsize < oldsize) {
-		size_t zerolen = MIN(round_page(newsize), node->tn_size) - newsize;
-		struct vm_object *uobj = node->tn_reg.tn_aobj;
+		size_t zerolen = round_page(newsize) - newsize;
+		vm_object_t uobj = node->tn_reg.tn_aobj;
 		vm_page_t m;
 
 		/*
 		 * free "backing store"
 		 */
 
+		VM_OBJECT_LOCK(uobj);
 		if (newpages < oldpages) {
-			VM_OBJECT_LOCK(uobj);
 			swap_pager_freespace(uobj,
 						newpages, oldpages - newpages);
-			VM_OBJECT_UNLOCK(uobj);
 			vm_object_page_remove(uobj,
 				OFF_TO_IDX(newsize + PAGE_MASK), 0, FALSE);
 		}
 
 		/*
 		 * zero out the truncated part of the last page.
 		 */
 
-		if (zerolen > 0) {
+		if (zerolen > 0) {
 			m = vm_page_grab(uobj, OFF_TO_IDX(newsize),
 			    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 			pmap_zero_page_area(m, PAGE_SIZE - zerolen,
 			    zerolen);
 			vm_page_wakeup(m);
 		}
-	}
+		VM_OBJECT_UNLOCK(uobj);
+
+	}
@@ -1226,10 +1236,6 @@ void
 tmpfs_update(struct vnode *vp)
 {
 
-	struct tmpfs_node *node;
-
-	node = VP_TO_TMPFS_NODE(vp);
-
 	tmpfs_itimes(vp, NULL, NULL);
 }
 
--- sys/fs/tmpfs/tmpfs_vfsops.c
+++ sys/fs/tmpfs/tmpfs_vfsops.c
@@ -51,6 +51,7 @@
 __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
+#include <sys/limits.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/kernel.h>
@@ -151,6 +152,8 @@ tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
 		TMPFS_LOCK(tmp);
 		node->tn_id = tmp->tm_nodes_last++;
 		TMPFS_UNLOCK(tmp);
+		if (node->tn_id == INT_MAX)
+			panic("all avariable id is used.");
 		node->tn_gen = arc4random();
 	} else {
 		node->tn_gen++;
@@ -195,14 +198,23 @@ tmpfs_node_fini(void *mem, int size)
 }
 
 static int
-tmpfs_mount(struct mount *mp, struct thread *l)
+tmpfs_mount(struct mount *mp, struct thread *td)
 {
-	struct tmpfs_args args;
 	struct tmpfs_mount *tmp;
 	struct tmpfs_node *root;
 	size_t pages, mem_size;
 	ino_t nodes;
 	int error;
+	/* Size counters. */
+	ino_t	nodes_max;
+	off_t	size_max;
+
+	/* Root node attributes. */
+	uid_t	root_uid;
+	gid_t	root_gid;
+	mode_t	root_mode;
+
+	struct vattr va;
 
 	if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts))
 		return (EINVAL);
@@ -214,19 +226,28 @@ tmpfs_mount(struct mount *mp, struct thread *l)
 		return EOPNOTSUPP;
 	}
 
-	if (vfs_scanopt(mp->mnt_optnew, "gid", "%d", &args.ta_root_gid) != 1)
-		args.ta_root_gid = 0;
-	if (vfs_scanopt(mp->mnt_optnew, "uid", "%d", &args.ta_root_uid) != 1)
-		args.ta_root_uid = 0;
-	if (vfs_scanopt(mp->mnt_optnew, "mode", "%o", &args.ta_root_mode) != 1)
-		args.ta_root_mode = TMPFS_DEFAULT_ROOT_MODE;
-	if(vfs_scanopt(mp->mnt_optnew, "inodes", "%d", &args.ta_nodes_max) != 1)
-		args.ta_nodes_max = 0;
+	vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY, td);
+	error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred, td);
+	VOP_UNLOCK(mp->mnt_vnodecovered, 0, td);
+	if (error)
+		return (error);
+
+	if (mp->mnt_cred->cr_ruid != 0 ||
+	    vfs_scanopt(mp->mnt_optnew, "gid", "%d", &root_gid) != 1)
+		root_gid = va.va_gid;
+	if (mp->mnt_cred->cr_ruid != 0 ||
+	    vfs_scanopt(mp->mnt_optnew, "uid", "%d", &root_uid) != 1)
+		root_uid = va.va_uid;
+	if (mp->mnt_cred->cr_ruid != 0 ||
+	    vfs_scanopt(mp->mnt_optnew, "mode", "%o", &root_mode) != 1)
+		root_mode = va.va_mode;
+	if(vfs_scanopt(mp->mnt_optnew, "inodes", "%d", &nodes_max) != 1)
+		nodes_max = 0;
 
 	if(vfs_scanopt(mp->mnt_optnew,
 	    "size",
-	    "%qu", &args.ta_size_max) != 1)
-		args.ta_size_max = 0;
+	    "%qu", &size_max) != 1)
+		size_max = 0;
 
 	/* Do not allow mounts if we do not have enough memory to preserve
 	 * the minimum reserved pages. */
@@ -239,17 +260,16 @@ tmpfs_mount(struct mount *mp, struct thread *l)
 	 * allowed to use, based on the maximum size the user passed in
 	 * the mount structure. A value of zero is treated as if the
 	 * maximum available space was requested. */
-	if (args.ta_size_max < PAGE_SIZE || args.ta_size_max >= SIZE_MAX)
+	if (size_max < PAGE_SIZE || size_max >= SIZE_MAX)
 		pages = SIZE_MAX;
 	else
-		pages = args.ta_size_max / PAGE_SIZE +
-		    (args.ta_size_max % PAGE_SIZE == 0 ? 0 : 1);
+		pages = howmany(size_max, PAGE_SIZE);
 	MPASS(pages > 0);
 
-	if (args.ta_nodes_max <= 3)
+	if (nodes_max <= 3)
 		nodes = 3 + pages * PAGE_SIZE / 1024;
 	else
-		nodes = args.ta_nodes_max;
+		nodes = nodes_max;
 	MPASS(nodes >= 3);
 
 	/* Allocate the tmpfs mount structure and fill it. */
@@ -277,12 +297,12 @@ tmpfs_mount(struct mount *mp, struct thread *l)
 	    tmpfs_node_ctor, tmpfs_node_dtor,
 	    tmpfs_node_init, tmpfs_node_fini,
 	    UMA_ALIGN_PTR,
-	    UMA_ZONE_NOFREE);
+	    0);
 
 	/* Allocate the root node. */
-	error = tmpfs_alloc_node(tmp, VDIR, args.ta_root_uid,
-	    args.ta_root_gid, args.ta_root_mode & ALLPERMS, NULL, NULL,
-	    VNOVAL, l, &root);
+	error = tmpfs_alloc_node(tmp, VDIR, root_uid,
+	    root_gid, root_mode & ALLPERMS, NULL, NULL,
+	    VNOVAL, td, &root);
 
 	if (error != 0 || root == NULL) {
 		uma_zdestroy(tmp->tm_node_pool);
@@ -360,6 +380,7 @@ tmpfs_unmount(struct mount *mp, int mntflags, struct thread *l)
 
 	mtx_destroy(&tmp->allnode_lock);
+	MPASS(tmp->tm_pages_used == 0);
 	MPASS(tmp->tm_nodes_inuse == 0);
 
 	/* Throw away the tmpfs_mount structure. */
 	free(mp->mnt_data, M_TMPFSMNT);
--- sys/fs/tmpfs/tmpfs_vnops.c
+++ sys/fs/tmpfs/tmpfs_vnops.c
@@ -450,6 +450,7 @@ tmpfs_uio_xfer(struct tmpfs_mount *tmp, struct tmpfs_node *node,
 	vm_page_t	m;
 	size_t		len;
 	int		error = 0;
+	int		behind = 0, ahead = 0;
 
 	/* uobj - locked by caller */
 
@@ -468,8 +469,21 @@ tmpfs_uio_xfer(struct tmpfs_mount *tmp, struct tmpfs_node *node,
 		len = MIN(len, (PAGE_SIZE - d));
 		m = vm_page_grab(uobj, idx, VM_ALLOC_WIRED | VM_ALLOC_ZERO |
 		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
-		if (uio->uio_rw == UIO_READ && m->valid != VM_PAGE_BITS_ALL)
-			vm_page_zero_invalid(m, TRUE);
+		if (m->valid != VM_PAGE_BITS_ALL){
+			if (vm_pager_has_page(uobj, idx, &behind, &ahead)){
+				error = vm_pager_get_pages(uobj, &m, 1, 0);
+				if (error == VM_PAGER_ERROR){
+					printf("vm_pager_get_pages error\n");
+					goto out;
+				}
+#ifdef DIAGNOSTIC
+				/* XXX */
+				printf("tmpfs gets page from pager\n");
+#endif
+			} else {
+				vm_page_zero_invalid(m, TRUE);
+			}
+		}
 		VM_OBJECT_UNLOCK(uobj);
 		sched_pin();
 		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
@@ -488,6 +502,7 @@ tmpfs_uio_xfer(struct tmpfs_mount *tmp, struct tmpfs_node *node,
 		vm_page_wakeup(m);
 		vm_page_unlock_queues();
 	}
+out:
 	vm_object_pip_subtract(uobj, 1);
 	VM_OBJECT_UNLOCK(uobj);
 	return error;
@@ -680,14 +695,12 @@ tmpfs_link(struct vop_link_args *v)
 
 	int error;
 	struct tmpfs_dirent *de;
-	struct tmpfs_node *dnode;
 	struct tmpfs_node *node;
 
 	MPASS(VOP_ISLOCKED(dvp, cnp->cn_thread));
 	MPASS(cnp->cn_flags & HASBUF);
 	MPASS(dvp != vp); /* XXX When can this be false? */
 
-	dnode = VP_TO_TMPFS_DIR(dvp);
 	node = VP_TO_TMPFS_NODE(vp);
 
 	/* XXX: Why aren't the following two tests done by the caller? */
@@ -753,7 +766,6 @@ tmpfs_rename(struct vop_rename_args *v)
 	char *newname;
 	int error;
 	struct tmpfs_dirent *de;
-	struct tmpfs_mount *tmp;
 	struct tmpfs_node *fdnode;
 	struct tmpfs_node *fnode;
 	struct tmpfs_node *tdnode;
@@ -775,7 +787,6 @@ tmpfs_rename(struct vop_rename_args *v)
 		goto out;
 	}
 
-	tmp = VFS_TO_TMPFS(tdvp->v_mount);
 	tdnode = VP_TO_TMPFS_DIR(tdvp);
 
 	/* If source and target are the same file, there is nothing to do. */
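
Usage note (an illustration, not part of the commit): with the tmpfs_mount()
changes above, the root directory of a new tmpfs mount takes its owner, group
and mode from the covered vnode (via VOP_GETATTR()) by default, and root may
still override them with the uid, gid and mode options, e.g. (hypothetical
sizes):

    mount -t tmpfs -o size=67108864,inodes=8192,mode=1777 tmpfs /tmp

The size, inodes, uid, gid and mode option names correspond directly to the
vfs_scanopt() calls in the tmpfs_mount() hunk; size is given in bytes and is
converted to a page count with howmany().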