freebsd/sys/kern/kern_exec.c

/*-
* Copyright (c) 1993, David Greenman
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_capsicum.h"
#include "opt_hwpmc_hooks.h"
#include "opt_ktrace.h"
#include "opt_vm.h"
#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/acct.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/wait.h>
#include <sys/malloc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/namei.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/sf_buf.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/shm.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif
#include <machine/reg.h>
#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>
#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
dtrace_execexit_func_t dtrace_fasttrap_exec;
#endif
SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE1(proc, kernel, , exec, "char *");
SDT_PROBE_DEFINE1(proc, kernel, , exec__failure, "int");
SDT_PROBE_DEFINE1(proc, kernel, , exec__success, "char *");
MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");
int coredump_pack_fileinfo = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump_pack_fileinfo, CTLFLAG_RWTUN,
&coredump_pack_fileinfo, 0,
"Enable file path packing in 'procstat -f' coredump notes");
int coredump_pack_vmmapinfo = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump_pack_vmmapinfo, CTLFLAG_RWTUN,
&coredump_pack_vmmapinfo, 0,
"Enable file path packing in 'procstat -v' coredump notes");
static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
static int do_execve(struct thread *td, struct image_args *args,
struct mac *mac_p);
/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD,
NULL, 0, sysctl_kern_ps_strings, "LU", "");
/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD|
CTLFLAG_CAPRD, NULL, 0, sysctl_kern_usrstack, "LU", "");
SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD,
NULL, 0, sysctl_kern_stackprot, "I", "");
u_long ps_arg_cache_limit = PAGE_SIZE / 16;
SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
&ps_arg_cache_limit, 0, "");
static int disallow_high_osrel;
SYSCTL_INT(_kern, OID_AUTO, disallow_high_osrel, CTLFLAG_RW,
&disallow_high_osrel, 0,
"Disallow execution of binaries built for higher version of the world");
static int map_at_zero = 0;
SYSCTL_INT(_security_bsd, OID_AUTO, map_at_zero, CTLFLAG_RWTUN, &map_at_zero, 0,
"Permit processes to map an object at virtual address 0.");
static int
sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
{
struct proc *p;
int error;
p = curproc;
#ifdef SCTL_MASK32
if (req->flags & SCTL_MASK32) {
unsigned int val;
val = (unsigned int)p->p_sysent->sv_psstrings;
error = SYSCTL_OUT(req, &val, sizeof(val));
} else
#endif
error = SYSCTL_OUT(req, &p->p_sysent->sv_psstrings,
sizeof(p->p_sysent->sv_psstrings));
return (error);
}
static int
sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
{
struct proc *p;
int error;
p = curproc;
#ifdef SCTL_MASK32
if (req->flags & SCTL_MASK32) {
unsigned int val;
val = (unsigned int)p->p_sysent->sv_usrstack;
error = SYSCTL_OUT(req, &val, sizeof(val));
} else
#endif
error = SYSCTL_OUT(req, &p->p_sysent->sv_usrstack,
sizeof(p->p_sysent->sv_usrstack));
return (error);
}
static int
sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
{
struct proc *p;
p = curproc;
return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
sizeof(p->p_sysent->sv_stackprot)));
}
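/*
 * For example, userland can read the values exported by the handlers above
 * with sysctlbyname(3); a minimal sketch (hypothetical program, not used
 * anywhere in this file):
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		u_long usrstack;
 *		size_t len = sizeof(usrstack);
 *
 *		// Top of the user stack for this process's ABI.
 *		if (sysctlbyname("kern.usrstack", &usrstack, &len,
 *		    NULL, 0) == 0)
 *			printf("usrstack: %#lx\n", usrstack);
 *		return (0);
 *	}
 */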
/*
* Each of the items is a pointer to a `const struct execsw', hence the
* double pointer here.
*/
static const struct execsw **execsw;
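/*
 * An image activator supplies an ex_imgact handler in its execsw entry and
 * registers it (the ELF and shell activators do this with EXEC_SET()).  The
 * handler returns -1 if the image is not its format, 0 on success, or an
 * errno value on failure, matching the loop in do_execve() below.  A rough
 * sketch, with hypothetical names and magic:
 *
 *	static int
 *	example_imgact(struct image_params *imgp)
 *	{
 *
 *		if (strncmp(imgp->image_header, "#!EX", 4) != 0)
 *			return (-1);	// not our format, try the next one
 *		// ... set up imgp (entry point, interpreter, ...) ...
 *		return (0);
 *	}
 *
 *	static struct execsw example_execsw = { example_imgact, "example" };
 *	EXEC_SET(example, example_execsw);
 */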
#ifndef _SYS_SYSPROTO_H_
struct execve_args {
char *fname;
char **argv;
char **envv;
};
#endif
int
sys_execve(struct thread *td, struct execve_args *uap)
{
struct image_args args;
struct vmspace *oldvmspace;
int error;
error = pre_execve(td, &oldvmspace);
if (error != 0)
return (error);
error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
uap->argv, uap->envv);
if (error == 0)
error = kern_execve(td, &args, NULL);
post_execve(td, error, oldvmspace);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct fexecve_args {
int fd;
char **argv;
char **envv;
};
#endif
int
sys_fexecve(struct thread *td, struct fexecve_args *uap)
{
struct image_args args;
struct vmspace *oldvmspace;
int error;
error = pre_execve(td, &oldvmspace);
if (error != 0)
return (error);
error = exec_copyin_args(&args, NULL, UIO_SYSSPACE,
uap->argv, uap->envv);
if (error == 0) {
args.fd = uap->fd;
error = kern_execve(td, &args, NULL);
}
post_execve(td, error, oldvmspace);
return (error);
}
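/*
 * The userland counterpart: fexecve(2) takes a descriptor opened with
 * O_EXEC (or O_RDONLY), as enforced via fgetvp_exec() below.  A minimal
 * calling sketch (hypothetical path and arguments):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/bin/ls", O_EXEC | O_CLOEXEC);
 *	char *argv[] = { "ls", "-l", NULL };
 *	char *envv[] = { NULL };
 *
 *	if (fd >= 0)
 *		fexecve(fd, argv, envv);	// returns only on error
 */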
#ifndef _SYS_SYSPROTO_H_
struct __mac_execve_args {
char *fname;
char **argv;
char **envv;
struct mac *mac_p;
};
#endif
int
sys___mac_execve(struct thread *td, struct __mac_execve_args *uap)
{
#ifdef MAC
struct image_args args;
struct vmspace *oldvmspace;
int error;
error = pre_execve(td, &oldvmspace);
if (error != 0)
return (error);
error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
uap->argv, uap->envv);
if (error == 0)
error = kern_execve(td, &args, uap->mac_p);
post_execve(td, error, oldvmspace);
return (error);
#else
return (ENOSYS);
#endif
}
int
pre_execve(struct thread *td, struct vmspace **oldvmspace)
{
struct proc *p;
int error;
KASSERT(td == curthread, ("non-current thread %p", td));
error = 0;
p = td->td_proc;
if ((p->p_flag & P_HADTHREADS) != 0) {
PROC_LOCK(p);
if (thread_single(p, SINGLE_BOUNDARY) != 0)
error = ERESTART;
PROC_UNLOCK(p);
}
KASSERT(error != 0 || (td->td_pflags & TDP_EXECVMSPC) == 0,
("nested execve"));
*oldvmspace = p->p_vmspace;
return (error);
}
void
post_execve(struct thread *td, int error, struct vmspace *oldvmspace)
{
struct proc *p;
KASSERT(td == curthread, ("non-current thread %p", td));
p = td->td_proc;
if ((p->p_flag & P_HADTHREADS) != 0) {
PROC_LOCK(p);
/*
* If success, we upgrade to SINGLE_EXIT state to
* force other threads to suicide.
*/
if (error == 0)
thread_single(p, SINGLE_EXIT);
else
thread_single_end(p, SINGLE_BOUNDARY);
PROC_UNLOCK(p);
}
if ((td->td_pflags & TDP_EXECVMSPC) != 0) {
KASSERT(p->p_vmspace != oldvmspace,
("oldvmspace still used"));
vmspace_free(oldvmspace);
td->td_pflags &= ~TDP_EXECVMSPC;
}
}
/*
* XXX: kern_execve has the astonishing property of not always returning to
* the caller. If sufficiently bad things happen during the call to
* do_execve(), it can end up calling exit1(); as a result, callers must
* avoid doing anything which they might need to undo (e.g., allocating
* memory).
*/
int
kern_execve(struct thread *td, struct image_args *args, struct mac *mac_p)
{
AUDIT_ARG_ARGV(args->begin_argv, args->argc,
args->begin_envv - args->begin_argv);
AUDIT_ARG_ENVV(args->begin_envv, args->envc,
args->endp - args->begin_envv);
return (do_execve(td, args, mac_p));
}
/*
* In-kernel implementation of execve(). All arguments are assumed to be
* userspace pointers from the passed thread.
*/
static int
do_execve(td, args, mac_p)
struct thread *td;
struct image_args *args;
struct mac *mac_p;
{
struct proc *p = td->td_proc;
struct nameidata nd;
struct ucred *newcred = NULL, *oldcred;
struct uidinfo *euip = NULL;
register_t *stack_base;
int error, i;
struct image_params image_params, *imgp;
struct vattr attr;
int (*img_first)(struct image_params *);
struct pargs *oldargs = NULL, *newargs = NULL;
struct sigacts *oldsigacts, *newsigacts;
#ifdef KTRACE
struct vnode *tracevp = NULL;
struct ucred *tracecred = NULL;
#endif
struct vnode *oldtextvp = NULL, *newtextvp;
cap_rights_t rights;
int credential_changing;
int textset;
#ifdef MAC
struct label *interpvplabel = NULL;
int will_transition;
#endif
#ifdef HWPMC_HOOKS
struct pmckern_procexec pe;
#endif
static const char fexecv_proc_title[] = "(fexecv)";
imgp = &image_params;
/*
* Lock the process and set the P_INEXEC flag to indicate that
* it should be left alone until we're done here. This is
* necessary to avoid race conditions - e.g. in ptrace() -
* that might allow a local user to illicitly obtain elevated
* privileges.
*/
PROC_LOCK(p);
KASSERT((p->p_flag & P_INEXEC) == 0,
("%s(): process already has P_INEXEC flag", __func__));
p->p_flag |= P_INEXEC;
PROC_UNLOCK(p);
/*
* Initialize part of the common data
*/
bzero(imgp, sizeof(*imgp));
imgp->proc = p;
imgp->attr = &attr;
imgp->args = args;
#ifdef MAC
error = mac_execve_enter(imgp, mac_p);
if (error)
goto exec_fail;
#endif
/*
* Translate the file name. namei() returns a vnode pointer
* in ni_vp among other things.
*
* XXXAUDIT: It would be desirable to also audit the name of the
* interpreter if this is an interpreted binary.
*/
if (args->fname != NULL) {
NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | FOLLOW | SAVENAME
| AUDITVNODE1, UIO_SYSSPACE, args->fname, td);
}
SDT_PROBE1(proc, kernel, , exec, args->fname);
interpret:
if (args->fname != NULL) {
#ifdef CAPABILITY_MODE
/*
* While capability mode can't reach this point via direct
* path arguments to execve(), we also don't allow
* interpreters to be used in capability mode (for now).
* Catch indirect lookups and return a permissions error.
*/
if (IN_CAPABILITY_MODE(td)) {
error = ECAPMODE;
goto exec_fail;
}
#endif
error = namei(&nd);
if (error)
goto exec_fail;
newtextvp = nd.ni_vp;
imgp->vp = newtextvp;
} else {
AUDIT_ARG_FD(args->fd);
/*
* Descriptors opened only with O_EXEC or O_RDONLY are allowed.
*/
error = fgetvp_exec(td, args->fd,
cap_rights_init(&rights, CAP_FEXECVE), &newtextvp);
if (error)
goto exec_fail;
vn_lock(newtextvp, LK_EXCLUSIVE | LK_RETRY);
AUDIT_ARG_VNODE1(newtextvp);
imgp->vp = newtextvp;
}
/*
* Check file permissions (also 'opens' file)
*/
error = exec_check_permissions(imgp);
if (error)
goto exec_fail_dealloc;
imgp->object = imgp->vp->v_object;
if (imgp->object != NULL)
vm_object_reference(imgp->object);
/*
* Set VV_TEXT now so no one can write to the executable while we're
* activating it.
*
* Remember if this was set before and unset it in case this is not
* actually an executable image.
*/
textset = VOP_IS_TEXT(imgp->vp);
VOP_SET_TEXT(imgp->vp);
error = exec_map_first_page(imgp);
if (error)
goto exec_fail_dealloc;
imgp->proc->p_osrel = 0;
/*
* If the current process has a special image activator it
* wants to try first, call it. For example, emulating shell
* scripts differently.
*/
error = -1;
if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL)
error = img_first(imgp);
/*
* Loop through the list of image activators, calling each one.
* An activator returns -1 if there is no match, 0 on success,
* and an error otherwise.
*/
for (i = 0; error == -1 && execsw[i]; ++i) {
if (execsw[i]->ex_imgact == NULL ||
execsw[i]->ex_imgact == img_first) {
continue;
}
error = (*execsw[i]->ex_imgact)(imgp);
}
if (error) {
if (error == -1) {
if (textset == 0)
VOP_UNSET_TEXT(imgp->vp);
error = ENOEXEC;
}
goto exec_fail_dealloc;
}
/*
* Special interpreter operation: clean up and loop back up to try to
* activate the interpreter.
*/
if (imgp->interpreted) {
exec_unmap_first_page(imgp);
/*
* VV_TEXT needs to be unset for scripts. There is a short
* period before we determine that something is a script where
* VV_TEXT will be set. The vnode lock is held over this
* entire period so nothing should illegitimately be blocked.
*/
VOP_UNSET_TEXT(imgp->vp);
/* free name buffer and old vnode */
if (args->fname != NULL)
NDFREE(&nd, NDF_ONLY_PNBUF);
#ifdef MAC
mac_execve_interpreter_enter(newtextvp, &interpvplabel);
#endif
if (imgp->opened) {
VOP_CLOSE(newtextvp, FREAD, td->td_ucred, td);
imgp->opened = 0;
}
vput(newtextvp);
vm_object_deallocate(imgp->object);
imgp->object = NULL;
/* set new name to that of the interpreter */
NDINIT(&nd, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME,
UIO_SYSSPACE, imgp->interpreter_name, td);
args->fname = imgp->interpreter_name;
goto interpret;
}
/*
* NB: We unlock the vnode here because it is believed that none
* of the sv_copyout_strings/sv_fixup operations require the vnode.
*/
VOP_UNLOCK(imgp->vp, 0);
/*
* Do our best to calculate the full path to the image file.
*/
if (imgp->auxargs != NULL &&
((args->fname != NULL && args->fname[0] == '/') ||
vn_fullpath(td, imgp->vp, &imgp->execpath, &imgp->freepath) != 0))
imgp->execpath = args->fname;
if (disallow_high_osrel &&
P_OSREL_MAJOR(p->p_osrel) > P_OSREL_MAJOR(__FreeBSD_version)) {
error = ENOEXEC;
uprintf("Osrel %d for image %s too high\n", p->p_osrel,
imgp->execpath != NULL ? imgp->execpath : "<unresolved>");
vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
goto exec_fail_dealloc;
}
/* ABI enforces the use of Capsicum. Switch into capabilities mode. */
if (SV_PROC_FLAG(p, SV_CAPSICUM))
sys_cap_enter(td, NULL);
/*
* Copy out strings (args and env) and initialize stack base
*/
if (p->p_sysent->sv_copyout_strings)
stack_base = (*p->p_sysent->sv_copyout_strings)(imgp);
else
stack_base = exec_copyout_strings(imgp);
/*
* If custom stack fixup routine present for this process
* let it do the stack setup.
* Else stuff argument count as first item on stack
*/
if (p->p_sysent->sv_fixup != NULL)
(*p->p_sysent->sv_fixup)(&stack_base, imgp);
else
suword(--stack_base, imgp->args->argc);
if (args->fdp != NULL) {
/* Install a brand new file descriptor table. */
fdinstall_remapped(td, args->fdp);
args->fdp = NULL;
} else {
/*
* Keep on using the existing file descriptor table. For
* security and other reasons, the file descriptor table
* cannot be shared after an exec.
*/
fdunshare(td);
/* close files on exec */
fdcloseexec(td);
}
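/*
 * fdcloseexec() is where close-on-exec takes effect: a descriptor the old
 * image opened as, say,
 *
 *	int fd = open("/etc/passwd", O_RDONLY | O_CLOEXEC);
 *
 * is closed at this point and not inherited by the new image.
 */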
/*
* Malloc things before we need locks.
*/
i = imgp->args->begin_envv - imgp->args->begin_argv;
/* Cache arguments if they fit inside our allowance */
if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
newargs = pargs_alloc(i);
bcopy(imgp->args->begin_argv, newargs->ar_args, i);
}
vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
/*
* For security and other reasons, signal handlers cannot
* be shared after an exec. The new process gets a copy of the old
* handlers. In execsigs(), the new process will have its signals
* reset.
*/
if (sigacts_shared(p->p_sigacts)) {
oldsigacts = p->p_sigacts;
newsigacts = sigacts_alloc();
sigacts_copy(newsigacts, oldsigacts);
} else {
oldsigacts = NULL;
newsigacts = NULL; /* satisfy gcc */
}
PROC_LOCK(p);
if (oldsigacts)
p->p_sigacts = newsigacts;
oldcred = p->p_ucred;
/* Stop profiling */
stopprofclock(p);
/* reset caught signals */
execsigs(p);
/* name this process - nameiexec(p, ndp) */
bzero(p->p_comm, sizeof(p->p_comm));
if (args->fname)
bcopy(nd.ni_cnd.cn_nameptr, p->p_comm,
min(nd.ni_cnd.cn_namelen, MAXCOMLEN));
else if (vn_commname(newtextvp, p->p_comm, sizeof(p->p_comm)) != 0)
bcopy(fexecv_proc_title, p->p_comm, sizeof(fexecv_proc_title));
bcopy(p->p_comm, td->td_name, sizeof(td->td_name));
#ifdef KTR
sched_clear_tdname(td);
#endif
/*
* mark as execed, wakeup the process that vforked (if any) and tell
* it that it now has its own resources back
*/
p->p_flag |= P_EXEC;
if ((p->p_flag2 & P2_NOTRACE_EXEC) == 0)
p->p_flag2 &= ~P2_NOTRACE;
if (p->p_flag & P_PPWAIT) {
p->p_flag &= ~(P_PPWAIT | P_PPTRACE);
cv_broadcast(&p->p_pwait);
}
/*
* Implement image setuid/setgid.
*
* Don't honor setuid/setgid if the filesystem prohibits it or if
* the process is being traced.
*
* We disable setuid/setgid/etc in compatibility mode on the basis
* that most setugid applications are not written with that
* environment in mind, and will therefore almost certainly operate
* incorrectly. In principle there's no reason that setugid
* applications might not be useful in capability mode, so we may want
* to reconsider this conservative design choice in the future.
*
* XXXMAC: For the time being, use NOSUID to also prohibit
* transitions on the file system.
*/
credential_changing = 0;
credential_changing |= (attr.va_mode & S_ISUID) && oldcred->cr_uid !=
attr.va_uid;
credential_changing |= (attr.va_mode & S_ISGID) && oldcred->cr_gid !=
attr.va_gid;
#ifdef MAC
will_transition = mac_vnode_execve_will_transition(oldcred, imgp->vp,
interpvplabel, imgp);
credential_changing |= will_transition;
#endif
if (credential_changing &&
#ifdef CAPABILITY_MODE
((oldcred->cr_flags & CRED_FLAG_CAPMODE) == 0) &&
#endif
(imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
(p->p_flag & P_TRACED) == 0) {
/*
* Turn off syscall tracing for set-id programs, except for
* root. Record any set-id flags first to make sure that
* we do not regain any tracing during a possible block.
*/
setsugid(p);
#ifdef KTRACE
if (p->p_tracecred != NULL &&
priv_check_cred(p->p_tracecred, PRIV_DEBUG_DIFFCRED, 0))
ktrprocexec(p, &tracecred, &tracevp);
#endif
/*
* Close any file descriptors 0..2 that reference procfs,
* then make sure file descriptors 0..2 are in use.
*
* Both fdsetugidsafety() and fdcheckstd() may call functions
* taking sleepable locks, so temporarily drop our locks.
*/
PROC_UNLOCK(p);
VOP_UNLOCK(imgp->vp, 0);
fdsetugidsafety(td);
error = fdcheckstd(td);
if (error != 0)
goto done1;
newcred = crdup(oldcred);
euip = uifind(attr.va_uid);
vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
PROC_LOCK(p);
/*
* Set the new credentials.
*/
if (attr.va_mode & S_ISUID)
change_euid(newcred, euip);
if (attr.va_mode & S_ISGID)
change_egid(newcred, attr.va_gid);
#ifdef MAC
if (will_transition) {
mac_vnode_execve_transition(oldcred, newcred, imgp->vp,
interpvplabel, imgp);
}
#endif
/*
* Implement correct POSIX saved-id behavior.
*
* XXXMAC: Note that the current logic will save the
* uid and gid if a MAC domain transition occurs, even
* though maybe it shouldn't.
*/
change_svuid(newcred, newcred->cr_uid);
change_svgid(newcred, newcred->cr_gid);
proc_set_cred(p, newcred);
} else {
if (oldcred->cr_uid == oldcred->cr_ruid &&
oldcred->cr_gid == oldcred->cr_rgid)
p->p_flag &= ~P_SUGID;
/*
* Implement correct POSIX saved-id behavior.
*
* XXX: It's not clear that the existing behavior is
* POSIX-compliant. A number of sources indicate that the
* saved uid/gid should only be updated if the new ruid is
* not equal to the old ruid, or the new euid is not equal
* to the old euid and the new euid is not equal to the old
* ruid. The FreeBSD code always updates the saved uid/gid.
* Also, this code uses the new (replaced) euid and egid as
* the source, which may or may not be the right ones to use.
*/
if (oldcred->cr_svuid != oldcred->cr_uid ||
oldcred->cr_svgid != oldcred->cr_gid) {
PROC_UNLOCK(p);
VOP_UNLOCK(imgp->vp, 0);
newcred = crdup(oldcred);
vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
PROC_LOCK(p);
change_svuid(newcred, newcred->cr_uid);
change_svgid(newcred, newcred->cr_gid);
proc_set_cred(p, newcred);
}
}
/*
* Store the vp for use in procfs. This vnode was referenced by namei
* or fgetvp_exec.
*/
oldtextvp = p->p_textvp;
p->p_textvp = newtextvp;
#ifdef KDTRACE_HOOKS
/*
* Tell the DTrace fasttrap provider about the exec if it
* has declared an interest.
*/
if (dtrace_fasttrap_exec)
dtrace_fasttrap_exec(p);
#endif
/*
* Notify others that we exec'd, and clear the P_INEXEC flag
* as we're now a bona fide freshly-execed process.
*/
KNOTE_LOCKED(&p->p_klist, NOTE_EXEC);
p->p_flag &= ~P_INEXEC;
/* clear "fork but no exec" flag, as we _are_ execing */
p->p_acflag &= ~AFORK;
/*
* Free any previous argument cache and replace it with
* the new argument cache, if any.
*/
oldargs = p->p_args;
p->p_args = newargs;
newargs = NULL;
#ifdef HWPMC_HOOKS
/*
* Check if system-wide sampling is in effect or if the
* current process is using PMCs. If so, do exec() time
* processing. This processing needs to happen AFTER the
* P_INEXEC flag is cleared.
*
* The proc lock needs to be released before taking the PMC
* SX.
*/
if (PMC_SYSTEM_SAMPLING_ACTIVE() || PMC_PROC_IS_USING_PMCS(p)) {
PROC_UNLOCK(p);
VOP_UNLOCK(imgp->vp, 0);
pe.pm_credentialschanged = credential_changing;
pe.pm_entryaddr = imgp->entry_addr;
PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC, (void *) &pe);
vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
} else
PROC_UNLOCK(p);
#else /* !HWPMC_HOOKS */
PROC_UNLOCK(p);
#endif
/* Set values passed into the program in registers. */
if (p->p_sysent->sv_setregs)
(*p->p_sysent->sv_setregs)(td, imgp,
(u_long)(uintptr_t)stack_base);
else
exec_setregs(td, imgp, (u_long)(uintptr_t)stack_base);
vfs_mark_atime(imgp->vp, td->td_ucred);
SDT_PROBE1(proc, kernel, , exec__success, args->fname);
VOP_UNLOCK(imgp->vp, 0);
done1:
/*
* Free any resources malloc'd earlier that we didn't use.
*/
if (euip != NULL)
uifree(euip);
if (newcred != NULL)
crfree(oldcred);
/*
* Handle deferred decrement of ref counts.
*/
if (oldtextvp != NULL)
vrele(oldtextvp);
#ifdef KTRACE
if (tracevp != NULL)
vrele(tracevp);
if (tracecred != NULL)
crfree(tracecred);
#endif
vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
pargs_drop(oldargs);
pargs_drop(newargs);
if (oldsigacts != NULL)
sigacts_free(oldsigacts);
exec_fail_dealloc:
/*
* free various allocated resources
*/
if (imgp->firstpage != NULL)
exec_unmap_first_page(imgp);
if (imgp->vp != NULL) {
if (args->fname)
NDFREE(&nd, NDF_ONLY_PNBUF);
if (imgp->opened)
VOP_CLOSE(imgp->vp, FREAD, td->td_ucred, td);
if (error != 0)
vput(imgp->vp);
else
VOP_UNLOCK(imgp->vp, 0);
}
if (imgp->object != NULL)
vm_object_deallocate(imgp->object);
free(imgp->freepath, M_TEMP);
if (error == 0) {
PROC_LOCK(p);
td->td_dbgflags |= TDB_EXEC;
PROC_UNLOCK(p);
/*
* Stop the process here if its stop event mask has
* the S_EXEC bit set.
*/
STOPEVENT(p, S_EXEC, 0);
goto done2;
}
exec_fail:
/* we're done here, clear P_INEXEC */
PROC_LOCK(p);
p->p_flag &= ~P_INEXEC;
PROC_UNLOCK(p);
SDT_PROBE1(proc, kernel, , exec__failure, error);
done2:
#ifdef MAC
mac_execve_exit(imgp);
mac_execve_interpreter_exit(interpvplabel);
#endif
exec_free_args(args);
if (error && imgp->vmspace_destroyed) {
/* sorry, no process to return to anymore; exit gracefully */
exit1(td, 0, SIGABRT);
/* NOT REACHED */
}
#ifdef KTRACE
if (error == 0)
ktrprocctor(p);
#endif
return (error);
}
int
exec_map_first_page(imgp)
struct image_params *imgp;
{
int rv, i, after, initial_pagein;
vm_page_t ma[VM_INITIAL_PAGEIN];
vm_object_t object;
if (imgp->firstpage != NULL)
exec_unmap_first_page(imgp);
object = imgp->vp->v_object;
if (object == NULL)
return (EACCES);
VM_OBJECT_WLOCK(object);
#if VM_NRESERVLEVEL > 0
vm_object_color(object, 0);
#endif
ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL);
if (ma[0]->valid != VM_PAGE_BITS_ALL) {
if (!vm_pager_has_page(object, 0, NULL, &after)) {
vm_page_lock(ma[0]);
vm_page_free(ma[0]);
vm_page_unlock(ma[0]);
vm_page_xunbusy(ma[0]);
VM_OBJECT_WUNLOCK(object);
return (EIO);
}
initial_pagein = min(after, VM_INITIAL_PAGEIN);
KASSERT(initial_pagein <= object->size,
("%s: initial_pagein %d object->size %ju",
__func__, initial_pagein, (uintmax_t )object->size));
for (i = 1; i < initial_pagein; i++) {
if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) {
if (ma[i]->valid)
break;
if (!vm_page_tryxbusy(ma[i]))
break;
} else {
ma[i] = vm_page_alloc(object, i,
VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED);
if (ma[i] == NULL)
break;
}
}
initial_pagein = i;
rv = vm_pager_get_pages(object, ma, initial_pagein, NULL, NULL);
if (rv != VM_PAGER_OK) {
for (i = 0; i < initial_pagein; i++) {
vm_page_lock(ma[i]);
vm_page_free(ma[i]);
vm_page_unlock(ma[i]);
vm_page_xunbusy(ma[i]);
}
VM_OBJECT_WUNLOCK(object);
return (EIO);
}
for (i = 1; i < initial_pagein; i++)
vm_page_readahead_finish(ma[i]);
}
vm_page_xunbusy(ma[0]);
vm_page_lock(ma[0]);
vm_page_hold(ma[0]);
vm_page_activate(ma[0]);
vm_page_unlock(ma[0]);
VM_OBJECT_WUNLOCK(object);
imgp->firstpage = sf_buf_alloc(ma[0], 0);
imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);
return (0);
}
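/*
 * Illustrative sketch, not part of the original source: an image
 * activator typically consumes only the header mapped above, e.g. it
 * tests the magic number and returns -1 to defer to the next execsw
 * entry when the format does not match (assuming the usual ELF
 * definitions):
 *
 *	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
 *
 *	if (!IS_ELF(*hdr))
 *		return (-1);
 */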
void
exec_unmap_first_page(imgp)
struct image_params *imgp;
{
vm_page_t m;
if (imgp->firstpage != NULL) {
m = sf_buf_page(imgp->firstpage);
sf_buf_free(imgp->firstpage);
imgp->firstpage = NULL;
vm_page_lock(m);
vm_page_unhold(m);
vm_page_unlock(m);
}
}
/*
* Destroy old address space, and allocate a new stack.
* The new stack is only SGROWSIZ large because it is grown
* automatically in trap.c.
*/
int
exec_new_vmspace(imgp, sv)
struct image_params *imgp;
struct sysentvec *sv;
{
int error;
struct proc *p = imgp->proc;
struct vmspace *vmspace = p->p_vmspace;
vm_object_t obj;
struct rlimit rlim_stack;
vm_offset_t sv_minuser, stack_addr;
vm_map_t map;
u_long ssiz;
imgp->vmspace_destroyed = 1;
imgp->sysent = sv;
/* May be called with Giant held */
EVENTHANDLER_INVOKE(process_exec, p, imgp);
/*
* Blow away the entire process VM if the address space is not
* shared; otherwise, create a new VM space so that other threads
* are not disrupted.
*/
map = &vmspace->vm_map;
if (map_at_zero)
sv_minuser = sv->sv_minuser;
else
sv_minuser = MAX(sv->sv_minuser, PAGE_SIZE);
if (vmspace->vm_refcnt == 1 && vm_map_min(map) == sv_minuser &&
vm_map_max(map) == sv->sv_maxuser) {
shmexit(vmspace);
pmap_remove_pages(vmspace_pmap(vmspace));
vm_map_remove(map, vm_map_min(map), vm_map_max(map));
} else {
error = vmspace_exec(p, sv_minuser, sv->sv_maxuser);
if (error)
return (error);
vmspace = p->p_vmspace;
map = &vmspace->vm_map;
}
/* Map a shared page */
obj = sv->sv_shared_page_obj;
if (obj != NULL) {
vm_object_reference(obj);
error = vm_map_fixed(map, obj, 0,
sv->sv_shared_page_base, sv->sv_shared_page_len,
VM_PROT_READ | VM_PROT_EXECUTE,
VM_PROT_READ | VM_PROT_EXECUTE,
MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
if (error) {
vm_object_deallocate(obj);
return (error);
}
}
/* Allocate a new stack */
if (imgp->stack_sz != 0) {
ssiz = trunc_page(imgp->stack_sz);
PROC_LOCK(p);
lim_rlimit_proc(p, RLIMIT_STACK, &rlim_stack);
PROC_UNLOCK(p);
if (ssiz > rlim_stack.rlim_max)
ssiz = rlim_stack.rlim_max;
if (ssiz > rlim_stack.rlim_cur) {
rlim_stack.rlim_cur = ssiz;
kern_setrlimit(curthread, RLIMIT_STACK, &rlim_stack);
}
} else if (sv->sv_maxssiz != NULL) {
ssiz = *sv->sv_maxssiz;
} else {
ssiz = maxssiz;
}
stack_addr = sv->sv_usrstack - ssiz;
error = vm_map_stack(map, stack_addr, (vm_size_t)ssiz,
obj != NULL && imgp->stack_prot != 0 ? imgp->stack_prot :
sv->sv_stackprot,
VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
if (error)
return (error);
/*
* vm_ssize and vm_maxsaddr are somewhat antiquated concepts, but they
* are still used to enforce the stack rlimit on the process stack.
*/
vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
vmspace->vm_maxsaddr = (char *)stack_addr;
return (0);
}
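/*
 * Rough sketch of the address space produced above, assuming the
 * common layout in which the ABI places the shared page at the top of
 * the user address space and the stack immediately below it (the
 * exact addresses come from the sysentvec and the stack rlimit):
 *
 *	sv_maxuser	+-------------------------------+
 *			| shared page (if present)      | sv_shared_page_base
 *	sv_usrstack	+-------------------------------+
 *			| stack, grows down, <= ssiz    | stack_addr
 *			+-------------------------------+
 *			| (text, data, heap and other   |
 *			|  mappings are added later by  |
 *			|  the image activator)         |
 *	sv_minuser	+-------------------------------+
 */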
/*
* Copy out argument and environment strings from the old process address
* space into the temporary string buffer.
*/
int
exec_copyin_args(struct image_args *args, char *fname,
enum uio_seg segflg, char **argv, char **envv)
{
u_long argp, envp;
int error;
size_t length;
bzero(args, sizeof(*args));
if (argv == NULL)
return (EFAULT);
/*
* Allocate demand-paged memory for the file name, argument, and
* environment strings.
*/
error = exec_alloc_args(args);
if (error != 0)
return (error);
/*
* Copy the file name.
*/
if (fname != NULL) {
args->fname = args->buf;
error = (segflg == UIO_SYSSPACE) ?
copystr(fname, args->fname, PATH_MAX, &length) :
copyinstr(fname, args->fname, PATH_MAX, &length);
if (error != 0)
goto err_exit;
} else
length = 0;
args->begin_argv = args->buf + length;
args->endp = args->begin_argv;
args->stringspace = ARG_MAX;
/*
* extract arguments first
*/
for (;;) {
error = fueword(argv++, &argp);
if (error == -1) {
error = EFAULT;
goto err_exit;
}
if (argp == 0)
break;
error = copyinstr((void *)(uintptr_t)argp, args->endp,
args->stringspace, &length);
if (error != 0) {
if (error == ENAMETOOLONG)
error = E2BIG;
goto err_exit;
}
args->stringspace -= length;
args->endp += length;
args->argc++;
}
args->begin_envv = args->endp;
/*
* extract environment strings
*/
if (envv) {
for (;;) {
error = fueword(envv++, &envp);
if (error == -1) {
error = EFAULT;
goto err_exit;
}
if (envp == 0)
break;
error = copyinstr((void *)(uintptr_t)envp,
args->endp, args->stringspace, &length);
if (error != 0) {
if (error == ENAMETOOLONG)
error = E2BIG;
goto err_exit;
}
args->stringspace -= length;
args->endp += length;
args->envc++;
}
}
return (0);
err_exit:
exec_free_args(args);
return (error);
}
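/*
 * Worked example (a sketch, not from the original source): for
 * execve("/bin/ls", { "ls", "-l", NULL }, { "TERM=xterm", NULL })
 * the buffer built above ends up as:
 *
 *	args->fname:       "/bin/ls\0"
 *	args->begin_argv:  "ls\0" "-l\0"	(args->argc == 2)
 *	args->begin_envv:  "TERM=xterm\0"	(args->envc == 1)
 *	args->endp:        first unused byte; args->stringspace holds
 *			   the number of bytes still available
 */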
int
exec_copyin_data_fds(struct thread *td, struct image_args *args,
const void *data, size_t datalen, const int *fds, size_t fdslen)
{
struct filedesc *ofdp;
const char *p;
int *kfds;
int error;
memset(args, '\0', sizeof(*args));
ofdp = td->td_proc->p_fd;
if (datalen >= ARG_MAX || fdslen > ofdp->fd_lastfile + 1)
return (E2BIG);
error = exec_alloc_args(args);
if (error != 0)
return (error);
args->begin_argv = args->buf;
args->stringspace = ARG_MAX;
if (datalen > 0) {
/*
* Argument buffer has been provided. Copy it into the
* kernel as a single string and add a terminating null
* byte.
*/
error = copyin(data, args->begin_argv, datalen);
if (error != 0)
goto err_exit;
args->begin_argv[datalen] = '\0';
args->endp = args->begin_argv + datalen + 1;
args->stringspace -= datalen + 1;
/*
* Traditional argument counting. Count the number of
* null bytes.
*/
for (p = args->begin_argv; p < args->endp; ++p)
if (*p == '\0')
++args->argc;
} else {
/* No argument buffer provided. */
args->endp = args->begin_argv;
}
/* There are no environment variables. */
args->begin_envv = args->endp;
/* Create new file descriptor table. */
kfds = malloc(fdslen * sizeof(int), M_TEMP, M_WAITOK);
error = copyin(fds, kfds, fdslen * sizeof(int));
if (error != 0) {
free(kfds, M_TEMP);
goto err_exit;
}
error = fdcopy_remapped(ofdp, kfds, fdslen, &args->fdp);
free(kfds, M_TEMP);
if (error != 0)
goto err_exit;
return (0);
err_exit:
exec_free_args(args);
return (error);
}
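/*
 * Worked example (a sketch, not from the original source): passing
 * data == "foo\0bar\0" with datalen == 8 and fds == { 0, 1, 4 } with
 * fdslen == 3 yields args->argc == 3 (the copied-in blob receives one
 * extra terminating '\0'), and a remapped descriptor table in which
 * descriptors 0, 1 and 2 of the new image refer to the caller's
 * descriptors 0, 1 and 4.
 */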
/*
* Allocate temporary demand-paged, zero-filled memory for the file name,
* argument, and environment strings. Returns zero if the allocation succeeds
* and ENOMEM otherwise.
*/
int
exec_alloc_args(struct image_args *args)
{
args->buf = (char *)kmap_alloc_wait(exec_map, PATH_MAX + ARG_MAX);
return (args->buf != NULL ? 0 : ENOMEM);
}
void
exec_free_args(struct image_args *args)
{
if (args->buf != NULL) {
kmap_free_wakeup(exec_map, (vm_offset_t)args->buf,
PATH_MAX + ARG_MAX);
args->buf = NULL;
}
if (args->fname_buf != NULL) {
free(args->fname_buf, M_TEMP);
args->fname_buf = NULL;
}
if (args->fdp != NULL)
fdescfree_remapped(args->fdp);
}
/*
* Copy strings out to the new process address space, constructing new arg
* and env vector tables. Return a pointer to the base so that it can be used
* as the initial stack pointer.
*/
register_t *
exec_copyout_strings(imgp)
struct image_params *imgp;
{
int argc, envc;
char **vectp;
char *stringp;
uintptr_t destp;
register_t *stack_base;
struct ps_strings *arginfo;
struct proc *p;
size_t execpath_len;
int szsigcode, szps;
char canary[sizeof(long) * 8];
szps = sizeof(pagesizes[0]) * MAXPAGESIZES;
/*
* Calculate string base and vector table pointers.
* Also deal with signal trampoline code for this exec type.
*/
if (imgp->execpath != NULL && imgp->auxargs != NULL)
execpath_len = strlen(imgp->execpath) + 1;
else
execpath_len = 0;
p = imgp->proc;
szsigcode = 0;
arginfo = (struct ps_strings *)p->p_sysent->sv_psstrings;
if (p->p_sysent->sv_sigcode_base == 0) {
if (p->p_sysent->sv_szsigcode != NULL)
szsigcode = *(p->p_sysent->sv_szsigcode);
}
destp = (uintptr_t)arginfo;
/*
* install sigcode
*/
if (szsigcode != 0) {
destp -= szsigcode;
destp = rounddown2(destp, sizeof(void *));
copyout(p->p_sysent->sv_sigcode, (void *)destp, szsigcode);
}
/*
* Copy the image path for the rtld.
*/
if (execpath_len != 0) {
destp -= execpath_len;
imgp->execpathp = destp;
copyout(imgp->execpath, (void *)destp, execpath_len);
}
/*
* Prepare the canary for SSP.
*/
arc4rand(canary, sizeof(canary), 0);
destp -= sizeof(canary);
imgp->canary = destp;
copyout(canary, (void *)destp, sizeof(canary));
imgp->canarylen = sizeof(canary);
/*
* Prepare the pagesizes array.
*/
destp -= szps;
destp = rounddown2(destp, sizeof(void *));
imgp->pagesizes = destp;
copyout(pagesizes, (void *)destp, szps);
imgp->pagesizeslen = szps;
destp -= ARG_MAX - imgp->args->stringspace;
destp = rounddown2(destp, sizeof(void *));
/*
* If we have a valid auxargs ptr, prepare some room
* on the stack.
*/
if (imgp->auxargs) {
/*
* 'AT_COUNT * 2' is the size reserved for the ELF Auxargs data
* when the image activator has not specified its own size.
*/
imgp->auxarg_size = (imgp->auxarg_size) ? imgp->auxarg_size :
(AT_COUNT * 2);
/*
* The '+ 2' is for the null pointers at the end of each of
* the arg and env vector sets, and imgp->auxarg_size is room
* for the arguments of the runtime loader.
*/
vectp = (char **)(destp - (imgp->args->argc +
imgp->args->envc + 2 + imgp->auxarg_size)
* sizeof(char *));
} else {
/*
* The '+ 2' is for the null pointers at the end of each of
* the arg and env vector sets
*/
vectp = (char **)(destp - (imgp->args->argc + imgp->args->envc
+ 2) * sizeof(char *));
}
/*
* vectp also becomes our initial stack base
*/
stack_base = (register_t *)vectp;
stringp = imgp->args->begin_argv;
argc = imgp->args->argc;
envc = imgp->args->envc;
/*
* Copy out strings - arguments and environment.
*/
copyout(stringp, (void *)destp, ARG_MAX - imgp->args->stringspace);
/*
* Fill in "ps_strings" struct for ps, w, etc.
*/
suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp);
suword32(&arginfo->ps_nargvstr, argc);
/*
* Fill in argument portion of vector table.
*/
for (; argc > 0; --argc) {
suword(vectp++, (long)(intptr_t)destp);
while (*stringp++ != 0)
destp++;
destp++;
}
/* a null vector table pointer separates the argp's from the envp's */
suword(vectp++, 0);
suword(&arginfo->ps_envstr, (long)(intptr_t)vectp);
suword32(&arginfo->ps_nenvstr, envc);
/*
* Fill in environment portion of vector table.
*/
for (; envc > 0; --envc) {
suword(vectp++, (long)(intptr_t)destp);
while (*stringp++ != 0)
destp++;
destp++;
}
/* end of vector table is a null pointer */
suword(vectp, 0);
return (stack_base);
}
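/*
 * Sketch of the user stack image built above, from high to low
 * addresses (which optional pieces are present depends on the ABI and
 * on imgp->auxargs):
 *
 *	sv_psstrings	struct ps_strings
 *			signal trampoline (only if there is no shared page)
 *			executable path for rtld
 *			SSP canary
 *			pagesizes[] array
 *			argument and environment strings
 *			room for the ELF auxargs vector
 *			envv[] pointers, NULL terminated
 *	stack_base ->	argv[] pointers, NULL terminated
 */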
/*
* Check permissions of file to execute.
* Called with imgp->vp locked.
* Return 0 for success or error code on failure.
*/
int
exec_check_permissions(imgp)
struct image_params *imgp;
{
struct vnode *vp = imgp->vp;
struct vattr *attr = imgp->attr;
struct thread *td;
int error, writecount;
td = curthread;
/* Get file attributes */
error = VOP_GETATTR(vp, attr, td->td_ucred);
if (error)
return (error);
#ifdef MAC
error = mac_vnode_check_exec(td->td_ucred, imgp->vp, imgp);
if (error)
return (error);
#endif
/*
* 1) Check if file execution is disabled for the filesystem that
* this file resides on.
* 2) Ensure that at least one execute bit is on. Otherwise, a
* privileged user will always succeed, and we don't want this
* to happen unless the file really is executable.
* 3) Ensure that the file is a regular file.
*/
if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
(attr->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0 ||
(attr->va_type != VREG))
return (EACCES);
/*
* Zero length files can't be exec'd
*/
if (attr->va_size == 0)
return (ENOEXEC);
/*
* Check for execute permission to file based on current credentials.
*/
error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
if (error)
return (error);
/*
* Check number of open-for-writes on the file and deny execution
* if there are any.
*/
error = VOP_GET_WRITECOUNT(vp, &writecount);
if (error != 0)
return (error);
if (writecount != 0)
return (ETXTBSY);
/*
* Call filesystem specific open routine (which does nothing in the
* general case).
*/
error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
if (error == 0)
imgp->opened = 1;
return (error);
}
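/*
 * Net effect of the checks above, as seen from execve(2) (a sketch):
 * a file on a noexec mount, a file with no execute bits, or a
 * non-regular file fails with EACCES; an empty file fails with
 * ENOEXEC; a file that some process still has open for writing fails
 * with ETXTBSY.
 */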
/*
* Exec handler registration
*/
int
exec_register(execsw_arg)
const struct execsw *execsw_arg;
{
const struct execsw **es, **xs, **newexecsw;
int count = 2; /* New slot and trailing NULL */
if (execsw)
for (es = execsw; *es; es++)
count++;
newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
if (newexecsw == NULL)
return (ENOMEM);
xs = newexecsw;
if (execsw)
for (es = execsw; *es; es++)
*xs++ = *es;
*xs++ = execsw_arg;
*xs = NULL;
if (execsw)
free(execsw, M_TEMP);
execsw = newexecsw;
return (0);
}
int
exec_unregister(execsw_arg)
const struct execsw *execsw_arg;
{
const struct execsw **es, **xs, **newexecsw;
int count = 1;
if (execsw == NULL)
panic("unregister with no handlers left?\n");
for (es = execsw; *es; es++) {
if (*es == execsw_arg)
break;
}
if (*es == NULL)
return (ENOENT);
for (es = execsw; *es; es++)
if (*es != execsw_arg)
count++;
newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
if (newexecsw == NULL)
return (ENOMEM);
xs = newexecsw;
for (es = execsw; *es; es++)
if (*es != execsw_arg)
*xs++ = *es;
*xs = NULL;
if (execsw)
free(execsw, M_TEMP);
execsw = newexecsw;
return (0);
}
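/*
 * Illustrative sketch, assuming the conventions from <sys/imgact.h>:
 * image activators normally do not call exec_register() and
 * exec_unregister() directly; they declare an execsw entry and let
 * EXEC_SET() hook it up at module load and unload time, e.g.
 *
 *	static struct execsw foo_execsw = {
 *		exec_foo_imgact,
 *		"foo"
 *	};
 *	EXEC_SET(foo, foo_execsw);
 *
 * where exec_foo_imgact() and "foo" are hypothetical names used only
 * for illustration.
 */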