/*
* Copyright (c) 1982, 1986, 1989, 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)kern_proc.c 8.7 (Berkeley) 2/14/95
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_ktrace.h"
#include "opt_kstack_pages.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysent.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/user.h>
#include <sys/jail.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <machine/critical.h>
MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
static MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
static void doenterpgrp(struct proc *, struct pgrp *);
static void orphanpg(struct pgrp *pg);
static void pgadjustjobc(struct pgrp *pgrp, int entering);
static void pgdelete(struct pgrp *);
static int proc_ctor(void *mem, int size, void *arg, int flags);
static void proc_dtor(void *mem, int size, void *arg);
static int proc_init(void *mem, int size, int flags);
static void proc_fini(void *mem, int size);
/*
* Other process lists
*/
struct pidhashhead *pidhashtbl;
u_long pidhash;
struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct proclist allproc;
struct proclist zombproc;
struct sx allproc_lock;
struct sx proctree_lock;
struct mtx pargs_ref_lock;
struct mtx ppeers_lock;
uma_zone_t proc_zone;
uma_zone_t ithread_zone;
int kstack_pages = KSTACK_PAGES;
int uarea_pages = UAREA_PAGES;
SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0, "");
SYSCTL_INT(_kern, OID_AUTO, uarea_pages, CTLFLAG_RD, &uarea_pages, 0, "");
CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
/*
* Initialize global process hashing structures.
*/
void
procinit()
{
sx_init(&allproc_lock, "allproc");
sx_init(&proctree_lock, "proctree");
mtx_init(&pargs_ref_lock, "struct pargs.ref", NULL, MTX_DEF);
mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
LIST_INIT(&allproc);
LIST_INIT(&zombproc);
pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
proc_ctor, proc_dtor, proc_init, proc_fini,
UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
uihashinit();
}
/*
* Prepare a proc for use.
*/
static int
proc_ctor(void *mem, int size, void *arg, int flags)
{
struct proc *p;
p = (struct proc *)mem;
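/* Nothing to do per-allocation yet; type-stable setup is done once in proc_init(). */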
return (0);
}
/*
* Reclaim a proc after use.
*/
static void
proc_dtor(void *mem, int size, void *arg)
{
struct proc *p;
struct thread *td;
#ifdef INVARIANTS
struct ksegrp *kg;
#endif
/* INVARIANTS checks go here */
p = (struct proc *)mem;
td = FIRST_THREAD_IN_PROC(p);
#ifdef INVARIANTS
KASSERT((p->p_numthreads == 1),
("bad number of threads in exiting process"));
KASSERT((p->p_numksegrps == 1), ("free proc with > 1 ksegrp"));
KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
kg = FIRST_KSEGRP_IN_PROC(p);
KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
#endif
/*
* Dispose of an alternate kstack, if one exists.
* XXX What if there is more than one thread in the proc?
* The first thread in the proc is special and is not freed,
* so this must be done here.
*/
if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
vm_thread_dispose_altkstack(td);
}
/*
* Initialize type-stable parts of a proc (when newly created).
*/
static int
proc_init(void *mem, int size, int flags)
{
struct proc *p;
struct thread *td;
struct ksegrp *kg;
p = (struct proc *)mem;
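/*
* The scheduler's per-process data area begins immediately after the
* proc itself; the zone item was sized via sched_sizeof_proc().
*/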
p->p_sched = (struct p_sched *)&p[1];
vm_proc_new(p);
td = thread_alloc();
kg = ksegrp_alloc();
bzero(&p->p_mtx, sizeof(struct mtx));
mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
proc_linkup(p, kg, td);
sched_newproc(p, kg, td);
return (0);
}
/*
* UMA should ensure that this function is never called.
* Freeing a proc structure would violate type stability.
*/
static void
proc_fini(void *mem, int size)
{
panic("proc reclaimed");
}
/*
* Is p an inferior of the current process?
*/
int
inferior(p)
register struct proc *p;
{
sx_assert(&proctree_lock, SX_LOCKED);
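/*
* Walk up the chain of parents; reaching pid 0 means curproc was
* never found, so p is not an inferior.
*/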
for (; p != curproc; p = p->p_pptr)
if (p->p_pid == 0)
return (0);
return (1);
}
/*
* Locate a process by number; return only "live" processes -- i.e., neither
* zombies nor newly born but incompletely initialized processes. By not
* returning processes in the PRS_NEW state, we allow callers to avoid
* testing for that condition to avoid dereferencing p_ucred, et al.
*/
struct proc *
pfind(pid)
register pid_t pid;
{
register struct proc *p;
sx_slock(&allproc_lock);
LIST_FOREACH(p, PIDHASH(pid), p_hash)
if (p->p_pid == pid) {
if (p->p_state == PRS_NEW) {
p = NULL;
break;
}
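/* Return the process locked; the caller must drop the lock. */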
PROC_LOCK(p);
break;
}
sx_sunlock(&allproc_lock);
return (p);
}
/*
* Locate a process group by number.
* The caller must hold proctree_lock.
*/
struct pgrp *
pgfind(pgid)
register pid_t pgid;
{
register struct pgrp *pgrp;
sx_assert(&proctree_lock, SX_LOCKED);
LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
if (pgrp->pg_id == pgid) {
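/* Return the group locked; the caller must drop the lock. */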
PGRP_LOCK(pgrp);
return (pgrp);
}
}
return (NULL);
}
/*
* Create a new process group.
* pgid must be equal to the pid of p.
* Begin a new session if required.
*/
int
enterpgrp(p, pgid, pgrp, sess)
register struct proc *p;
pid_t pgid;
struct pgrp *pgrp;
struct session *sess;
{
struct pgrp *pgrp2;
sx_assert(&proctree_lock, SX_XLOCKED);
KASSERT(pgrp != NULL, ("enterpgrp: pgrp == NULL"));
KASSERT(p->p_pid == pgid,
("enterpgrp: new pgrp and pid != pgid"));
pgrp2 = pgfind(pgid);
KASSERT(pgrp2 == NULL,
("enterpgrp: pgrp with pgid exists"));
KASSERT(!SESS_LEADER(p),
("enterpgrp: session leader attempted setpgrp"));
mtx_init(&pgrp->pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);
if (sess != NULL) {
/*
* new session
*/
mtx_init(&sess->s_mtx, "session", NULL, MTX_DEF);
PROC_LOCK(p);
p->p_flag &= ~P_CONTROLT;
PROC_UNLOCK(p);
PGRP_LOCK(pgrp);
sess->s_leader = p;
sess->s_sid = p->p_pid;
sess->s_count = 1;
sess->s_ttyvp = NULL;
sess->s_ttyp = NULL;
bcopy(p->p_session->s_login, sess->s_login,
sizeof(sess->s_login));
pgrp->pg_session = sess;
KASSERT(p == curproc,
("enterpgrp: mksession and p != curproc"));
} else {
pgrp->pg_session = p->p_session;
SESS_LOCK(pgrp->pg_session);
pgrp->pg_session->s_count++;
SESS_UNLOCK(pgrp->pg_session);
PGRP_LOCK(pgrp);
}
pgrp->pg_id = pgid;
LIST_INIT(&pgrp->pg_members);
/*
* As we have an exclusive lock of proctree_lock,
* this should not deadlock.
*/
LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
pgrp->pg_jobc = 0;
SLIST_INIT(&pgrp->pg_sigiolst);
PGRP_UNLOCK(pgrp);
doenterpgrp(p, pgrp);
return (0);
}
/*
* Move p to an existing process group
*/
int
enterthispgrp(p, pgrp)
register struct proc *p;
struct pgrp *pgrp;
{
sx_assert(&proctree_lock, SX_XLOCKED);
PROC_LOCK_ASSERT(p, MA_NOTOWNED);
PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
KASSERT(pgrp->pg_session == p->p_session,
("%s: pgrp's session %p, p->p_session %p.\n",
__func__,
pgrp->pg_session,
p->p_session));
KASSERT(pgrp != p->p_pgrp,
("%s: p belongs to pgrp.", __func__));
doenterpgrp(p, pgrp);
return (0);
}
/*
* Move p to a process group
*/
static void
doenterpgrp(p, pgrp)
struct proc *p;
struct pgrp *pgrp;
{
struct pgrp *savepgrp;
sx_assert(&proctree_lock, SX_XLOCKED);
PROC_LOCK_ASSERT(p, MA_NOTOWNED);
PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
savepgrp = p->p_pgrp;
/*
* Adjust eligibility of affected pgrps to participate in job control.
* Increment eligibility counts before decrementing, otherwise we
* could reach 0 spuriously during the first call.
*/
fixjobc(p, pgrp, 1);
fixjobc(p, p->p_pgrp, 0);
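/*
* pg_mtx is initialized with MTX_DUPOK, so it is safe to hold
* both the old and new pgrp locks at once here.
*/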
PGRP_LOCK(pgrp);
PGRP_LOCK(savepgrp);
PROC_LOCK(p);
LIST_REMOVE(p, p_pglist);
p->p_pgrp = pgrp;
PROC_UNLOCK(p);
LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
PGRP_UNLOCK(savepgrp);
PGRP_UNLOCK(pgrp);
if (LIST_EMPTY(&savepgrp->pg_members))
pgdelete(savepgrp);
}
/*
* remove process from process group
*/
int
leavepgrp(p)
register struct proc *p;
{
struct pgrp *savepgrp;
sx_assert(&proctree_lock, SX_XLOCKED);
savepgrp = p->p_pgrp;
PGRP_LOCK(savepgrp);
PROC_LOCK(p);
LIST_REMOVE(p, p_pglist);
p->p_pgrp = NULL;
PROC_UNLOCK(p);
PGRP_UNLOCK(savepgrp);
if (LIST_EMPTY(&savepgrp->pg_members))
pgdelete(savepgrp);
return (0);
}
/*
* delete a process group
*/
static void
pgdelete(pgrp)
register struct pgrp *pgrp;
{
struct session *savesess;
int i;
sx_assert(&proctree_lock, SX_XLOCKED);
PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);
/*
* Reset any sigio structures pointing to us as a result of
* F_SETOWN with our pgid.
*/
funsetownlst(&pgrp->pg_sigiolst);
PGRP_LOCK(pgrp);
if (pgrp->pg_session->s_ttyp != NULL &&
pgrp->pg_session->s_ttyp->t_pgrp == pgrp)
pgrp->pg_session->s_ttyp->t_pgrp = NULL;
LIST_REMOVE(pgrp, pg_hash);
savesess = pgrp->pg_session;
SESS_LOCK(savesess);
i = --savesess->s_count;
SESS_UNLOCK(savesess);
PGRP_UNLOCK(pgrp);
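/* Free the session once its last reference has been dropped. */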
if (i == 0) {
if (savesess->s_ttyp != NULL)
ttyrel(savesess->s_ttyp);
mtx_destroy(&savesess->s_mtx);
FREE(savesess, M_SESSION);
}
mtx_destroy(&pgrp->pg_mtx);
FREE(pgrp, M_PGRP);
}
static void
pgadjustjobc(pgrp, entering)
struct pgrp *pgrp;
int entering;
{
PGRP_LOCK(pgrp);
if (entering)
pgrp->pg_jobc++;
else {
--pgrp->pg_jobc;
if (pgrp->pg_jobc == 0)
orphanpg(pgrp);
}
PGRP_UNLOCK(pgrp);
}
/*
* Adjust pgrp jobc counters when specified process changes process group.
* We count the number of processes in each process group that "qualify"
* the group for terminal job control (those with a parent in a different
* process group of the same session). If that count reaches zero, the
* process group becomes orphaned. Check both the specified process'
* process group and that of its children.
* entering == 0 => p is leaving specified group.
* entering == 1 => p is entering specified group.
*/
void
fixjobc(p, pgrp, entering)
register struct proc *p;
register struct pgrp *pgrp;
int entering;
{
register struct pgrp *hispgrp;
register struct session *mysession;
sx_assert(&proctree_lock, SX_LOCKED);
PROC_LOCK_ASSERT(p, MA_NOTOWNED);
PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);
/*
* Check p's parent to see whether p qualifies its own process
* group; if so, adjust count for p's process group.
*/
mysession = pgrp->pg_session;
if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
hispgrp->pg_session == mysession)
pgadjustjobc(pgrp, entering);
/*
* Check this process' children to see whether they qualify
* their process groups; if so, adjust counts for children's
* process groups.
*/
LIST_FOREACH(p, &p->p_children, p_sibling) {
hispgrp = p->p_pgrp;
if (hispgrp == pgrp ||
hispgrp->pg_session != mysession)
continue;
PROC_LOCK(p);
if (p->p_state == PRS_ZOMBIE) {
PROC_UNLOCK(p);
continue;
}
PROC_UNLOCK(p);
pgadjustjobc(hispgrp, entering);
}
}
/*
* A process group has become orphaned;
* if there are any stopped processes in the group,
* hang up all processes in that group.
*/
static void
orphanpg(pg)
struct pgrp *pg;
{
register struct proc *p;
PGRP_LOCK_ASSERT(pg, MA_OWNED);
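/*
* Scan for a stopped member; if one is found, send SIGHUP and
* SIGCONT to every member of the group.
*/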
LIST_FOREACH(p, &pg->pg_members, p_pglist) {
PROC_LOCK(p);
if (P_SHOULDSTOP(p)) {
PROC_UNLOCK(p);
LIST_FOREACH(p, &pg->pg_members, p_pglist) {
PROC_LOCK(p);
psignal(p, SIGHUP);
psignal(p, SIGCONT);
PROC_UNLOCK(p);
}
return;
}
PROC_UNLOCK(p);
}
}
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>
DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
register struct pgrp *pgrp;
register struct proc *p;
register int i;
for (i = 0; i <= pgrphash; i++) {
if (!LIST_EMPTY(&pgrphashtbl[i])) {
printf("\tindx %d\n", i);
LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
printf(
"\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
(void *)pgrp, (long)pgrp->pg_id,
(void *)pgrp->pg_session,
pgrp->pg_session->s_count,
(void *)LIST_FIRST(&pgrp->pg_members));
LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
printf("\t\tpid %ld addr %p pgrp %p\n",
(long)p->p_pid, (void *)p,
(void *)p->p_pgrp);
}
}
}
}
}
#endif /* DDB */
void
fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp);
/*
* Fill in a kinfo_proc structure for the specified process.
* Must be called with the target process locked.
*/
void
fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
{
fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp);
}
void
fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
{
struct proc *p;
struct thread *td0;
struct ksegrp *kg;
struct tty *tp;
struct session *sp;
struct timeval tv;
struct sigacts *ps;
p = td->td_proc;
bzero(kp, sizeof(*kp));
kp->ki_structsize = sizeof(*kp);
kp->ki_paddr = p;
PROC_LOCK_ASSERT(p, MA_OWNED);
kp->ki_addr = /* p->p_addr; */ 0; /* XXXKSE */
kp->ki_args = p->p_args;
kp->ki_textvp = p->p_textvp;
#ifdef KTRACE
kp->ki_tracep = p->p_tracevp;
mtx_lock(&ktrace_mtx);
kp->ki_traceflag = p->p_traceflag;
mtx_unlock(&ktrace_mtx);
#endif
kp->ki_fd = p->p_fd;
kp->ki_vmspace = p->p_vmspace;
if (p->p_ucred) {
kp->ki_uid = p->p_ucred->cr_uid;
kp->ki_ruid = p->p_ucred->cr_ruid;
kp->ki_svuid = p->p_ucred->cr_svuid;
/* XXX bde doesn't like KI_NGROUPS */
kp->ki_ngroups = min(p->p_ucred->cr_ngroups, KI_NGROUPS);
bcopy(p->p_ucred->cr_groups, kp->ki_groups,
kp->ki_ngroups * sizeof(gid_t));
kp->ki_rgid = p->p_ucred->cr_rgid;
kp->ki_svgid = p->p_ucred->cr_svgid;
}
if (p->p_sigacts) {
ps = p->p_sigacts;
mtx_lock(&ps->ps_mtx);
kp->ki_sigignore = ps->ps_sigignore;
kp->ki_sigcatch = ps->ps_sigcatch;
mtx_unlock(&ps->ps_mtx);
}
mtx_lock_spin(&sched_lock);
if (p->p_state != PRS_NEW &&
p->p_state != PRS_ZOMBIE &&
p->p_vmspace != NULL) {
struct vmspace *vm = p->p_vmspace;
kp->ki_size = vm->vm_map.size;
kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/
if (p->p_sflag & PS_INMEM)
kp->ki_rssize += UAREA_PAGES;
FOREACH_THREAD_IN_PROC(p, td0) {
if (!TD_IS_SWAPPED(td0))
kp->ki_rssize += td0->td_kstack_pages;
if (td0->td_altkstack_obj != NULL)
kp->ki_rssize += td0->td_altkstack_pages;
}
kp->ki_swrss = vm->vm_swrss;
kp->ki_tsize = vm->vm_tsize;
kp->ki_dsize = vm->vm_dsize;
kp->ki_ssize = vm->vm_ssize;
}
if ((p->p_sflag & PS_INMEM) && p->p_stats) {
kp->ki_start = p->p_stats->p_start;
timevaladd(&kp->ki_start, &boottime);
kp->ki_rusage = p->p_stats->p_ru;
calcru(p, &kp->ki_rusage.ru_utime, &kp->ki_rusage.ru_stime,
NULL);
kp->ki_childstime = p->p_stats->p_cru.ru_stime;
kp->ki_childutime = p->p_stats->p_cru.ru_utime;
/* Some callers want child-times in a single value */
kp->ki_childtime = kp->ki_childstime;
timevaladd(&kp->ki_childtime, &kp->ki_childutime);
}
kp->ki_sflag = p->p_sflag;
kp->ki_swtime = p->p_swtime;
kp->ki_pid = p->p_pid;
kp->ki_nice = p->p_nice;
bintime2timeval(&p->p_runtime, &tv);
kp->ki_runtime = tv.tv_sec * (u_int64_t)1000000 + tv.tv_usec;
if (p->p_state != PRS_ZOMBIE) {
#if 0
if (td == NULL) {
/* XXXKSE: This should never happen. */
printf("fill_kinfo_proc(): pid %d has no threads!\n",
p->p_pid);
mtx_unlock_spin(&sched_lock);
return;
}
#endif
if (td->td_wmesg != NULL) {
strlcpy(kp->ki_wmesg, td->td_wmesg,
sizeof(kp->ki_wmesg));
}
if (TD_ON_LOCK(td)) {
kp->ki_kiflag |= KI_LOCKBLOCK;
strlcpy(kp->ki_lockname, td->td_lockname,
sizeof(kp->ki_lockname));
}
if (p->p_state == PRS_NORMAL) { /* XXXKSE very approximate */
if (TD_ON_RUNQ(td) ||
TD_CAN_RUN(td) ||
TD_IS_RUNNING(td)) {
kp->ki_stat = SRUN;
} else if (P_SHOULDSTOP(p)) {
kp->ki_stat = SSTOP;
} else if (TD_IS_SLEEPING(td)) {
kp->ki_stat = SSLEEP;
} else if (TD_ON_LOCK(td)) {
kp->ki_stat = SLOCK;
} else {
kp->ki_stat = SWAIT;
}
} else {
kp->ki_stat = SIDL;
}
kg = td->td_ksegrp;
/* things in the KSE GROUP */
kp->ki_estcpu = kg->kg_estcpu;
kp->ki_slptime = kg->kg_slptime;
kp->ki_pri.pri_user = kg->kg_user_pri;
kp->ki_pri.pri_class = kg->kg_pri_class;
/* Things in the thread */
kp->ki_wchan = td->td_wchan;
kp->ki_pri.pri_level = td->td_priority;
kp->ki_pri.pri_native = td->td_base_pri;
kp->ki_lastcpu = td->td_lastcpu;
kp->ki_oncpu = td->td_oncpu;
kp->ki_tdflags = td->td_flags;
kp->ki_tid = td->td_tid;
kp->ki_numthreads = p->p_numthreads;
kp->ki_pcb = td->td_pcb;
kp->ki_kstack = (void *)td->td_kstack;
kp->ki_pctcpu = sched_pctcpu(td);
/* We can't get this anymore, but ps(1) etc. never used it anyway. */
kp->ki_rqindex = 0;
} else {
kp->ki_stat = SZOMB;
}
mtx_unlock_spin(&sched_lock);
sp = NULL;
tp = NULL;
if (p->p_pgrp) {
kp->ki_pgid = p->p_pgrp->pg_id;
kp->ki_jobc = p->p_pgrp->pg_jobc;
sp = p->p_pgrp->pg_session;
if (sp != NULL) {
kp->ki_sid = sp->s_sid;
SESS_LOCK(sp);
strlcpy(kp->ki_login, sp->s_login,
sizeof(kp->ki_login));
if (sp->s_ttyvp)
kp->ki_kiflag |= KI_CTTY;
if (SESS_LEADER(p))
kp->ki_kiflag |= KI_SLEADER;
tp = sp->s_ttyp;
SESS_UNLOCK(sp);
}
}
if ((p->p_flag & P_CONTROLT) && tp != NULL) {
kp->ki_tdev = dev2udev(tp->t_dev);
kp->ki_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
if (tp->t_session)
kp->ki_tsid = tp->t_session->s_sid;
} else
kp->ki_tdev = NODEV;
if (p->p_comm[0] != '\0') {
strlcpy(kp->ki_comm, p->p_comm, sizeof(kp->ki_comm));
strlcpy(kp->ki_ocomm, p->p_comm, sizeof(kp->ki_ocomm));
}
if (p->p_sysent && p->p_sysent->sv_name != NULL &&
p->p_sysent->sv_name[0] != '\0')
strlcpy(kp->ki_emul, p->p_sysent->sv_name, sizeof(kp->ki_emul));
kp->ki_siglist = p->p_siglist;
SIGSETOR(kp->ki_siglist, td->td_siglist);
kp->ki_sigmask = td->td_sigmask;
kp->ki_xstat = p->p_xstat;
kp->ki_acflag = p->p_acflag;
kp->ki_flag = p->p_flag;
/* If jailed(p->p_ucred), emulate the old P_JAILED flag. */
if (jailed(p->p_ucred))
kp->ki_flag |= P_JAILED;
kp->ki_lock = p->p_lock;
if (p->p_pptr)
kp->ki_ppid = p->p_pptr->p_pid;
}
/*
* Locate a zombie process by number
*/
struct proc *
zpfind(pid_t pid)
{
struct proc *p;
sx_slock(&allproc_lock);
LIST_FOREACH(p, &zombproc, p_list)
if (p->p_pid == pid) {
PROC_LOCK(p);
break;
}
sx_sunlock(&allproc_lock);
return (p);
}
#define KERN_PROC_ZOMBMASK 0x3
#define KERN_PROC_NOTHREADS 0x4
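/*
* The zombie-pass index from sysctl_kern_proc() is OR'd into the low
* bits of the flags; KERN_PROC_NOTHREADS requests one record per
* process rather than one per thread.
*/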
/*
* Must be called with the process locked and will return with it unlocked.
*/
static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
{
struct thread *td;
struct kinfo_proc kinfo_proc;
int error = 0;
struct proc *np;
pid_t pid = p->p_pid;
PROC_LOCK_ASSERT(p, MA_OWNED);
if (flags & KERN_PROC_NOTHREADS) {
fill_kinfo_proc(p, &kinfo_proc);
PROC_UNLOCK(p);
error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
sizeof(kinfo_proc));
PROC_LOCK(p);
} else {
_PHOLD(p);
FOREACH_THREAD_IN_PROC(p, td) {
fill_kinfo_thread(td, &kinfo_proc);
PROC_UNLOCK(p);
error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
sizeof(kinfo_proc));
PROC_LOCK(p);
if (error)
break;
}
_PRELE(p);
}
PROC_UNLOCK(p);
if (error)
return (error);
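/*
* The process was unlocked while copying out; look it up again to
* make sure it still exists and is the same process.
*/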
if (flags & KERN_PROC_ZOMBMASK)
np = zpfind(pid);
else {
if (pid == 0)
return (0);
np = pfind(pid);
}
if (np == NULL)
return (EAGAIN);
if (np != p) {
PROC_UNLOCK(np);
return (EAGAIN);
}
PROC_UNLOCK(np);
return (0);
}
static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
int *name = (int *)arg1;
u_int namelen = arg2;
struct proc *p;
int flags, doingzomb, oid_number;
int error = 0;
oid_number = oidp->oid_number;
if (oid_number != KERN_PROC_ALL &&
(oid_number & KERN_PROC_INC_THREAD) == 0)
flags = KERN_PROC_NOTHREADS;
else {
flags = 0;
oid_number &= ~KERN_PROC_INC_THREAD;
}
if (oid_number == KERN_PROC_PID) {
if (namelen != 1)
return (EINVAL);
p = pfind((pid_t)name[0]);
if (!p)
return (ESRCH);
if ((error = p_cansee(curthread, p))) {
PROC_UNLOCK(p);
return (error);
}
error = sysctl_out_proc(p, req, flags);
return (error);
}
switch (oid_number) {
case KERN_PROC_ALL:
if (namelen != 0)
return (EINVAL);
break;
case KERN_PROC_PROC:
if (namelen != 0 && namelen != 1)
return (EINVAL);
break;
default:
if (namelen != 1)
return (EINVAL);
break;
}
if (!req->oldptr) {
/* overestimate by 5 procs */
error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
if (error)
return (error);
}
error = sysctl_wire_old_buffer(req, 0);
if (error != 0)
return (error);
sx_slock(&allproc_lock);
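/* Pass 0 walks the allproc list; pass 1 walks zombproc. */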
for (doingzomb = 0; doingzomb < 2; doingzomb++) {
if (!doingzomb)
p = LIST_FIRST(&allproc);
else
p = LIST_FIRST(&zombproc);
for (; p != 0; p = LIST_NEXT(p, p_list)) {
/*
* Skip embryonic processes.
*/
mtx_lock_spin(&sched_lock);
if (p->p_state == PRS_NEW) {
mtx_unlock_spin(&sched_lock);
continue;
}
mtx_unlock_spin(&sched_lock);
PROC_LOCK(p);
/*
* Show a user only appropriate processes.
*/
if (p_cansee(curthread, p)) {
PROC_UNLOCK(p);
continue;
}
/*
* TODO - make more efficient (see notes below).
* do by session.
*/
switch (oid_number) {
case KERN_PROC_GID:
if (p->p_ucred == NULL ||
p->p_ucred->cr_gid != (gid_t)name[0]) {
PROC_UNLOCK(p);
continue;
}
break;
case KERN_PROC_PGRP:
/* could do this by traversing pgrp */
if (p->p_pgrp == NULL ||
p->p_pgrp->pg_id != (pid_t)name[0]) {
PROC_UNLOCK(p);
continue;
}
break;
case KERN_PROC_RGID:
if (p->p_ucred == NULL ||
p->p_ucred->cr_rgid != (gid_t)name[0]) {
PROC_UNLOCK(p);
continue;
}
break;
case KERN_PROC_SESSION:
if (p->p_session == NULL ||
p->p_session->s_sid != (pid_t)name[0]) {
PROC_UNLOCK(p);
continue;
}
break;
case KERN_PROC_TTY:
if ((p->p_flag & P_CONTROLT) == 0 ||
p->p_session == NULL) {
PROC_UNLOCK(p);
continue;
}
SESS_LOCK(p->p_session);
if (p->p_session->s_ttyp == NULL ||
dev2udev(p->p_session->s_ttyp->t_dev) !=
(dev_t)name[0]) {
SESS_UNLOCK(p->p_session);
PROC_UNLOCK(p);
continue;
}
SESS_UNLOCK(p->p_session);
break;
case KERN_PROC_UID:
if (p->p_ucred == NULL ||
p->p_ucred->cr_uid != (uid_t)name[0]) {
PROC_UNLOCK(p);
continue;
}
break;
case KERN_PROC_RUID:
if (p->p_ucred == NULL ||
p->p_ucred->cr_ruid != (uid_t)name[0]) {
PROC_UNLOCK(p);
continue;
}
break;
case KERN_PROC_PROC:
break;
default:
break;
}
error = sysctl_out_proc(p, req, flags | doingzomb);
if (error) {
sx_sunlock(&allproc_lock);
return (error);
}
}
}
sx_sunlock(&allproc_lock);
return (0);
}
struct pargs *
pargs_alloc(int len)
{
struct pargs *pa;
MALLOC(pa, struct pargs *, sizeof(struct pargs) + len, M_PARGS,
M_WAITOK);
pa->ar_ref = 1;
pa->ar_length = len;
return (pa);
}
void
pargs_free(struct pargs *pa)
{
FREE(pa, M_PARGS);
}
void
pargs_hold(struct pargs *pa)
{
if (pa == NULL)
return;
PARGS_LOCK(pa);
pa->ar_ref++;
PARGS_UNLOCK(pa);
}
void
pargs_drop(struct pargs *pa)
{
if (pa == NULL)
return;
PARGS_LOCK(pa);
if (--pa->ar_ref == 0) {
PARGS_UNLOCK(pa);
pargs_free(pa);
} else
PARGS_UNLOCK(pa);
}
/*
* This sysctl allows a process to retrieve the argument list or process
* title for another process without groping around in the address space
* of the other process. It also allows a process to set its own "process
* title" to a string of its own choice.
*/
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
int *name = (int *)arg1;
u_int namelen = arg2;
struct pargs *newpa, *pa;
struct proc *p;
int error = 0;
if (namelen != 1)
return (EINVAL);
p = pfind((pid_t)name[0]);
if (!p)
return (ESRCH);
if ((error = p_cansee(curthread, p)) != 0) {
PROC_UNLOCK(p);
return (error);
}
if (req->newptr && curproc != p) {
PROC_UNLOCK(p);
return (EPERM);
}
pa = p->p_args;
pargs_hold(pa);
PROC_UNLOCK(p);
if (req->oldptr != NULL && pa != NULL)
error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
pargs_drop(pa);
if (error != 0 || req->newptr == NULL)
return (error);
if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit)
return (ENOMEM);
newpa = pargs_alloc(req->newlen);
error = SYSCTL_IN(req, newpa->ar_args, req->newlen);
if (error != 0) {
pargs_free(newpa);
return (error);
}
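/*
* Install the new argument block under the proc lock, then drop
* our reference to the old one.
*/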
PROC_LOCK(p);
pa = p->p_args;
p->p_args = newpa;
PROC_UNLOCK(p);
pargs_drop(pa);
return (0);
}
static int
sysctl_kern_proc_sv_name(SYSCTL_HANDLER_ARGS)
{
struct proc *p;
char *sv_name;
int *name;
int namelen;
int error;
namelen = arg2;
if (namelen != 1)
return (EINVAL);
name = (int *)arg1;
if ((p = pfind((pid_t)name[0])) == NULL)
return (ESRCH);
if ((error = p_cansee(curthread, p))) {
PROC_UNLOCK(p);
return (error);
}
sv_name = p->p_sysent->sv_name;
PROC_UNLOCK(p);
return (sysctl_handle_string(oidp, sv_name, 0, req));
}
SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");
SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");
SYSCTL_NODE(_kern_proc, KERN_PROC_GID, gid, CTLFLAG_RD,
sysctl_kern_proc, "Process table");
SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
sysctl_kern_proc, "Process table");
SYSCTL_NODE(_kern_proc, KERN_PROC_RGID, rgid, CTLFLAG_RD,
sysctl_kern_proc, "Process table");
SYSCTL_NODE(_kern_proc, KERN_PROC_SESSION, sid, CTLFLAG_RD,
sysctl_kern_proc, "Process table");
SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
sysctl_kern_proc, "Process table");
SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
sysctl_kern_proc, "Process table");
SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
sysctl_kern_proc, "Process table");
SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
sysctl_kern_proc, "Process table");
SYSCTL_NODE(_kern_proc, KERN_PROC_PROC, proc, CTLFLAG_RD,
sysctl_kern_proc, "Return process table, no threads");
SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
sysctl_kern_proc_args, "Process argument list");
SYSCTL_NODE(_kern_proc, KERN_PROC_SV_NAME, sv_name, CTLFLAG_RD,
sysctl_kern_proc_sv_name, "Process syscall vector name (ABI type)");
SYSCTL_NODE(_kern_proc, (KERN_PROC_GID | KERN_PROC_INC_THREAD), gid_td,
CTLFLAG_RD, sysctl_kern_proc, "Process table");
SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_INC_THREAD), pgrp_td,
CTLFLAG_RD, sysctl_kern_proc, "Process table");
SYSCTL_NODE(_kern_proc, (KERN_PROC_RGID | KERN_PROC_INC_THREAD), rgid_td,
CTLFLAG_RD, sysctl_kern_proc, "Process table");
SYSCTL_NODE(_kern_proc, (KERN_PROC_SESSION | KERN_PROC_INC_THREAD), sid_td,
CTLFLAG_RD, sysctl_kern_proc, "Process table");
SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_INC_THREAD), tty_td,
CTLFLAG_RD, sysctl_kern_proc, "Process table");
SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_INC_THREAD), uid_td,
CTLFLAG_RD, sysctl_kern_proc, "Process table");
SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_INC_THREAD), ruid_td,
CTLFLAG_RD, sysctl_kern_proc, "Process table");
SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_INC_THREAD), pid_td,
CTLFLAG_RD, sysctl_kern_proc, "Process table");
SYSCTL_NODE(_kern_proc, (KERN_PROC_PROC | KERN_PROC_INC_THREAD), proc_td,
CTLFLAG_RD, sysctl_kern_proc, "Return process table, no threads");