Push Giant down a little further:
- no longer serialize on Giant for thread_single*() and family in fork,
  exit and exec
- thread_wait() is mpsafe, assert no Giant
- reduce scope of Giant in exit to not cover thread_wait and just do
  vm_waitproc().
- assert that thread_single() family are not called with Giant
- remove the DROP/PICKUP_GIANT macros from thread_single() family
- assert that thread_suspend_check() is not called with Giant
- remove manual drop_giant hack in thread_suspend_check since we know it
  isn't held.
- remove the DROP/PICKUP_GIANT macros from thread_suspend_check() family
- mark kse_create() mpsafe
commit 37814395c1
parent f8325b428c

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=126932
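
The recurring pattern in this change is to stop serializing on Giant and
instead assert, at the top of each now-MPSAFE function, that the caller does
not hold it. A minimal sketch of that pattern (the function name here is
hypothetical, not part of this diff; mtx_assert() is only checked in
INVARIANTS kernels):

	void
	some_mpsafe_function(struct proc *p)	/* hypothetical name */
	{

		mtx_assert(&Giant, MA_NOTOWNED);	/* caller must not hold Giant */
		PROC_LOCK_ASSERT(p, MA_OWNED);		/* caller must hold the proc lock */

		/* ... body runs without Giant protection ... */
	}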
sys/kern/kern_exec.c:
@@ -253,7 +253,6 @@ kern_execve(td, fname, argv, envv, mac_p)
 	 * that might allow a local user to illicitly obtain elevated
 	 * privileges.
 	 */
-	mtx_lock(&Giant);
 	PROC_LOCK(p);
 	KASSERT((p->p_flag & P_INEXEC) == 0,
 	    ("%s(): process already has P_INEXEC flag", __func__));
@@ -271,7 +270,6 @@ kern_execve(td, fname, argv, envv, mac_p)
 		td->td_mailbox = NULL;
 		thread_single_end();
 	}
-	mtx_unlock(&Giant);
 	p->p_flag |= P_INEXEC;
 	PROC_UNLOCK(p);
 
sys/kern/kern_exit.c:
@@ -137,7 +137,6 @@ exit1(struct thread *td, int rv)
 	/*
 	 * MUST abort all other threads before proceeding past here.
 	 */
-	mtx_lock(&Giant);
 	PROC_LOCK(p);
 	if (p->p_flag & P_SA || p->p_numthreads > 1) {
 		/*
@@ -160,9 +159,8 @@ exit1(struct thread *td, int rv)
 		 * from userret(). thread_exit() will unsuspend us
 		 * when the last other thread exits.
 		 */
-		if (thread_single(SINGLE_EXIT)) {
+		if (thread_single(SINGLE_EXIT))
 			panic ("Exit: Single threading fouled up");
-		}
 		/*
 		 * All other activity in this process is now stopped.
 		 * Remove excess KSEs and KSEGRPS. XXXKSE (when we have them)
@@ -172,7 +170,6 @@ exit1(struct thread *td, int rv)
 		p->p_flag &= ~P_SA;
 		thread_single_end();	/* Don't need this any more. */
 	}
-	mtx_unlock(&Giant);
 	/*
 	 * With this state set:
 	 * Any thread entering the kernel from userspace will thread_exit()
@@ -716,7 +713,6 @@ wait1(struct thread *td, struct wait_args *uap, int compat)
 			/*
 			 * do any thread-system specific cleanups
 			 */
-			mtx_lock(&Giant);
 			thread_wait(p);
 
 			/*
@@ -724,6 +720,7 @@ wait1(struct thread *td, struct wait_args *uap, int compat)
 			 * to free anything that cpu_exit couldn't
 			 * release while still running in process context.
 			 */
+			mtx_lock(&Giant);
 			vm_waitproc(p);
 			mtx_unlock(&Giant);
 #ifdef MAC
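
The wait1() hunks above implement the "reduce scope of Giant in exit" item
from the commit message: the mtx_lock(&Giant) that used to sit before
thread_wait() moves down so that Giant brackets only vm_waitproc().
Paraphrased from the two hunks:

	/* Before: Giant covered the thread cleanup and the VM cleanup. */
	mtx_lock(&Giant);
	thread_wait(p);
	vm_waitproc(p);
	mtx_unlock(&Giant);

	/* After: thread_wait() is MPSAFE; Giant covers only vm_waitproc(). */
	thread_wait(p);
	mtx_lock(&Giant);
	vm_waitproc(p);
	mtx_unlock(&Giant);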
sys/kern/kern_fork.c:
@@ -272,16 +272,13 @@ fork1(td, flags, pages, procp)
 		 * where they will try restart in the parent and will
 		 * be aborted in the child.
 		 */
-		mtx_lock(&Giant);
 		PROC_LOCK(p1);
 		if (thread_single(SINGLE_NO_EXIT)) {
 			/* Abort. Someone else is single threading before us. */
 			PROC_UNLOCK(p1);
-			mtx_unlock(&Giant);
 			return (ERESTART);
 		}
 		PROC_UNLOCK(p1);
-		mtx_unlock(&Giant);
 		/*
 		 * All other activity in this process
 		 * is now suspended at the user boundary,
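
With Giant gone from this path, fork1()'s single-threading handshake relies
on the proc lock alone. The resulting control flow, consolidated from the
hunk above:

	PROC_LOCK(p1);
	if (thread_single(SINGLE_NO_EXIT)) {
		/* Another thread is already single-threading; back out. */
		PROC_UNLOCK(p1);
		return (ERESTART);	/* fork is retried from the user boundary */
	}
	PROC_UNLOCK(p1);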
sys/kern/kern_thread.c:
@@ -1331,13 +1331,14 @@ thread_exit(void)
 
 /*
  * Do any thread specific cleanups that may be needed in wait()
- * called with Giant held, proc and schedlock not held.
+ * called with Giant, proc and schedlock not held.
  */
 void
 thread_wait(struct proc *p)
 {
 	struct thread *td;
 
+	mtx_assert(&Giant, MA_NOTOWNED);
 	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
 	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
 	FOREACH_THREAD_IN_PROC(p, td) {
@@ -1468,6 +1469,7 @@ kse_purge(struct proc *p, struct thread *td)
 void
 thread_alloc_spare(struct thread *td, struct thread *spare)
 {
+
 	if (td->td_standin)
 		return;
 	if (spare == NULL)
@@ -1876,7 +1878,7 @@ thread_single(int force_exit)
 
 	td = curthread;
 	p = td->td_proc;
-	mtx_assert(&Giant, MA_OWNED);
+	mtx_assert(&Giant, MA_NOTOWNED);
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 	KASSERT((td != NULL), ("curthread is NULL"));
 
@@ -1933,11 +1935,9 @@ thread_single(int force_exit)
 		 * In the mean time we suspend as well.
 		 */
 		thread_suspend_one(td);
-		DROP_GIANT();
 		PROC_UNLOCK(p);
 		mi_switch(SW_VOL);
 		mtx_unlock_spin(&sched_lock);
-		PICKUP_GIANT();
 		PROC_LOCK(p);
 		mtx_lock_spin(&sched_lock);
 	}
@@ -1991,6 +1991,7 @@ thread_suspend_check(int return_instead)
 
 	td = curthread;
 	p = td->td_proc;
+	mtx_assert(&Giant, MA_NOTOWNED);
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 	while (P_SHOULDSTOP(p)) {
 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
@@ -2016,8 +2017,6 @@ thread_suspend_check(int return_instead)
 		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
 		 */
 		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
-			while (mtx_owned(&Giant))
-				mtx_unlock(&Giant);
 			if (p->p_flag & P_SA)
 				thread_exit();
 			else
@@ -2035,11 +2034,9 @@ thread_suspend_check(int return_instead)
 				thread_unsuspend_one(p->p_singlethread);
 			}
 		}
-		DROP_GIANT();
 		PROC_UNLOCK(p);
 		mi_switch(SW_INVOL);
 		mtx_unlock_spin(&sched_lock);
-		PICKUP_GIANT();
 		PROC_LOCK(p);
 	}
 	return (0);
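
Once thread_single() and thread_suspend_check() assert MA_NOTOWNED on entry,
the DROP_GIANT()/PICKUP_GIANT() pairs around mi_switch() are dead code, which
is why they can simply be deleted. Roughly, and ignoring the WITNESS
bookkeeping the real sys/mutex.h macros carry (this is a paraphrase, not the
macro text), the pair behaved like:

	int giantcnt = 0;

	/* DROP_GIANT(), roughly: release every recursive hold on Giant. */
	while (mtx_owned(&Giant)) {
		giantcnt++;
		mtx_unlock(&Giant);
	}

	/* ... block or mi_switch() without holding Giant ... */

	/* PICKUP_GIANT(), roughly: reacquire Giant to the saved depth. */
	for (; giantcnt > 0; giantcnt--)
		mtx_lock(&Giant);

The "manual drop_giant hack" removed from thread_suspend_check() was an
open-coded copy of the same release loop; it too is pointless once the
function asserts that Giant is not held.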
sys/kern/subr_trap.c:
@@ -114,9 +114,8 @@ userret(td, frame, oticks)
 	/*
 	 * Do special thread processing, e.g. upcall tweaking and such.
 	 */
-	if (p->p_flag & P_SA) {
+	if (p->p_flag & P_SA)
 		thread_userret(td, frame);
-	}
 
 	/*
 	 * Charge system time if profiling.
sys/kern/syscalls.master:
@@ -540,7 +540,7 @@
 				    int flags); }
 379	MSTD	{ int kse_exit(void); }
 380	MSTD	{ int kse_wakeup(struct kse_mailbox *mbx); }
-381	STD	{ int kse_create(struct kse_mailbox *mbx, \
+381	MSTD	{ int kse_create(struct kse_mailbox *mbx, \
 				    int newgroup); }
 382	MSTD	{ int kse_thr_interrupt(struct kse_thr_mailbox *tmbx, int cmd, \
 				    long data); }
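
The STD to MSTD change is the "mark kse_create() mpsafe" item. The M prefix
declares a standard syscall MP-safe (makesyscalls.sh sets SYF_MPSAFE on the
generated table entry), so the dispatcher stops wrapping it in Giant. The
syscall path of this era amounted to roughly the following (a paraphrased
sketch of the trap code, not a verbatim quote):

	if ((callp->sy_narg & SYF_MPSAFE) == 0)
		mtx_lock(&Giant);	/* non-MPSAFE entries still run under Giant */
	error = (*callp->sy_call)(td, argp);	/* MSTD entries skip the lock */
	if ((callp->sy_narg & SYF_MPSAFE) == 0)
		mtx_unlock(&Giant);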