
Removed sched_nest variable in sched_switch().  Context switches always
begin with sched_lock held but not recursed, so this variable was
always 0.

Removed fixup of sched_lock.mtx_recurse after context switches in
sched_switch().  Context switches always end with this variable in the
same state that it began in, so there is no need to fix it up.  Only
sched_lock.mtx_lock really needs a fixup.
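
Condensed, the tail of sched_switch() after this change reduces to the
sketch below (distilled from the two sched_switch() hunks that follow;
the scheduler-specific KSE handling above these lines is elided):

	newtd = choosethread();
	if (td != newtd)
		cpu_switch(td, newtd);
	/*
	 * mtx_recurse is 0 on entry and unchanged across cpu_switch(),
	 * so only mtx_lock needs to be re-pointed at the resuming thread.
	 */
	sched_lock.mtx_lock = (uintptr_t)td;
	td->td_oncpu = PCPU_GET(cpuid);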

Replaced fixup of sched_lock.mtx_recurse in fork_exit() by an assertion
that sched_lock is owned and not recursed after it is fixed up.  This
assertion must match the one in mi_switch(), and if sched_lock were
recursed then a non-null fixup of sched_lock.mtx_recurse would probably
be needed again, unlike in sched_switch(), since fork_exit() doesn't
return to its caller in the normal way.
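
For comparison, the fixup in fork_exit() now reads as sketched below
(lifted from the fork_exit() hunk that follows); mtx_recurse is only
asserted to be 0, not reset:

	sched_lock.mtx_lock = (uintptr_t)td;	/* point the lock owner at the new thread */
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
	cpu_critical_fork_exit();
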
Bruce Evans 2003-10-29 14:40:41 +00:00
parent 92d5836987
commit 89674a9f77
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=121682
3 changed files with 1 addition and 7 deletions

@@ -778,7 +778,7 @@ fork_exit(callout, arg, frame)
 	 * non-nested critical section with sched_lock held but not recursed.
 	 */
 	sched_lock.mtx_lock = (uintptr_t)td;
-	sched_lock.mtx_recurse = 0;
+	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
 	cpu_critical_fork_exit();
 	CTR3(KTR_PROC, "fork_exit: new thread %p (pid %d, %s)", td, p->p_pid,
 	    p->p_comm);

@@ -567,7 +567,6 @@ void
 sched_switch(struct thread *td)
 {
 	struct thread *newtd;
-	u_long sched_nest;
 	struct kse *ke;
 	struct proc *p;
@@ -597,11 +596,9 @@ sched_switch(struct thread *td)
 		 */
 		kse_reassign(ke);
 	}
-	sched_nest = sched_lock.mtx_recurse;
 	newtd = choosethread();
 	if (td != newtd)
 		cpu_switch(td, newtd);
-	sched_lock.mtx_recurse = sched_nest;
 	sched_lock.mtx_lock = (uintptr_t)td;
 	td->td_oncpu = PCPU_GET(cpuid);
 }

@@ -806,7 +806,6 @@ void
 sched_switch(struct thread *td)
 {
 	struct thread *newtd;
-	u_int sched_nest;
 	struct kse *ke;
 	mtx_assert(&sched_lock, MA_OWNED);
@@ -847,11 +846,9 @@ sched_switch(struct thread *td)
 		if (td->td_proc->p_flag & P_SA)
 			kse_reassign(ke);
 	}
-	sched_nest = sched_lock.mtx_recurse;
 	newtd = choosethread();
 	if (td != newtd)
 		cpu_switch(td, newtd);
-	sched_lock.mtx_recurse = sched_nest;
 	sched_lock.mtx_lock = (uintptr_t)td;
 	td->td_oncpu = PCPU_GET(cpuid);