mirror of https://git.FreeBSD.org/src.git
Move a bunch of flags from the KSE to the thread.

I was in two minds as to where to put them in the first case.. I should
have listened to the other mind.

Submitted by:	parts by davidxu@
Reviewed by:	jeff@ mini@
parent d1d78df69b
commit 4a338afd7a

Notes:
	svn2git  2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=111032
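
The diff below applies one mechanical transformation across every architecture and subsystem: AST state that lived in the KSE's ke_flags word moves to the thread's td_flags word, so KEF_ASTPENDING, KEF_NEEDRESCHED and KEF_OWEUPC become TDF_ASTPENDING, TDF_NEEDRESCHED and TDF_OWEUPC, and the td_kse dereference drops out of every AST check. A minimal C sketch of the before/after shape (flag values are the ones the diff adds to sys/sys/proc.h; everything else is elided):

	/* Before: the mark went through the thread's current KSE. */
	td->td_kse->ke_flags |= KEF_ASTPENDING;	/* KEF_ASTPENDING == 0x00400 */

	/* After: the mark sits on the thread itself. */
	td->td_flags |= TDF_ASTPENDING;		/* TDF_ASTPENDING == 0x000800 */

	/* What each per-arch return-to-user stub now tests, in C terms: */
	if (td->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED))
		ast(framep);			/* framep: the trapframe */
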
@@ -133,9 +133,8 @@
  2: ldiq a0, ALPHA_PSL_IPL_HIGH /* disable all interrupts */
  call_pal PAL_OSF1_swpipl
  ldq s0, PC_CURTHREAD(pcpup) /* checking for pending asts */
- ldq s1, TD_KSE(s0) /* atomically with returning */
- ldl s1, KE_FLAGS(s1)
- ldiq s2, KEF_ASTPENDING | KEF_NEEDRESCHED
+ ldl s1, TD_FLAGS(s0)
+ ldiq s2, TDF_ASTPENDING | TDF_NEEDRESCHED
  and s1, s2
  beq s1, 3f
  ldiq a0, ALPHA_PSL_IPL_0 /* reenable interrupts */

@@ -278,9 +277,8 @@ Ler1: LDGP(pv)
  /* Handle any AST's or resched's. */
  1: ldiq a0, ALPHA_PSL_IPL_HIGH /* disable all interrupts */
  call_pal PAL_OSF1_swpipl
- ldq s2, TD_KSE(s0) /* checking for pending asts */
- ldl s2, KE_FLAGS(s2) /* atomically with returning */
- ldiq s3, KEF_ASTPENDING | KEF_NEEDRESCHED
+ ldl s2, TD_FLAGS(s0) /* atomically with returning */
+ ldiq s3, TDF_ASTPENDING | TDF_NEEDRESCHED
  and s2, s3
  beq s2, 2f
  ldiq a0, ALPHA_PSL_IPL_0 /* reenable interrupts */

@@ -78,13 +78,12 @@ ASSYM(MTX_RECURSE, offsetof(struct mtx, mtx_recurse));
  ASSYM(MTX_UNOWNED, MTX_UNOWNED);

  ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
- ASSYM(TD_KSE, offsetof(struct thread, td_kse));
  ASSYM(TD_PROC, offsetof(struct thread, td_proc));

- ASSYM(KE_FLAGS, offsetof(struct kse, ke_flags));
+ ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));

- ASSYM(KEF_ASTPENDING, KEF_ASTPENDING);
- ASSYM(KEF_NEEDRESCHED, KEF_NEEDRESCHED);
+ ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
+ ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);

  ASSYM(TD_MD_FLAGS, offsetof(struct thread, td_md.md_flags));
  ASSYM(TD_MD_PCBPADDR, offsetof(struct thread, td_md.md_pcbpaddr));

@@ -280,8 +280,7 @@ doreti_ast:
  */
  cli
  movl PCPU(CURTHREAD),%eax
- movl TD_KSE(%eax), %eax
- testl $KEF_ASTPENDING | KEF_NEEDRESCHED,KE_FLAGS(%eax)
+ testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%eax)
  je doreti_exit
  sti
  pushl %esp /* pass a pointer to the trapframe */

@@ -280,8 +280,7 @@ doreti_ast:
  */
  cli
  movl PCPU(CURTHREAD),%eax
- movl TD_KSE(%eax), %eax
- testl $KEF_ASTPENDING | KEF_NEEDRESCHED,KE_FLAGS(%eax)
+ testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%eax)
  je doreti_exit
  sti
  pushl %esp /* pass a pointer to the trapframe */

@@ -260,7 +260,7 @@ npx_intr(dummy)
  if (td != NULL) {
  td->td_pcb->pcb_flags |= PCB_NPXTRAP;
  mtx_lock_spin(&sched_lock);
- td->td_kse->ke_flags |= KEF_ASTPENDING;
+ td->td_flags |= TDF_ASTPENDING;
  mtx_unlock_spin(&sched_lock);
  }
  }

@@ -96,10 +96,8 @@ ASSYM(TD_MD, offsetof(struct thread, td_md));
  ASSYM(P_MD, offsetof(struct proc, p_md));
  ASSYM(MD_LDT, offsetof(struct mdproc, md_ldt));

- ASSYM(KE_FLAGS, offsetof(struct kse, ke_flags));
-
- ASSYM(KEF_ASTPENDING, KEF_ASTPENDING);
- ASSYM(KEF_NEEDRESCHED, KEF_NEEDRESCHED);
+ ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
+ ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);

  ASSYM(V_TRAP, offsetof(struct vmmeter, v_trap));
  ASSYM(V_SYSCALL, offsetof(struct vmmeter, v_syscall));

@@ -165,7 +165,7 @@ i386_extend_pcb(struct thread *td)
  td->td_pcb->pcb_ext = ext;

  /* switch to the new TSS after syscall completes */
- td->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ td->td_flags |= TDF_NEEDRESCHED;
  mtx_unlock_spin(&sched_lock);

  return 0;

@@ -260,7 +260,7 @@ npx_intr(dummy)
  if (td != NULL) {
  td->td_pcb->pcb_flags |= PCB_NPXTRAP;
  mtx_lock_spin(&sched_lock);
- td->td_kse->ke_flags |= KEF_ASTPENDING;
+ td->td_flags |= TDF_ASTPENDING;
  mtx_unlock_spin(&sched_lock);
  }
  }

@@ -280,8 +280,7 @@ doreti_ast:
  */
  cli
  movl PCPU(CURTHREAD),%eax
- movl TD_KSE(%eax), %eax
- testl $KEF_ASTPENDING | KEF_NEEDRESCHED,KE_FLAGS(%eax)
+ testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%eax)
  je doreti_exit
  sti
  pushl %esp /* pass a pointer to the trapframe */

@@ -96,10 +96,8 @@ ASSYM(TD_MD, offsetof(struct thread, td_md));
  ASSYM(P_MD, offsetof(struct proc, p_md));
  ASSYM(MD_LDT, offsetof(struct mdproc, md_ldt));

- ASSYM(KE_FLAGS, offsetof(struct kse, ke_flags));
-
- ASSYM(KEF_ASTPENDING, KEF_ASTPENDING);
- ASSYM(KEF_NEEDRESCHED, KEF_NEEDRESCHED);
+ ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
+ ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);

  ASSYM(V_TRAP, offsetof(struct vmmeter, v_trap));
  ASSYM(V_SYSCALL, offsetof(struct vmmeter, v_syscall));

@@ -165,7 +165,7 @@ i386_extend_pcb(struct thread *td)
  td->td_pcb->pcb_ext = ext;

  /* switch to the new TSS after syscall completes */
- td->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ td->td_flags |= TDF_NEEDRESCHED;
  mtx_unlock_spin(&sched_lock);

  return 0;

@@ -260,7 +260,7 @@ npx_intr(dummy)
  if (td != NULL) {
  td->td_pcb->pcb_flags |= PCB_NPXTRAP;
  mtx_lock_spin(&sched_lock);
- td->td_kse->ke_flags |= KEF_ASTPENDING;
+ td->td_flags |= TDF_ASTPENDING;
  mtx_unlock_spin(&sched_lock);
  }
  }

@@ -831,26 +831,24 @@ ENTRY(exception_restore, 0)
  add r3=PC_CURTHREAD,r13 // &curthread
  ;;
  ld8 r3=[r3] // curthread
- add r2=(KEF_ASTPENDING|KEF_NEEDRESCHED),r0
+ add r2=(TDF_ASTPENDING|TDF_NEEDRESCHED),r0
  ;;
  }
  { .mmb
- add r3=TD_KSE,r3 // &curthread->td_kse
  mov r15=psr // save interrupt enable status
  nop 4
  ;;
  }
  { .mmi
- ld8 r3=[r3] // curkse
  ;;
  rsm psr.i // disable interrupts
- add r3=KE_FLAGS,r3 // &curkse->ke_flags
+ add r3=TD_FLAGS,r3 // &curthread->td_flags
  ;;
  }
  { .mmi
- ld4 r14=[r3] // fetch curkse->ke_flags
+ ld4 r14=[r3] // fetch curthread->td_flags
  ;;
- and r14=r2,r14 // flags & (KEF_ASTPENDING|KEF_NEEDRESCHED)
+ and r14=r2,r14 // flags & (TDF_ASTPENDING|TDF_NEEDRESCHED)
  nop 5
  ;;
  }

@@ -831,26 +831,24 @@ ENTRY(exception_restore, 0)
  add r3=PC_CURTHREAD,r13 // &curthread
  ;;
  ld8 r3=[r3] // curthread
- add r2=(KEF_ASTPENDING|KEF_NEEDRESCHED),r0
+ add r2=(TDF_ASTPENDING|TDF_NEEDRESCHED),r0
  ;;
  }
  { .mmb
- add r3=TD_KSE,r3 // &curthread->td_kse
  mov r15=psr // save interrupt enable status
  nop 4
  ;;
  }
  { .mmi
- ld8 r3=[r3] // curkse
  ;;
  rsm psr.i // disable interrupts
- add r3=KE_FLAGS,r3 // &curkse->ke_flags
+ add r3=TD_FLAGS,r3 // &curthread->td_flags
  ;;
  }
  { .mmi
- ld4 r14=[r3] // fetch curkse->ke_flags
+ ld4 r14=[r3] // fetch curthread->td_flags
  ;;
- and r14=r2,r14 // flags & (KEF_ASTPENDING|KEF_NEEDRESCHED)
+ and r14=r2,r14 // flags & (TDF_ASTPENDING|TDF_NEEDRESCHED)
  nop 5
  ;;
  }

@@ -82,14 +82,13 @@ ASSYM(MTX_UNOWNED, MTX_UNOWNED);

  ASSYM(TD_PROC, offsetof(struct thread, td_proc));
  ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
- ASSYM(TD_KSE, offsetof(struct thread, td_kse));
  ASSYM(TD_KSTACK, offsetof(struct thread, td_kstack));
  ASSYM(TD_MD_FLAGS, offsetof(struct thread, td_md.md_flags));

- ASSYM(KE_FLAGS, offsetof(struct kse, ke_flags));
+ ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));

- ASSYM(KEF_ASTPENDING, KEF_ASTPENDING);
- ASSYM(KEF_NEEDRESCHED, KEF_NEEDRESCHED);
+ ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
+ ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);

  ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS);

@@ -174,12 +174,12 @@ hardclock_process(frame)
  timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
  itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
  p->p_sflag |= PS_ALRMPEND;
- td->td_kse->ke_flags |= KEF_ASTPENDING;
+ td->td_flags |= TDF_ASTPENDING;
  }
  if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
  itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
  p->p_sflag |= PS_PROFPEND;
- td->td_kse->ke_flags |= KEF_ASTPENDING;
+ td->td_flags |= TDF_ASTPENDING;
  }
  }
  mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);

@@ -435,6 +435,7 @@ profclock(frame)
  int i;
  #endif

+ td = curthread;
  if (CLKF_USERMODE(frame)) {
  /*
  * Came from user mode; CPU was in user state.

|
||||
td = curthread;
|
||||
if ((td->td_proc->p_sflag & PS_PROFIL) &&
|
||||
!(td->td_flags & TDF_UPCALLING))
|
||||
addupc_intr(td->td_kse, CLKF_PC(frame), 1);
|
||||
addupc_intr(td, CLKF_PC(frame), 1);
|
||||
}
|
||||
#ifdef GPROF
|
||||
else {
|
||||
|
@@ -400,7 +400,7 @@ ithread_schedule(struct ithd *ithread, int do_switch)
  ctd->td_state = TDS_CAN_RUN; /* XXXKSE */
  mi_switch();
  } else {
- curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ curthread->td_flags |= TDF_NEEDRESCHED;
  }
  } else {
  CTR4(KTR_INTR, "%s: pid %d: it_need %d, state %d",

@@ -652,8 +652,6 @@ kse_create(struct thread *td, struct kse_create_args *uap)
  #endif
  mtx_lock_spin(&sched_lock);
  kse_link(newke, newkg);
- if (p->p_sflag & PS_NEEDSIGCHK)
- newke->ke_flags |= KEF_ASTPENDING;
  /* Add engine */
  kse_reassign(newke);
  mtx_unlock_spin(&sched_lock);

@@ -1065,8 +1063,7 @@ thread_statclock(int user)
  return (-1);
  if (user) {
  /* Current always do via ast() */
- td->td_kse->ke_flags |= KEF_ASTPENDING; /* XXX TDF_ASTPENDING */
- td->td_flags |= TDF_USTATCLOCK;
+ td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
  td->td_uuticks++;
  } else {
  if (td->td_mailbox != NULL)

@@ -193,7 +193,7 @@ cursig(struct thread *td)
  void
  signotify(struct proc *p)
  {
- struct kse *ke;
+ struct thread *td;
  struct ksegrp *kg;

  PROC_LOCK_ASSERT(p, MA_OWNED);

@@ -202,8 +202,8 @@ signotify(struct proc *p)
  p->p_sflag |= PS_NEEDSIGCHK;
  /* XXXKSE for now punish all KSEs */
  FOREACH_KSEGRP_IN_PROC(p, kg) {
- FOREACH_KSE_IN_GROUP(kg, ke) {
- ke->ke_flags |= KEF_ASTPENDING;
+ FOREACH_THREAD_IN_GROUP(kg, td) {
+ td->td_flags |= TDF_ASTPENDING;
  }
  }
  }

@@ -220,6 +220,8 @@ kse_reassign(struct kse *ke)
  kg->kg_last_assigned = td;
  td->td_kse = ke;
  ke->ke_thread = td;
+ if (td->td_proc->p_sflag & PS_NEEDSIGCHK)
+ td->td_flags |= TDF_ASTPENDING;
  sched_add(ke);
  CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
  return;

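Taken together, the signotify() and kse_reassign() hunks above show where the pending-signal mark now lives: a signal marks every thread in the process, and a thread that is only now receiving a KSE re-checks the process-level PS_NEEDSIGCHK flag, which is why kse_create() no longer needs its own copy of that test. A reduced sketch of the assignment path (illustrative only, not the tree's exact code):

	/* Sketch: handing a KSE to a thread, reduced to the signal logic. */
	static void
	kse_assign_sketch(struct kse *ke, struct thread *td)
	{
		td->td_kse = ke;
		ke->ke_thread = td;
		/* Inherit any process-wide pending-signal check. */
		if (td->td_proc->p_sflag & PS_NEEDSIGCHK)
			td->td_flags |= TDF_ASTPENDING;
	}
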
@@ -448,7 +448,6 @@ mi_switch(void)
  struct bintime new_switchtime;
  struct thread *td = curthread; /* XXX */
  struct proc *p = td->td_proc; /* XXX */
- struct kse *ke = td->td_kse;
  u_int sched_nest;

  mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);

@@ -489,7 +488,7 @@ mi_switch(void)
  if (p->p_cpulimit != RLIM_INFINITY &&
  p->p_runtime.sec > p->p_cpulimit) {
  p->p_sflag |= PS_XCPU;
- ke->ke_flags |= KEF_ASTPENDING;
+ td->td_flags |= TDF_ASTPENDING;
  }

  /*

@@ -652,8 +652,6 @@ kse_create(struct thread *td, struct kse_create_args *uap)
  #endif
  mtx_lock_spin(&sched_lock);
  kse_link(newke, newkg);
- if (p->p_sflag & PS_NEEDSIGCHK)
- newke->ke_flags |= KEF_ASTPENDING;
  /* Add engine */
  kse_reassign(newke);
  mtx_unlock_spin(&sched_lock);

@@ -1065,8 +1063,7 @@ thread_statclock(int user)
  return (-1);
  if (user) {
  /* Current always do via ast() */
- td->td_kse->ke_flags |= KEF_ASTPENDING; /* XXX TDF_ASTPENDING */
- td->td_flags |= TDF_USTATCLOCK;
+ td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
  td->td_uuticks++;
  } else {
  if (td->td_mailbox != NULL)

@@ -186,7 +186,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
  rtp_to_pri(&rtp, kg);
  FOREACH_THREAD_IN_GROUP(kg, td) { /* XXXKSE */
  if (TD_IS_RUNNING(td)) {
- td->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ td->td_flags |= TDF_NEEDRESCHED;
  } else if (TD_ON_RUNQ(td)) {
  if (td->td_priority > kg->kg_user_pri) {
  sched_prio(td, kg->kg_user_pri);

@@ -216,7 +216,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
  */
  FOREACH_THREAD_IN_GROUP(kg, td) {
  if (TD_IS_RUNNING(td)) {
- td->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ td->td_flags |= TDF_NEEDRESCHED;
  } else if (TD_ON_RUNQ(td)) {
  if (td->td_priority > kg->kg_user_pri) {
  sched_prio(td, kg->kg_user_pri);

@@ -242,7 +242,7 @@ int ksched_getscheduler(register_t *ret, struct ksched *ksched, struct thread *t
  int ksched_yield(register_t *ret, struct ksched *ksched)
  {
  mtx_lock_spin(&sched_lock);
- curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ curthread->td_flags |= TDF_NEEDRESCHED;
  mtx_unlock_spin(&sched_lock);
  return 0;
  }

@@ -123,7 +123,7 @@ maybe_resched(struct thread *td)

  mtx_assert(&sched_lock, MA_OWNED);
  if (td->td_priority < curthread->td_priority && curthread->td_kse)
- curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ curthread->td_flags |= TDF_NEEDRESCHED;
  }

  /*

@@ -535,7 +535,7 @@ sched_switchout(struct thread *td)
  td->td_lastcpu = ke->ke_oncpu;
  td->td_last_kse = ke;
  ke->ke_oncpu = NOCPU;
- ke->ke_flags &= ~KEF_NEEDRESCHED;
+ td->td_flags &= ~TDF_NEEDRESCHED;
  /*
  * At the last moment, if this thread is still marked RUNNING,
  * then put it back on the run queue as it has not been suspended

@@ -492,7 +492,7 @@ sched_switchout(struct thread *td)
  td->td_last_kse = ke;
  td->td_lastcpu = ke->ke_oncpu;
  ke->ke_oncpu = NOCPU;
- ke->ke_flags &= ~KEF_NEEDRESCHED;
+ td->td_flags &= ~TDF_NEEDRESCHED;

  if (TD_IS_RUNNING(td)) {
  setrunqueue(td);

@@ -518,7 +518,7 @@ sched_switchin(struct thread *td)
  #if SCHED_STRICT_RESCHED
  if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
  td->td_priority != td->td_ksegrp->kg_user_pri)
- curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ curthread->td_flags |= TDF_NEEDRESCHED;
  #endif
  }

@@ -530,7 +530,7 @@ sched_nice(struct ksegrp *kg, int nice)
  kg->kg_nice = nice;
  sched_priority(kg);
  FOREACH_THREAD_IN_GROUP(kg, td) {
- td->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ td->td_flags |= TDF_NEEDRESCHED;
  }
  }

@@ -584,7 +584,7 @@ sched_wakeup(struct thread *td)
  setrunqueue(td);
  #if SCHED_STRICT_RESCHED
  if (td->td_priority < curthread->td_priority)
- curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ curthread->td_flags |= TDF_NEEDRESCHED;
  #endif
  }

@@ -686,7 +686,7 @@ sched_clock(struct thread *td)

  if (nke && nke->ke_thread &&
  nke->ke_thread->td_priority < td->td_priority)
- ke->ke_flags |= KEF_NEEDRESCHED;
+ td->td_flags |= TDF_NEEDRESCHED;
  #endif
  /*
  * We used a tick charge it to the ksegrp so that we can compute our

@@ -704,7 +704,7 @@ sched_clock(struct thread *td)
  if (ke->ke_slice == 0) {
  td->td_priority = sched_priority(kg);
  ke->ke_slice = sched_slice(kg);
- ke->ke_flags |= KEF_NEEDRESCHED;
+ td->td_flags |= TDF_NEEDRESCHED;
  ke->ke_runq = NULL;
  }
  }

@@ -478,7 +478,7 @@ profil(td, uap)
  * inaccurate.
  */
  void
- addupc_intr(struct kse *ke, uintptr_t pc, u_int ticks)
+ addupc_intr(struct thread *td, uintptr_t pc, u_int ticks)
  {
  struct uprof *prof;
  caddr_t addr;

@@ -487,7 +487,7 @@ addupc_intr(struct kse *ke, uintptr_t pc, u_int ticks)

  if (ticks == 0)
  return;
- prof = &ke->ke_proc->p_stats->p_prof;
+ prof = &td->td_proc->p_stats->p_prof;
  if (pc < prof->pr_off ||
  (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
  return; /* out of range; ignore */

@@ -497,7 +497,7 @@ addupc_intr(struct kse *ke, uintptr_t pc, u_int ticks)
  mtx_lock_spin(&sched_lock);
  prof->pr_addr = pc;
  prof->pr_ticks = ticks;
- ke->ke_flags |= KEF_OWEUPC | KEF_ASTPENDING ;
+ td->td_flags |= TDF_OWEUPC | TDF_ASTPENDING ;
  mtx_unlock_spin(&sched_lock);
  }
  }

@@ -508,9 +508,9 @@ addupc_intr(struct kse *ke, uintptr_t pc, u_int ticks)
  * XXXKSE, don't use kse unless we got sched lock.
  */
  void
- addupc_task(struct kse *ke, uintptr_t pc, u_int ticks)
+ addupc_task(struct thread *td, uintptr_t pc, u_int ticks)
  {
- struct proc *p = ke->ke_proc;
+ struct proc *p = td->td_proc;
  struct uprof *prof;
  caddr_t addr;
  u_int i;

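The subr_prof.c hunks keep the two-phase split between addupc_intr() and addupc_task() while retargeting both from a KSE to a thread: when the interrupt-time half cannot update the profile buffer directly (that branch lies outside the hunk), it stashes the pc/ticks pair and marks the thread, and ast() later settles the debt by calling addupc_task(). A reduced sketch of the hand-off, using only names visible in the diff:

	/* Interrupt half: record the sample, owe the update to ast(). */
	mtx_lock_spin(&sched_lock);
	prof->pr_addr = pc;
	prof->pr_ticks = ticks;
	td->td_flags |= TDF_OWEUPC | TDF_ASTPENDING;
	mtx_unlock_spin(&sched_lock);

	/* AST half (see the subr_trap.c hunks further down): */
	if (flags & TDF_OWEUPC && sflag & PS_PROFIL)
		addupc_task(td, p->p_stats->p_prof.pr_addr, prticks);
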
@@ -123,7 +123,7 @@ forward_signal(struct thread *td)
  int id;

  /*
- * signotify() has already set KEF_ASTPENDING and PS_NEEDSIGCHECK on
+ * signotify() has already set TDF_ASTPENDING and PS_NEEDSIGCHECK on
  * this process, so all we need to do is poke it if it is currently
  * executing so that it executes ast().
  */

@@ -169,7 +169,7 @@ forward_roundrobin(void)
  id = pc->pc_cpumask;
  if (id != PCPU_GET(cpumask) && (id & stopped_cpus) == 0 &&
  td != pc->pc_idlethread) {
- td->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ td->td_flags |= TDF_NEEDRESCHED;
  map |= id;
  }
  }

@@ -73,7 +73,6 @@ userret(td, frame, oticks)
  u_int oticks;
  {
  struct proc *p = td->td_proc;
- struct kse *ke = td->td_kse;

  CTR3(KTR_SYSC, "userret: thread %p (pid %d, %s)", td, p->p_pid,
  p->p_comm);

@@ -83,7 +82,7 @@ userret(td, frame, oticks)
  PROC_LOCK(p);
  mtx_lock_spin(&sched_lock);
  if (SIGPENDING(p) && ((p->p_sflag & PS_NEEDSIGCHK) == 0 ||
- (td->td_kse->ke_flags & KEF_ASTPENDING) == 0))
+ (td->td_flags & TDF_ASTPENDING) == 0))
  printf("failed to set signal flags properly for ast()\n");
  mtx_unlock_spin(&sched_lock);
  PROC_UNLOCK(p);

@@ -126,7 +125,7 @@ userret(td, frame, oticks)
  mtx_lock_spin(&sched_lock);
  ticks = td->td_sticks - oticks;
  mtx_unlock_spin(&sched_lock);
- addupc_task(ke, TRAPF_PC(frame), (u_int)ticks * psratio);
+ addupc_task(td, TRAPF_PC(frame), (u_int)ticks * psratio);
  }
  }

@@ -176,16 +175,16 @@ ast(struct trapframe *framep)
  mtx_lock_spin(&sched_lock);
  ke = td->td_kse;
  sticks = td->td_sticks;
- flags = ke->ke_flags;
+ flags = td->td_flags;
  sflag = p->p_sflag;
  p->p_sflag &= ~(PS_ALRMPEND | PS_NEEDSIGCHK | PS_PROFPEND | PS_XCPU);
  #ifdef MAC
  p->p_sflag &= ~PS_MACPEND;
  #endif
- ke->ke_flags &= ~(KEF_ASTPENDING | KEF_NEEDRESCHED | KEF_OWEUPC);
+ td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDRESCHED | TDF_OWEUPC);
  cnt.v_soft++;
  prticks = 0;
- if (flags & KEF_OWEUPC && sflag & PS_PROFIL) {
+ if (flags & TDF_OWEUPC && sflag & PS_PROFIL) {
  prticks = p->p_stats->p_prof.pr_ticks;
  p->p_stats->p_prof.pr_ticks = 0;
  }

@@ -200,8 +199,8 @@ ast(struct trapframe *framep)

  if (td->td_ucred != p->p_ucred)
  cred_update_thread(td);
- if (flags & KEF_OWEUPC && sflag & PS_PROFIL)
- addupc_task(ke, p->p_stats->p_prof.pr_addr, prticks);
+ if (flags & TDF_OWEUPC && sflag & PS_PROFIL)
+ addupc_task(td, p->p_stats->p_prof.pr_addr, prticks);
  if (sflag & PS_ALRMPEND) {
  PROC_LOCK(p);
  psignal(p, SIGVTALRM);

@@ -240,7 +239,7 @@ ast(struct trapframe *framep)
  if (sflag & PS_MACPEND)
  mac_thread_userret(td);
  #endif
- if (flags & KEF_NEEDRESCHED) {
+ if (flags & TDF_NEEDRESCHED) {
  mtx_lock_spin(&sched_lock);
  sched_prio(td, kg->kg_user_pri);
  p->p_stats->p_ru.ru_nivcsw++;

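The ast() hunks keep the existing snapshot-and-clear discipline, now applied to td_flags: the flag word is copied and its AST bits cleared inside a single sched_lock section, and the handler then works from the private copy, so a flag set after the snapshot simply schedules the next ast() rather than being lost. In outline (a sketch of the shape, not the full function):

	mtx_lock_spin(&sched_lock);
	flags = td->td_flags;		/* private snapshot */
	td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDRESCHED | TDF_OWEUPC);
	mtx_unlock_spin(&sched_lock);
	...
	if (flags & TDF_NEEDRESCHED) {	/* act on the snapshot */
		mtx_lock_spin(&sched_lock);
		sched_prio(td, kg->kg_user_pri);
		...
	}
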
@@ -186,7 +186,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
  rtp_to_pri(&rtp, kg);
  FOREACH_THREAD_IN_GROUP(kg, td) { /* XXXKSE */
  if (TD_IS_RUNNING(td)) {
- td->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ td->td_flags |= TDF_NEEDRESCHED;
  } else if (TD_ON_RUNQ(td)) {
  if (td->td_priority > kg->kg_user_pri) {
  sched_prio(td, kg->kg_user_pri);

@@ -216,7 +216,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
  */
  FOREACH_THREAD_IN_GROUP(kg, td) {
  if (TD_IS_RUNNING(td)) {
- td->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ td->td_flags |= TDF_NEEDRESCHED;
  } else if (TD_ON_RUNQ(td)) {
  if (td->td_priority > kg->kg_user_pri) {
  sched_prio(td, kg->kg_user_pri);

@@ -242,7 +242,7 @@ int ksched_getscheduler(register_t *ret, struct ksched *ksched, struct thread *t
  int ksched_yield(register_t *ret, struct ksched *ksched)
  {
  mtx_lock_spin(&sched_lock);
- curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ curthread->td_flags |= TDF_NEEDRESCHED;
  mtx_unlock_spin(&sched_lock);
  return 0;
  }

@@ -140,15 +140,14 @@ ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags));

  ASSYM(TD_PROC, offsetof(struct thread, td_proc));
  ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
- ASSYM(TD_KSE, offsetof(struct thread, td_kse));

  ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace));

  ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap));

- ASSYM(KE_FLAGS, offsetof(struct kse, ke_flags));
+ ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));

- ASSYM(KEF_ASTPENDING, KEF_ASTPENDING);
- ASSYM(KEF_NEEDRESCHED, KEF_NEEDRESCHED);
+ ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
+ ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);

  ASSYM(SF_UC, offsetof(struct sigframe, sf_uc));

@@ -530,7 +530,7 @@ maybe_demote(struct mac_lomac *subjlabel, struct mac_lomac *objlabel,
  subj->mac_lomac.ml_rangehigh = objlabel->ml_single;
  subj->mac_lomac.ml_flags |= MAC_LOMAC_FLAG_UPDATE;
  mtx_lock_spin(&sched_lock);
- curthread->td_kse->ke_flags |= KEF_ASTPENDING;
+ curthread->td_flags |= TDF_ASTPENDING;
  curthread->td_proc->p_sflag |= PS_MACPEND;
  mtx_unlock_spin(&sched_lock);
  subjtext = subjlabeltext = objlabeltext = xxx;

@@ -2316,10 +2316,10 @@ ENTRY(tl0_ret)
  */
  wrpr %g0, PIL_TICK, %pil
  ldx [PCPU(CURTHREAD)], %l0
- ldx [%l0 + TD_KSE], %l1
- lduw [%l1 + KE_FLAGS], %l2
- and %l2, KEF_ASTPENDING | KEF_NEEDRESCHED, %l2
- brz,a,pt %l2, 1f
+ lduw [%l0 + TD_FLAGS], %l1
+ set TDF_ASTPENDING | TDF_NEEDRESCHED, %l2
+ and %l1, %l2, %l1
+ brz,a,pt %l1, 1f
  nop

  /*

@@ -220,8 +220,8 @@ ASSYM(IV_PRI, offsetof(struct intr_vector, iv_pri));

  ASSYM(IV_MAX, IV_MAX);

- ASSYM(KEF_ASTPENDING, KEF_ASTPENDING);
- ASSYM(KEF_NEEDRESCHED, KEF_NEEDRESCHED);
+ ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
+ ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);

  ASSYM(MD_UTRAP, offsetof(struct mdproc, md_utrap));

@@ -235,10 +235,9 @@ ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace));

  ASSYM(RW_SHIFT, RW_SHIFT);

- ASSYM(KE_FLAGS, offsetof(struct kse, ke_flags));
+ ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));

  ASSYM(TD_FRAME, offsetof(struct thread, td_frame));
- ASSYM(TD_KSE, offsetof(struct thread, td_kse));
  ASSYM(TD_KSTACK, offsetof(struct thread, td_kstack));
  ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
  ASSYM(TD_PROC, offsetof(struct thread, td_proc));

@@ -298,6 +298,7 @@ struct thread {
  struct ucred *td_ucred; /* (k) Reference to credentials. */
  void (*td_switchin)(void); /* (k) Switchin special func. */
  struct thread *td_standin; /* (?) Use this for an upcall */
+ u_int td_prticks; /* (?) Profclock hits in sys for user */
  struct kse_upcall *td_upcall; /* our upcall structure. */
  u_int64_t td_sticks; /* (j) Statclock hits in system mode. */
  u_int td_uuticks; /* Statclock hits in user, for UTS */

@@ -345,9 +346,12 @@ struct thread {
  #define TDF_UPCALLING 0x000100 /* This thread is doing an upcall. */
  #define TDF_ONSLEEPQ 0x000200 /* On the sleep queue. */
  #define TDF_INMSLEEP 0x000400 /* Don't recurse in msleep(). */
+ #define TDF_ASTPENDING 0x000800 /* Thread has some asynchronous events. */
  #define TDF_TIMOFAIL 0x001000 /* Timeout from sleep after we were awake. */
  #define TDF_INTERRUPT 0x002000 /* Thread is marked as interrupted. */
  #define TDF_USTATCLOCK 0x004000 /* Stat clock hits in userland. */
+ #define TDF_OWEUPC 0x008000 /* Owe thread an addupc() call at next AST. */
+ #define TDF_NEEDRESCHED 0x010000 /* Process needs to yield. */
  #define TDF_DEADLKTREAT 0x800000 /* Lock aquisition - deadlock treatment. */

  #define TDI_SUSPENDED 0x0001 /* On suspension queue. */

@@ -437,11 +441,8 @@ struct kse {
  };

  /* flags kept in ke_flags */
- #define KEF_OWEUPC 0x008000 /* Owe thread an addupc() call at next ast. */
  #define KEF_IDLEKSE 0x00004 /* A 'Per CPU idle process'.. has one thread */
  #define KEF_USER 0x00200 /* Process is not officially in the kernel */
- #define KEF_ASTPENDING 0x00400 /* KSE has a pending ast. */
- #define KEF_NEEDRESCHED 0x00800 /* Process needs to yield. */
  #define KEF_DIDRUN 0x02000 /* KSE actually ran. */
  #define KEF_EXIT 0x04000 /* KSE is being killed. */

@@ -105,8 +105,8 @@ struct thread;
  struct kse;
  struct proc;

- void addupc_intr(struct kse *ke, uintptr_t pc, u_int ticks);
- void addupc_task(struct kse *ke, uintptr_t pc, u_int ticks);
+ void addupc_intr(struct thread *td, uintptr_t pc, u_int ticks);
+ void addupc_task(struct thread *td, uintptr_t pc, u_int ticks);
  void calcru(struct proc *p, struct timeval *up, struct timeval *sp,
  struct timeval *ip);
  int chgproccnt(struct uidinfo *uip, int diff, int max);