1
0
mirror of https://git.FreeBSD.org/src.git synced 2024-12-12 09:58:36 +00:00

- Garbage collect several unused members of struct kse and struct ksegrp.

As best I can tell, some of these were never used.
This commit is contained in:
Jeff Roberson 2004-12-14 10:53:55 +00:00
parent 8ffb8f5558
commit 7842f65e7f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=138843
5 changed files with 0 additions and 28 deletions

View File

@ -159,7 +159,6 @@ choosethread(void)
threadqueue, td_runq);
}
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
kg->kg_runnable--;
}
CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
td, td->td_priority);
@ -254,7 +253,6 @@ remrunqueue(struct thread *td)
}
td3 = TAILQ_PREV(td, threadqueue, td_runq);
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
kg->kg_runnable--;
if (ke->ke_state == KES_ONRUNQ) {
/*
* This thread has been assigned to the system run queue.
@ -310,7 +308,6 @@ adjustrunqueue( struct thread *td, int newpri)
sched_rem(td);
}
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
kg->kg_runnable--;
TD_SET_CAN_RUN(td);
td->td_priority = newpri;
setrunqueue(td, SRQ_BORING);
@ -514,14 +511,12 @@ setrunqueue(struct thread *td, int flags)
*/
TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
if (td2->td_priority > td->td_priority) {
kg->kg_runnable++;
TAILQ_INSERT_BEFORE(td2, td, td_runq);
break;
}
}
if (td2 == NULL) {
/* We ran off the end of the TAILQ or it was empty. */
kg->kg_runnable++;
TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
}
@ -945,7 +940,6 @@ sched_newthread(struct thread *td)
bzero(ke, sizeof(*ke));
td->td_sched = ke;
ke->ke_thread = td;
ke->ke_oncpu = NOCPU;
ke->ke_state = KES_THREAD;
}

View File

@ -296,7 +296,6 @@ ksegrp_link(struct ksegrp *kg, struct proc *p)
TAILQ_INIT(&kg->kg_threads);
TAILQ_INIT(&kg->kg_runq); /* links with td_runq */
TAILQ_INIT(&kg->kg_slpq); /* links with td_runq */
TAILQ_INIT(&kg->kg_upcalls); /* all upcall structure in ksegrp */
kg->kg_proc = p;
/*
@ -304,7 +303,6 @@ ksegrp_link(struct ksegrp *kg, struct proc *p)
* and may not need clearing
*/
kg->kg_numthreads = 0;
kg->kg_runnable = 0;
kg->kg_numupcalls = 0;
/* link it in now that it's consistent */
p->p_numksegrps++;

View File

@ -74,12 +74,9 @@ __FBSDID("$FreeBSD$");
* for the group.
*/
struct kse {
TAILQ_ENTRY(kse) ke_kglist; /* (*) Queue of KSEs in ke_ksegrp. */
TAILQ_ENTRY(kse) ke_kgrlist; /* (*) Queue of KSEs in this state. */
TAILQ_ENTRY(kse) ke_procq; /* (j/z) Run queue. */
struct thread *ke_thread; /* (*) Active associated thread. */
fixpt_t ke_pctcpu; /* (j) %cpu during p_swtime. */
u_char ke_oncpu; /* (j) Which cpu we are on. */
char ke_rqindex; /* (j) Run queue index. */
enum {
KES_THREAD = 0x0, /* slaved to thread state */
@ -112,12 +109,10 @@ struct kg_sched {
/* the system scheduler. */
int skg_avail_opennings; /* (j) Num KSEs requested in group. */
int skg_concurrency; /* (j) Num KSEs requested in group. */
int skg_runq_kses; /* (j) Num KSEs on runq. */
};
#define kg_last_assigned kg_sched->skg_last_assigned
#define kg_avail_opennings kg_sched->skg_avail_opennings
#define kg_concurrency kg_sched->skg_concurrency
#define kg_runq_kses kg_sched->skg_runq_kses
#define SLOT_RELEASE(kg) \
do { \
@ -615,7 +610,6 @@ schedinit(void)
ksegrp0.kg_sched = &kg_sched0;
thread0.td_sched = &kse0;
kse0.ke_thread = &thread0;
kse0.ke_oncpu = NOCPU; /* wrong.. can we use PCPU(cpuid) yet? */
kse0.ke_state = KES_THREAD;
kg_sched0.skg_concurrency = 1;
kg_sched0.skg_avail_opennings = 0; /* we are already running */
@ -1059,7 +1053,6 @@ sched_add(struct thread *td, int flags)
sched_tdcnt++;
SLOT_USE(td->td_ksegrp);
runq_add(ke->ke_runq, ke, flags);
ke->ke_ksegrp->kg_runq_kses++;
ke->ke_state = KES_ONRUNQ;
maybe_resched(td);
}
@ -1082,7 +1075,6 @@ sched_rem(struct thread *td)
runq_remove(ke->ke_runq, ke);
ke->ke_state = KES_THREAD;
td->td_ksegrp->kg_runq_kses--;
}
/*
@ -1121,7 +1113,6 @@ sched_choose(void)
if (ke != NULL) {
runq_remove(rq, ke);
ke->ke_state = KES_THREAD;
ke->ke_ksegrp->kg_runq_kses--;
KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
("sched_choose: process swapped out"));

View File

@ -87,13 +87,10 @@ int tickincr = 1;
* for the group.
*/
struct kse {
TAILQ_ENTRY(kse) ke_kglist; /* (*) Queue of threads in ke_ksegrp. */
TAILQ_ENTRY(kse) ke_kgrlist; /* (*) Queue of threads in this state.*/
TAILQ_ENTRY(kse) ke_procq; /* (j/z) Run queue. */
int ke_flags; /* (j) KEF_* flags. */
struct thread *ke_thread; /* (*) Active associated thread. */
fixpt_t ke_pctcpu; /* (j) %cpu during p_swtime. */
u_char ke_oncpu; /* (j) Which cpu we are on. */
char ke_rqindex; /* (j) Run queue index. */
enum {
KES_THREAD = 0x0, /* slaved to thread state */
@ -147,12 +144,10 @@ struct kg_sched {
int skg_runtime; /* Number of ticks we were running */
int skg_avail_opennings; /* (j) Num unfilled slots in group.*/
int skg_concurrency; /* (j) Num threads requested in group.*/
int skg_runq_threads; /* (j) Num KSEs on runq. */
};
#define kg_last_assigned kg_sched->skg_last_assigned
#define kg_avail_opennings kg_sched->skg_avail_opennings
#define kg_concurrency kg_sched->skg_concurrency
#define kg_runq_threads kg_sched->skg_runq_threads
#define kg_runtime kg_sched->skg_runtime
#define kg_slptime kg_sched->skg_slptime
@ -1175,7 +1170,6 @@ schedinit(void)
ksegrp0.kg_sched = &kg_sched0;
thread0.td_sched = &kse0;
kse0.ke_thread = &thread0;
kse0.ke_oncpu = NOCPU; /* wrong.. can we use PCPU(cpuid) yet? */
kse0.ke_state = KES_THREAD;
kg_sched0.skg_concurrency = 1;
kg_sched0.skg_avail_opennings = 0; /* we are already running */
@ -1815,7 +1809,6 @@ sched_add_internal(struct thread *td, int preemptive)
if (preemptive && maybe_preempt(td))
return;
SLOT_USE(td->td_ksegrp);
ke->ke_ksegrp->kg_runq_threads++;
ke->ke_state = KES_ONRUNQ;
kseq_runq_add(kseq, ke);
@ -1846,7 +1839,6 @@ sched_rem(struct thread *td)
SLOT_RELEASE(td->td_ksegrp);
ke->ke_state = KES_THREAD;
ke->ke_ksegrp->kg_runq_threads--;
kseq = KSEQ_CPU(ke->ke_cpu);
kseq_runq_rem(kseq, ke);
kseq_load_rem(kseq, ke);
@ -1899,7 +1891,6 @@ sched_bind(struct thread *td, int cpu)
return;
/* sched_rem without the runq_remove */
ke->ke_state = KES_THREAD;
ke->ke_ksegrp->kg_runq_threads--;
kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
kseq_notify(ke, cpu);
/* When we return from mi_switch we'll be on the correct cpu. */

View File

@ -465,13 +465,11 @@ struct ksegrp {
TAILQ_ENTRY(ksegrp) kg_ksegrp; /* (*) Queue of KSEGs in kg_proc. */
TAILQ_HEAD(, thread) kg_threads;/* (td_kglist) All threads. */
TAILQ_HEAD(, thread) kg_runq; /* (td_runq) waiting RUNNABLE threads */
TAILQ_HEAD(, thread) kg_slpq; /* (td_runq) NONRUNNABLE threads. */
TAILQ_HEAD(, kse_upcall) kg_upcalls; /* All upcalls in the group. */
#define kg_startzero kg_estcpu
u_int kg_estcpu; /* (j) Sum of the same field in KSEs. */
u_int kg_slptime; /* (j) How long completely blocked. */
int kg_runnable; /* (j) Num runnable threads on queue. */
int kg_numupcalls; /* (j) Num upcalls. */
int kg_upsleeps; /* (c) Num threads in kse_release(). */
struct kse_thr_mailbox *kg_completed; /* (c) Completed thread mboxes. */