diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 396f98fbd41..509885d2cd6 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -573,7 +573,9 @@ hardclock_cnt(int cnt, int usermode)
 void
 hardclock_sync(int cpu)
 {
-	int *t = DPCPU_ID_PTR(cpu, pcputicks);
+	int *t;
 
+	KASSERT(!CPU_ABSENT(cpu), ("Absent CPU %d", cpu));
+	t = DPCPU_ID_PTR(cpu, pcputicks);
 	*t = ticks;
 }
diff --git a/sys/kern/kern_clocksource.c b/sys/kern/kern_clocksource.c
index 61293173f22..b7d8bb91685 100644
--- a/sys/kern/kern_clocksource.c
+++ b/sys/kern/kern_clocksource.c
@@ -822,6 +822,8 @@ cpu_new_callout(int cpu, sbintime_t bt, sbintime_t bt_opt)
 	CTR6(KTR_SPARE2, "new co at %d: on %d at %d.%08x - %d.%08x",
 	    curcpu, cpu, (int)(bt_opt >> 32), (u_int)(bt_opt & 0xffffffff),
 	    (int)(bt >> 32), (u_int)(bt & 0xffffffff));
+
+	KASSERT(!CPU_ABSENT(cpu), ("Absent CPU %d", cpu));
 	state = DPCPU_ID_PTR(cpu, timerstate);
 	ET_HW_LOCK(state);
 
diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c
index 93d89ec84c0..ad080ad07f7 100644
--- a/sys/kern/kern_shutdown.c
+++ b/sys/kern/kern_shutdown.c
@@ -373,15 +373,16 @@ kern_reboot(int howto)
 
 #if defined(SMP)
 	/*
-	 * Bind us to CPU 0 so that all shutdown code runs there. Some
+	 * Bind us to the first CPU so that all shutdown code runs there. Some
 	 * systems don't shutdown properly (i.e., ACPI power off) if we
 	 * run on another processor.
 	 */
 	if (!SCHEDULER_STOPPED()) {
 		thread_lock(curthread);
-		sched_bind(curthread, 0);
+		sched_bind(curthread, CPU_FIRST());
 		thread_unlock(curthread);
-		KASSERT(PCPU_GET(cpuid) == 0, ("boot: not running on cpu 0"));
+		KASSERT(PCPU_GET(cpuid) == CPU_FIRST(),
+		    ("boot: not running on cpu 0"));
 	}
 #endif
 	/* We're in the process of rebooting. */
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index d245b0d4059..81b4a14ecf0 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -264,7 +264,7 @@ cc_cce_migrating(struct callout_cpu *cc, int direct)
 
 /*
  * Kernel low level callwheel initialization
- * called on cpu0 during kernel startup.
+ * called on the BSP during kernel startup.
  */
 static void
 callout_callwheel_init(void *dummy)
@@ -277,7 +277,7 @@ callout_callwheel_init(void *dummy)
 	 * XXX: Clip callout to result of previous function of maxusers
 	 * maximum 384.  This is still huge, but acceptable.
 	 */
-	memset(CC_CPU(0), 0, sizeof(cc_cpu));
+	memset(CC_CPU(curcpu), 0, sizeof(cc_cpu));
 	ncallout = imin(16 + maxproc + maxfiles, 18508);
 	TUNABLE_INT_FETCH("kern.ncallout", &ncallout);
 
@@ -295,7 +295,7 @@ callout_callwheel_init(void *dummy)
 	TUNABLE_INT_FETCH("kern.pin_pcpu_swi", &pin_pcpu_swi);
 
 	/*
-	 * Only cpu0 handles timeout(9) and receives a preallocation.
+	 * Only BSP handles timeout(9) and receives a preallocation.
 	 *
 	 * XXX: Once all timeout(9) consumers are converted this can
 	 * be removed.
@@ -330,7 +330,7 @@ callout_cpu_init(struct callout_cpu *cc, int cpu)
 		cc_cce_cleanup(cc, i);
 	snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
 	    "callwheel cpu %d", cpu);
-	if (cc->cc_callout == NULL)	/* Only cpu0 handles timeout(9) */
+	if (cc->cc_callout == NULL)	/* Only BSP handles timeout(9) */
 		return;
 	for (i = 0; i < ncallout; i++) {
 		c = &cc->cc_callout[i];
@@ -400,7 +400,7 @@ start_softclock(void *dummy)
 		if (cpu == timeout_cpu)
 			continue;
 		cc = CC_CPU(cpu);
-		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(9). */
+		cc->cc_callout = NULL;	/* Only BSP handles timeout(9). */
 		callout_cpu_init(cc, cpu);
 		snprintf(name, sizeof(name), "clock (%d)", cpu);
 		ie = NULL;
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index c6889ccd933..7d8205c8041 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1224,6 +1224,8 @@ sched_pickcpu(struct thread *td, int flags)
 
 	self = PCPU_GET(cpuid);
 	ts = td_get_sched(td);
+	KASSERT(!CPU_ABSENT(ts->ts_cpu), ("sched_pickcpu: Start scheduler on "
+	    "absent CPU %d for thread %s.", ts->ts_cpu, td->td_name));
 	if (smp_started == 0)
 		return (self);
 	/*
@@ -1294,6 +1296,7 @@ sched_pickcpu(struct thread *td, int flags)
 	if (cpu == -1)
 		cpu = sched_lowest(cpu_top, mask, -1, INT_MAX, ts->ts_cpu);
 	KASSERT(cpu != -1, ("sched_pickcpu: Failed to find a cpu."));
+	KASSERT(!CPU_ABSENT(cpu), ("sched_pickcpu: Picked absent CPU %d.", cpu));
 	/*
 	 * Compare the lowest loaded cpu to current cpu.
 	 */
@@ -1400,6 +1403,7 @@ sched_setup(void *dummy)
 
 	/* Add thread0's load since it's running. */
 	TDQ_LOCK(tdq);
+	td_get_sched(&thread0)->ts_cpu = curcpu; /* Something valid to start */
 	thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
 	tdq_load_add(tdq, &thread0);
 	tdq->tdq_lowpri = thread0.td_priority;
@@ -1837,6 +1841,9 @@ sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
 {
 	struct tdq *tdn;
 
+	KASSERT(!CPU_ABSENT(td_get_sched(td)->ts_cpu), ("sched_switch_migrate: "
+	    "thread %s queued on absent CPU %d.", td->td_name,
+	    td_get_sched(td)->ts_cpu));
 	tdn = TDQ_CPU(td_get_sched(td)->ts_cpu);
 #ifdef SMP
 	tdq_load_rem(tdq, td);
@@ -2444,6 +2451,7 @@ sched_add(struct thread *td, int flags)
 	 * Pick the destination cpu and if it isn't ours transfer to the
 	 * target cpu.
 	 */
+	td_get_sched(td)->ts_cpu = curcpu; /* Pick something valid to start */
 	cpu = sched_pickcpu(td, flags);
 	tdq = sched_setcpu(td, cpu, flags);
 	tdq_add(tdq, td, flags);
diff --git a/sys/kern/subr_pcpu.c b/sys/kern/subr_pcpu.c
index 6b7907b558c..06e5d18b330 100644
--- a/sys/kern/subr_pcpu.c
+++ b/sys/kern/subr_pcpu.c
@@ -279,6 +279,8 @@ pcpu_destroy(struct pcpu *pcpu)
 struct pcpu *
 pcpu_find(u_int cpuid)
 {
 
+	KASSERT(cpuid_to_pcpu[cpuid] != NULL,
+	    ("Getting uninitialized PCPU %d", cpuid));
 	return (cpuid_to_pcpu[cpuid]);
 }
@@ -409,7 +411,7 @@ DB_SHOW_ALL_COMMAND(pcpu, db_show_cpu_all)
 	int id;
 
 	db_printf("Current CPU: %d\n\n", PCPU_GET(cpuid));
-	for (id = 0; id <= mp_maxid; id++) {
+	CPU_FOREACH(id) {
 		pc = pcpu_find(id);
 		if (pc != NULL) {
 			show_pcpu(pc);