mirror of
https://git.FreeBSD.org/src.git
synced 2024-12-19 10:53:58 +00:00
Move the callout subsystem initialization to its own SYSINIT()
from being indirectly called via cpu_startup()+vm_ksubmap_init(). The boot order position remains the same at SI_SUB_CPU. Allocation of the callout array is changed to standard kernel malloc from a slightly obscure direct kernel_map allocation. kern_timeout_callwheel_alloc() is renamed to callout_callwheel_init() to better describe its purpose. kern_timeout_callwheel_init() is removed, simplifying the per-cpu initialization. Reviewed by: davide
This commit is contained in:
parent
f8ccf82a4c
commit
15ae0c9af9
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=248032
@ -187,6 +187,7 @@ struct callout_cpu cc_cpu;
|
||||
|
||||
static int timeout_cpu;
|
||||
|
||||
static void callout_cpu_init(struct callout_cpu *cc);
|
||||
static void softclock_call_cc(struct callout *c, struct callout_cpu *cc,
|
||||
#ifdef CALLOUT_PROFILING
|
||||
int *mpcalls, int *lockcalls, int *gcalls,
|
||||
@ -246,19 +247,14 @@ cc_cce_migrating(struct callout_cpu *cc, int direct)
|
||||
}
|
||||
|
||||
/*
|
||||
* kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
|
||||
*
|
||||
* This code is called very early in the kernel initialization sequence,
|
||||
 * and may be called more than once.
|
||||
* Kernel low level callwheel initialization
|
||||
* called on cpu0 during kernel startup.
|
||||
*/
|
||||
caddr_t
|
||||
kern_timeout_callwheel_alloc(caddr_t v)
|
||||
static void
|
||||
callout_callwheel_init(void *dummy)
|
||||
{
|
||||
struct callout_cpu *cc;
|
||||
|
||||
timeout_cpu = PCPU_GET(cpuid);
|
||||
cc = CC_CPU(timeout_cpu);
|
||||
|
||||
/*
|
||||
* Calculate the size of the callout wheel and the preallocated
|
||||
* timeout() structures.
|
||||
@ -273,13 +269,23 @@ kern_timeout_callwheel_alloc(caddr_t v)
|
||||
callwheelsize = 1 << fls(ncallout);
|
||||
callwheelmask = callwheelsize - 1;
|
||||
|
||||
cc->cc_callout = (struct callout *)v;
|
||||
v = (caddr_t)(cc->cc_callout + ncallout);
|
||||
cc->cc_callwheel = (struct callout_list *)v;
|
||||
v = (caddr_t)(cc->cc_callwheel + callwheelsize);
|
||||
return(v);
|
||||
/*
|
||||
* Only cpu0 handles timeout(9) and receives a preallocation.
|
||||
*
|
||||
* XXX: Once all timeout(9) consumers are converted this can
|
||||
* be removed.
|
||||
*/
|
||||
timeout_cpu = PCPU_GET(cpuid);
|
||||
cc = CC_CPU(timeout_cpu);
|
||||
cc->cc_callout = malloc(ncallout * sizeof(struct callout),
|
||||
M_CALLOUT, M_WAITOK);
|
||||
callout_cpu_init(cc);
|
||||
}
|
||||
SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
|
||||
|
||||
/*
|
||||
* Initialize the per-cpu callout structures.
|
||||
*/
|
||||
static void
|
||||
callout_cpu_init(struct callout_cpu *cc)
|
||||
{
|
||||
@ -288,13 +294,15 @@ callout_cpu_init(struct callout_cpu *cc)
|
||||
|
||||
mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
|
||||
SLIST_INIT(&cc->cc_callfree);
|
||||
cc->cc_callwheel = malloc(sizeof(struct callout_tailq) * callwheelsize,
|
||||
M_CALLOUT, M_WAITOK);
|
||||
for (i = 0; i < callwheelsize; i++)
|
||||
LIST_INIT(&cc->cc_callwheel[i]);
|
||||
TAILQ_INIT(&cc->cc_expireq);
|
||||
cc->cc_firstevent = INT64_MAX;
|
||||
for (i = 0; i < 2; i++)
|
||||
cc_cce_cleanup(cc, i);
|
||||
if (cc->cc_callout == NULL)
|
||||
if (cc->cc_callout == NULL) /* Only cpu0 handles timeout(9) */
|
||||
return;
|
||||
for (i = 0; i < ncallout; i++) {
|
||||
c = &cc->cc_callout[i];
|
||||
@ -334,19 +342,6 @@ callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* kern_timeout_callwheel_init() - initialize previously reserved callwheel
|
||||
* space.
|
||||
*
|
||||
* This code is called just once, after the space reserved for the
|
||||
* callout wheel has been finalized.
|
||||
*/
|
||||
void
|
||||
kern_timeout_callwheel_init(void)
|
||||
{
|
||||
callout_cpu_init(CC_CPU(timeout_cpu));
|
||||
}
|
||||
|
||||
/*
|
||||
* Start standard softclock thread.
|
||||
*/
|
||||
@ -367,18 +362,14 @@ start_softclock(void *dummy)
|
||||
if (cpu == timeout_cpu)
|
||||
continue;
|
||||
cc = CC_CPU(cpu);
|
||||
cc->cc_callout = NULL; /* Only cpu0 handles timeout(9). */
|
||||
callout_cpu_init(cc);
|
||||
if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
|
||||
INTR_MPSAFE, &cc->cc_cookie))
|
||||
panic("died while creating standard software ithreads");
|
||||
cc->cc_callout = NULL; /* Only cpu0 handles timeout(). */
|
||||
cc->cc_callwheel = malloc(
|
||||
sizeof(struct callout_list) * callwheelsize, M_CALLOUT,
|
||||
M_WAITOK);
|
||||
callout_cpu_init(cc);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);
|
||||
|
||||
#define CC_HASH_SHIFT 8
|
||||
|
@ -321,8 +321,6 @@ typedef void timeout_t(void *); /* timeout function type */
|
||||
void callout_handle_init(struct callout_handle *);
|
||||
struct callout_handle timeout(timeout_t *, void *, int);
|
||||
void untimeout(timeout_t *, void *, struct callout_handle);
|
||||
caddr_t kern_timeout_callwheel_alloc(caddr_t v);
|
||||
void kern_timeout_callwheel_init(void);
|
||||
|
||||
/* Stubs for obsolete functions that used to be for interrupt management */
|
||||
static __inline intrmask_t splbio(void) { return 0; }
|
||||
|
@ -157,8 +157,6 @@ vm_ksubmap_init(struct kva_md_info *kmi)
|
||||
again:
|
||||
v = (caddr_t)firstaddr;
|
||||
|
||||
v = kern_timeout_callwheel_alloc(v);
|
||||
|
||||
/*
|
||||
* Discount the physical memory larger than the size of kernel_map
|
||||
* to avoid eating up all of KVA space.
|
||||
@ -202,10 +200,5 @@ vm_ksubmap_init(struct kva_md_info *kmi)
|
||||
* XXX: Mbuf system machine-specific initializations should
|
||||
* go here, if anywhere.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Initialize the callouts we just allocated.
|
||||
*/
|
||||
kern_timeout_callwheel_init();
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user