Mirror of https://git.FreeBSD.org/src.git (synced 2024-12-04 09:09:56 +00:00)
Use PCPU_GET, PCPU_PTR and PCPU_SET to access all per-cpu variables other than curproc.
commit ef73ae4b0c
parent 64ca32560d
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=70861
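The diff below converts direct references to per-cpu globals (cpuid, curpcb, fpcurproc, npxproc, other_cpus, intr_nesting_level, the common TSS fields, and so on) into PCPU_GET/PCPU_PTR/PCPU_SET accesses. As a rough illustration of the pattern only — the struct layout and macro bodies here are invented for the sketch and are not the kernel's actual definitions, which live in the machine-dependent globaldata headers — the accessors behave like this:

/*
 * Minimal sketch of the per-cpu accessor pattern this commit standardizes
 * on.  The struct fields and macro bodies are illustrative only.
 */
#include <stdio.h>

struct globaldata {
	int	gd_cpuid;
	int	gd_intr_nesting_level;
};

/* Stand-in for the pointer the kernel derives from a per-cpu register. */
static struct globaldata sketch_pcpu;
static struct globaldata *globalp = &sketch_pcpu;

#define PCPU_GET(member)	(globalp->gd_ ## member)
#define PCPU_PTR(member)	(&globalp->gd_ ## member)
#define PCPU_SET(member, val)	(globalp->gd_ ## member = (val))

int
main(void)
{
	PCPU_SET(intr_nesting_level, 0);
	(*PCPU_PTR(intr_nesting_level))++;	/* pointer form, e.g. for atomic ops */
	printf("cpu%d: nesting level %d\n", PCPU_GET(cpuid),
	    PCPU_GET(intr_nesting_level));
	return (0);
}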
@@ -94,7 +94,7 @@ interrupt(a0, a1, a2, framep)
*/
globalp = (struct globaldata *) alpha_pal_rdval();

atomic_add_int(&PCPU_GET(intr_nesting_level), 1);
atomic_add_int(PCPU_PTR(intr_nesting_level), 1);
{
struct proc *p = curproc;
if (!p) p = &proc0;
@@ -116,7 +116,7 @@ interrupt(a0, a1, a2, framep)
CTR0(KTR_INTR, "clock interrupt");
if (PCPU_GET(cpuno) != hwrpb->rpb_primary_cpu_id) {
CTR0(KTR_INTR, "ignoring clock on secondary");
atomic_subtract_int(&PCPU_GET(intr_nesting_level), 1);
atomic_subtract_int(PCPU_PTR(intr_nesting_level), 1);
return;
}

@@ -152,7 +152,7 @@ interrupt(a0, a1, a2, framep)
a0, a1, a2);
/* NOTREACHED */
}
atomic_subtract_int(&PCPU_GET(intr_nesting_level), 1);
atomic_subtract_int(PCPU_PTR(intr_nesting_level), 1);
}

void
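Note how the atomic_add_int()/atomic_subtract_int() calls above switch from &PCPU_GET(intr_nesting_level) to PCPU_PTR(intr_nesting_level). A likely motivation (an assumption on my part, not stated in the commit message): once PCPU_GET may expand to something that is not an addressable lvalue, only PCPU_PTR can supply a real pointer to the atomic primitives. A small C11 sketch of the distinction, with invented names:

/*
 * Illustrative only: why a pointer-returning accessor is needed once the
 * "get" accessor no longer yields an addressable lvalue.
 */
#include <stdatomic.h>
#include <stdio.h>

struct pcpu_sketch { atomic_int nesting; };
static struct pcpu_sketch me;

#define SK_GET(f)	atomic_load(&me.f)	/* rvalue: cannot take its address */
#define SK_PTR(f)	(&me.f)			/* usable with atomic operations */

int
main(void)
{
	atomic_fetch_add(SK_PTR(nesting), 1);	/* fine */
	/* atomic_fetch_add(&SK_GET(nesting), 1);   would not compile */
	printf("%d\n", SK_GET(nesting));
	return (0);
}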
@@ -2068,7 +2068,7 @@ alpha_fpstate_check(struct proc *p)
*/
#ifndef SMP
if (p->p_addr->u_pcb.pcb_hw.apcb_flags & ALPHA_PCB_FLAGS_FEN)
if (p != fpcurproc)
if (p != PCPU_GET(fpcurproc))
panic("alpha_check_fpcurproc: bogus");
#endif
}
@@ -2089,7 +2089,7 @@ alpha_fpstate_check(struct proc *p)
void
alpha_fpstate_save(struct proc *p, int write)
{
if (p == fpcurproc) {
if (p == PCPU_GET(fpcurproc)) {
/*
* If curproc != fpcurproc, then we need to enable FEN
* so that we can dump the fp state.
@@ -2107,11 +2107,11 @@ alpha_fpstate_save(struct proc *p, int write)
* PALcode to disable FEN, otherwise we must
* clear the FEN bit in fpcurproc's pcb.
*/
if (fpcurproc == curproc)
if (PCPU_GET(fpcurproc) == curproc)
alpha_pal_wrfen(0);
else
CLEAR_FEN(fpcurproc);
fpcurproc = NULL;
CLEAR_FEN(PCPU_GET(fpcurproc));
PCPU_SET(fpcurproc, NULL);
} else {
/*
* Make sure that we leave FEN enabled if
@@ -2119,7 +2119,7 @@ alpha_fpstate_save(struct proc *p, int write)
* one process with FEN enabled. Note that FEN
* must already be set in fpcurproc's pcb.
*/
if (curproc != fpcurproc)
if (curproc != PCPU_GET(fpcurproc))
alpha_pal_wrfen(0);
}
}
@@ -2133,7 +2133,7 @@ alpha_fpstate_save(struct proc *p, int write)
void
alpha_fpstate_drop(struct proc *p)
{
if (p == fpcurproc) {
if (p == PCPU_GET(fpcurproc)) {
if (p == curproc) {
/*
* Disable FEN via the PALcode. This will
@@ -2146,7 +2146,7 @@ alpha_fpstate_drop(struct proc *p)
*/
CLEAR_FEN(p);
}
fpcurproc = NULL;
PCPU_SET(fpcurproc, NULL);
}
}

@@ -2161,19 +2161,19 @@ alpha_fpstate_switch(struct proc *p)
* Enable FEN so that we can access the fp registers.
*/
alpha_pal_wrfen(1);
if (fpcurproc) {
if (PCPU_GET(fpcurproc)) {
/*
* Dump the old fp state if its valid.
*/
savefpstate(&fpcurproc->p_addr->u_pcb.pcb_fp);
CLEAR_FEN(fpcurproc);
savefpstate(&PCPU_GET(fpcurproc)->p_addr->u_pcb.pcb_fp);
CLEAR_FEN(PCPU_GET(fpcurproc));
}

/*
* Remember the new FP owner and reload its state.
*/
fpcurproc = p;
restorefpstate(&fpcurproc->p_addr->u_pcb.pcb_fp);
PCPU_SET(fpcurproc, p);
restorefpstate(&PCPU_GET(fpcurproc)->p_addr->u_pcb.pcb_fp);

/*
* If the new owner is curproc, leave FEN enabled, otherwise
@@ -598,7 +598,7 @@ forward_statclock(int pscnt)

map = 0;
for (id = 0; id < mp_ncpus; id++) {
if (id == cpuid)
if (id == PCPU_GET(cpuid))
continue;
if (((1 << id) & checkstate_probed_cpus) == 0)
continue;
@@ -676,7 +676,7 @@ forward_hardclock(int pscnt)

map = 0;
for (id = 0; id < mp_ncpus; id++) {
if (id == cpuid)
if (id == PCPU_GET(cpuid))
continue;
if (((1 << id) & checkstate_probed_cpus) == 0)
continue;
@@ -1022,7 +1022,7 @@ atomic_readandclear(u_int64_t* p)
void
smp_handle_ipi(struct trapframe *frame)
{
u_int64_t ipis = atomic_readandclear(&PCPU_GET(pending_ipis));
u_int64_t ipis = atomic_readandclear(PCPU_PTR(pending_ipis));
u_int64_t ipi;
int cpuno = PCPU_GET(cpuno);
@@ -64,19 +64,21 @@ static pt_entry_t *
rom_lev1map()
{
struct alpha_pcb *apcb;
struct pcb *cpcb;

/*
* We may be called before the first context switch
* after alpha_init(), in which case we just need
* to use the kernel Lev1map.
*/
if (curpcb == 0)
if (PCPU_GET(curpcb) == 0)
return (Lev1map);

/*
* Find the level 1 map that we're currently running on.
*/
apcb = (struct alpha_pcb *)ALPHA_PHYS_TO_K0SEG((vm_offset_t) curpcb);
cpcb = PCPU_GET(curpcb);
apcb = (struct alpha_pcb *)ALPHA_PHYS_TO_K0SEG((vm_offset_t)cpcb);

return ((pt_entry_t *)ALPHA_PHYS_TO_K0SEG(alpha_ptob(apcb->apcb_ptbr)));
}
@@ -198,12 +200,12 @@ enter_prom()
/*
* SimOS console uses floating point.
*/
if (curproc != fpcurproc) {
if (curproc != PCPU_GET(fpcurproc)) {
alpha_pal_wrfen(1);
if (fpcurproc)
savefpstate(&fpcurproc->p_addr->u_pcb.pcb_fp);
fpcurproc = curproc;
restorefpstate(&fpcurproc->p_addr->u_pcb.pcb_fp);
if (PCPU_GET(fpcurproc))
savefpstate(&PCPU_GET(fpcurproc)->p_addr->u_pcb.pcb_fp);
PCPU_SET(fpcurproc, curproc);
restorefpstate(&PCPU_GET(fpcurproc)->p_addr->u_pcb.pcb_fp);
}
#endif
if (!pmap_uses_prom_console())
@@ -365,7 +365,7 @@ trap(a0, a1, a2, entry, framep)
* on exit from the kernel, if proc == fpcurproc,
* FP is enabled.
*/
if (fpcurproc == p) {
if (PCPU_GET(fpcurproc) == p) {
printf("trap: fp disabled for fpcurproc == %p",
p);
goto dopanic;
@@ -258,7 +258,7 @@ set_bios_selectors(struct bios_segments *seg, int flags)
union descriptor *p_gdt;

#ifdef SMP
p_gdt = &gdt[cpuid * NGDT];
p_gdt = &gdt[PCPU_GET(cpuid) * NGDT];
#else
p_gdt = gdt;
#endif
@@ -142,11 +142,12 @@ kdb_trap(type, code, regs)
#ifdef CPUSTOP_ON_DDBBREAK

#if defined(VERBOSE_CPUSTOP_ON_DDBBREAK)
db_printf("\nCPU%d stopping CPUs: 0x%08x...", cpuid, other_cpus);
db_printf("\nCPU%d stopping CPUs: 0x%08x...", PCPU_GET(cpuid),
PCPU_GET(other_cpus));
#endif /* VERBOSE_CPUSTOP_ON_DDBBREAK */

/* We stop all CPUs except ourselves (obviously) */
stop_cpus(other_cpus);
stop_cpus(PCPU_GET(other_cpus));

#if defined(VERBOSE_CPUSTOP_ON_DDBBREAK)
db_printf(" stopped.\n");
@@ -171,13 +172,14 @@ kdb_trap(type, code, regs)
#ifdef CPUSTOP_ON_DDBBREAK

#if defined(VERBOSE_CPUSTOP_ON_DDBBREAK)
db_printf("\nCPU%d restarting CPUs: 0x%08x...", cpuid, stopped_cpus);
db_printf("\nCPU%d restarting CPUs: 0x%08x...", PCPU_GET(cpuid),
stopped_cpus);
#endif /* VERBOSE_CPUSTOP_ON_DDBBREAK */

/* Restart all the CPUs we previously stopped */
if (stopped_cpus != other_cpus && smp_started != 0) {
if (stopped_cpus != PCPU_GET(other_cpus) && smp_started != 0) {
db_printf("whoa, other_cpus: 0x%08x, stopped_cpus: 0x%08x\n",
other_cpus, stopped_cpus);
PCPU_GET(other_cpus), stopped_cpus);
panic("stop_cpus() failed");
}
restart_cpus(stopped_cpus);
@@ -493,8 +493,8 @@ npxinit(control)
npxsave(&dummy);
stop_emulating();
fldcw(&control);
if (curpcb != NULL)
fnsave(&curpcb->pcb_savefpu);
if (PCPU_GET(curpcb) != NULL)
fnsave(&PCPU_GET(curpcb)->pcb_savefpu);
start_emulating();
}

@@ -506,14 +506,14 @@ npxexit(p)
struct proc *p;
{

if (p == npxproc)
npxsave(&curpcb->pcb_savefpu);
if (p == PCPU_GET(npxproc))
npxsave(&PCPU_GET(curpcb)->pcb_savefpu);
#ifdef NPX_DEBUG
if (npx_exists) {
u_int masked_exceptions;

masked_exceptions = curpcb->pcb_savefpu.sv_env.en_cw
& curpcb->pcb_savefpu.sv_env.en_sw & 0x7f;
masked_exceptions = PCPU_GET(curpcb)->pcb_savefpu.sv_env.en_cw
&PCPU_GET(curpcb)->pcb_savefpu.sv_env.en_sw & 0x7f;
/*
* Log exceptions that would have trapped with the old
* control word (overflow, divide by 0, and invalid operand).
@@ -722,19 +722,19 @@ npx_intr(dummy)
u_short control;
struct intrframe *frame;

if (npxproc == NULL || !npx_exists) {
if (PCPU_GET(npxproc) == NULL || !npx_exists) {
printf("npxintr: npxproc = %p, curproc = %p, npx_exists = %d\n",
npxproc, curproc, npx_exists);
PCPU_GET(npxproc), curproc, npx_exists);
panic("npxintr from nowhere");
}
if (npxproc != curproc) {
if (PCPU_GET(npxproc) != curproc) {
printf("npxintr: npxproc = %p, curproc = %p, npx_exists = %d\n",
npxproc, curproc, npx_exists);
PCPU_GET(npxproc), curproc, npx_exists);
panic("npxintr from non-current process");
}

outb(0xf0, 0);
fnstsw(&curpcb->pcb_savefpu.sv_ex_sw);
fnstsw(&PCPU_GET(curpcb)->pcb_savefpu.sv_ex_sw);
fnstcw(&control);
fnclex();

@@ -760,8 +760,8 @@ npx_intr(dummy)
* this exception.
*/
code =
fpetable[(curpcb->pcb_savefpu.sv_ex_sw & ~control & 0x3f) |
(curpcb->pcb_savefpu.sv_ex_sw & 0x40)];
fpetable[(PCPU_GET(curpcb)->pcb_savefpu.sv_ex_sw & ~control & 0x3f) |
(PCPU_GET(curpcb)->pcb_savefpu.sv_ex_sw & 0x40)];
trapsignal(curproc, SIGFPE, code);
} else {
/*
@@ -794,9 +794,9 @@ npxdna()
{
if (!npx_exists)
return (0);
if (npxproc != NULL) {
if (PCPU_GET(npxproc) != NULL) {
printf("npxdna: npxproc = %p, curproc = %p\n",
npxproc, curproc);
PCPU_GET(npxproc), curproc);
panic("npxdna");
}
stop_emulating();
@@ -804,7 +804,7 @@ npxdna()
* Record new context early in case frstor causes an IRQ13.
*/
PCPU_SET(npxproc, CURPROC);
curpcb->pcb_savefpu.sv_ex_sw = 0;
PCPU_GET(curpcb)->pcb_savefpu.sv_ex_sw = 0;
/*
* The following frstor may cause an IRQ13 when the state being
* restored has a pending error. The error will appear to have been
@@ -817,7 +817,7 @@ npxdna()
* fnsave are broken, so our treatment breaks fnclex if it is the
* first FPU instruction after a context switch.
*/
frstor(&curpcb->pcb_savefpu);
frstor(&PCPU_GET(curpcb)->pcb_savefpu);

return (1);
}
@@ -1051,7 +1051,7 @@ setregs(p, entry, stack, ps_strings)
regs->tf_ebx = ps_strings;

/* reset %gs as well */
if (pcb == curpcb)
if (pcb == PCPU_GET(curpcb))
load_gs(_udatasel);
else
pcb->pcb_gs = _udatasel;
@@ -1067,7 +1067,7 @@ setregs(p, entry, stack, ps_strings)
pcb->pcb_dr3 = 0;
pcb->pcb_dr6 = 0;
pcb->pcb_dr7 = 0;
if (pcb == curpcb) {
if (pcb == PCPU_GET(curpcb)) {
/*
* Clear the debug registers on the running
* CPU, otherwise they will end up affecting
@@ -1970,13 +1970,14 @@ init386(first)
initializecpu(); /* Initialize CPU registers */

/* make an initial tss so cpu can get interrupt stack on syscall! */
common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16;
common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
PCPU_SET(common_tss.tss_esp0,
(int) proc0.p_addr + UPAGES*PAGE_SIZE - 16);
PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
private_tss = 0;
tss_gdt = &gdt[GPROC0_SEL].sd;
common_tssd = *tss_gdt;
common_tss.tss_ioopt = (sizeof common_tss) << 16;
PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
ltr(gsel_tss);

dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
@@ -484,11 +484,11 @@ init_secondary(void)

gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
common_tss.tss_esp0 = 0; /* not used until after switch */
common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
common_tss.tss_ioopt = (sizeof common_tss) << 16;
tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd;
common_tssd = *tss_gdt;
PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
ltr(gsel_tss);

pmap_set_opt();
@@ -2045,7 +2045,7 @@ start_all_aps(u_int boot_addr)
}

/* build our map of 'other' CPUs */
other_cpus = all_cpus & ~(1 << cpuid);
PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));

/* fill in our (BSP) APIC version */
cpu_apic_versions[0] = lapic.version;
@@ -2398,9 +2398,9 @@ ap_init(void)
#endif

/* Build our map of 'other' CPUs. */
other_cpus = all_cpus & ~(1 << cpuid);
PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));

printf("SMP: AP CPU #%d Launched!\n", cpuid);
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

/* set up CPU registers and state */
cpu_setregs();
@@ -2410,8 +2410,8 @@ ap_init(void)

/* A quick check from sanity claus */
apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
if (cpuid != apic_id) {
printf("SMP: cpuid = %d\n", cpuid);
if (PCPU_GET(cpuid) != apic_id) {
printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
printf("SMP: apic_id = %d\n", apic_id);
printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
panic("cpuid mismatch! boom!!");
@@ -2445,10 +2445,10 @@ ap_init(void)
* Set curproc to our per-cpu idleproc so that mutexes have
* something unique to lock with.
*/
PCPU_SET(curproc,idleproc);
PCPU_SET(curproc, PCPU_GET(idleproc));

microuptime(&switchtime);
switchticks = ticks;
microuptime(PCPU_PTR(switchtime));
PCPU_SET(switchticks, ticks);

/* ok, now grab sched_lock and enter the scheduler */
enable_intr();
@@ -2610,7 +2610,7 @@ forward_statclock(int pscnt)

/* Step 1: Probe state (user, cpu, interrupt, spinlock, idle ) */

map = other_cpus & ~stopped_cpus ;
map = PCPU_GET(other_cpus) & ~stopped_cpus ;
checkstate_probed_cpus = 0;
if (map != 0)
selected_apic_ipi(map,
@@ -2636,7 +2636,7 @@ forward_statclock(int pscnt)

map = 0;
for (id = 0; id < mp_ncpus; id++) {
if (id == cpuid)
if (id == PCPU_GET(cpuid))
continue;
if (((1 << id) & checkstate_probed_cpus) == 0)
continue;
@@ -2685,7 +2685,7 @@ forward_hardclock(int pscnt)

/* Step 1: Probe state (user, cpu, interrupt, spinlock, idle) */

map = other_cpus & ~stopped_cpus ;
map = PCPU_GET(other_cpus) & ~stopped_cpus ;
checkstate_probed_cpus = 0;
if (map != 0)
selected_apic_ipi(map,
@@ -2712,7 +2712,7 @@ forward_hardclock(int pscnt)

map = 0;
for (id = 0; id < mp_ncpus; id++) {
if (id == cpuid)
if (id == PCPU_GET(cpuid))
continue;
if (((1 << id) & checkstate_probed_cpus) == 0)
continue;
@@ -2813,8 +2813,8 @@ forward_roundrobin(void)
return;
if (!forward_roundrobin_enabled)
return;
resched_cpus |= other_cpus;
map = other_cpus & ~stopped_cpus ;
resched_cpus |= PCPU_GET(other_cpus);
map = PCPU_GET(other_cpus) & ~stopped_cpus ;
#if 1
selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
#else
@@ -453,7 +453,7 @@ pmap_set_opt(void)
{
if (pseflag && (cpu_feature & CPUID_PSE)) {
load_cr4(rcr4() | CR4_PSE);
if (pdir4mb && cpuid == 0) { /* only on BSP */
if (pdir4mb && PCPU_GET(cpuid) == 0) { /* only on BSP */
kernel_pmap->pm_pdir[KPTDI] =
PTD[KPTDI] = (pd_entry_t)pdir4mb;
cpu_invltlb();
@@ -581,9 +581,9 @@ static __inline void
pmap_TLB_invalidate(pmap_t pmap, vm_offset_t va)
{
#if defined(SMP)
if (pmap->pm_active & (1 << cpuid))
if (pmap->pm_active & (1 << PCPU_GET(cpuid)))
cpu_invlpg((void *)va);
if (pmap->pm_active & other_cpus)
if (pmap->pm_active & PCPU_GET(other_cpus))
smp_invltlb();
#else
if (pmap->pm_active)
@@ -595,9 +595,9 @@ static __inline void
pmap_TLB_invalidate_all(pmap_t pmap)
{
#if defined(SMP)
if (pmap->pm_active & (1 << cpuid))
if (pmap->pm_active & (1 << PCPU_GET(cpuid)))
cpu_invltlb();
if (pmap->pm_active & other_cpus)
if (pmap->pm_active & PCPU_GET(other_cpus))
smp_invltlb();
#else
if (pmap->pm_active)
@@ -652,11 +652,11 @@ pmap_pte_quick(pmap, va)
}
newpf = pde & PG_FRAME;
#ifdef SMP
if ( ((* (unsigned *) prv_PMAP1) & PG_FRAME) != newpf) {
* (unsigned *) prv_PMAP1 = newpf | PG_RW | PG_V;
cpu_invlpg(prv_PADDR1);
if ( ((* (unsigned *) PCPU_GET(prv_PMAP1)) & PG_FRAME) != newpf) {
* (unsigned *) PCPU_GET(prv_PMAP1) = newpf | PG_RW | PG_V;
cpu_invlpg(PCPU_GET(prv_PADDR1));
}
return (unsigned *)(prv_PADDR1 + (index & (NPTEPG - 1)));
return (unsigned *)(PCPU_GET(prv_PADDR1) + (index & (NPTEPG - 1)));
#else
if ( ((* (unsigned *) PMAP1) & PG_FRAME) != newpf) {
* (unsigned *) PMAP1 = newpf | PG_RW | PG_V;
@@ -1985,11 +1985,11 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pmap->pm_pdir[PTDPTDI], origpte, va);
}
if (smp_active) {
pdeaddr = (vm_offset_t *) IdlePTDS[cpuid];
pdeaddr = (vm_offset_t *) IdlePTDS[PCPU_GET(cpuid)];
if (((newpte = pdeaddr[va >> PDRSHIFT]) & PG_V) == 0) {
if ((vm_offset_t) my_idlePTD != (vm_offset_t) vtophys(pdeaddr))
printf("pde mismatch: %x, %x\n", my_idlePTD, pdeaddr);
printf("cpuid: %d, pdeaddr: 0x%x\n", cpuid, pdeaddr);
printf("cpuid: %d, pdeaddr: 0x%x\n", PCPU_GET(cpuid), pdeaddr);
panic("pmap_enter: invalid kernel page table page(1), pdir=%p, npde=%p, pde=%p, va=%p\n",
pmap->pm_pdir[PTDPTDI], newpte, origpte, va);
}
@@ -2048,7 +2048,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
*pte |= PG_RW;
#ifdef SMP
cpu_invlpg((void *)va);
if (pmap->pm_active & other_cpus)
if (pmap->pm_active & PCPU_GET(other_cpus))
smp_invltlb();
#else
invltlb_1pg(va);
@@ -2122,7 +2122,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
/*if (origpte)*/ {
#ifdef SMP
cpu_invlpg((void *)va);
if (pmap->pm_active & other_cpus)
if (pmap->pm_active & PCPU_GET(other_cpus))
smp_invltlb();
#else
invltlb_1pg(va);
@@ -2666,20 +2666,20 @@ pmap_zero_page(phys)
vm_offset_t phys;
{
#ifdef SMP
if (*(int *) prv_CMAP3)
if (*(int *) PCPU_GET(prv_CMAP3))
panic("pmap_zero_page: prv_CMAP3 busy");

*(int *) prv_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
cpu_invlpg(prv_CADDR3);
*(int *) PCPU_GET(prv_CMAP3) = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
cpu_invlpg(PCPU_GET(prv_CADDR3));

#if defined(I686_CPU)
if (cpu_class == CPUCLASS_686)
i686_pagezero(prv_CADDR3);
i686_pagezero(PCPU_GET(prv_CADDR3));
else
#endif
bzero(prv_CADDR3, PAGE_SIZE);
bzero(PCPU_GET(prv_CADDR3), PAGE_SIZE);

*(int *) prv_CMAP3 = 0;
*(int *) PCPU_GET(prv_CMAP3) = 0;
#else
if (*(int *) CMAP2)
panic("pmap_zero_page: CMAP2 busy");
@@ -2710,20 +2710,20 @@ pmap_zero_page_area(phys, off, size)
int size;
{
#ifdef SMP
if (*(int *) prv_CMAP3)
if (*(int *) PCPU_GET(prv_CMAP3))
panic("pmap_zero_page: prv_CMAP3 busy");

*(int *) prv_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
cpu_invlpg(prv_CADDR3);
*(int *) PCPU_GET(prv_CMAP3) = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
cpu_invlpg(PCPU_GET(prv_CADDR3));

#if defined(I686_CPU)
if (cpu_class == CPUCLASS_686 && off == 0 && size == PAGE_SIZE)
i686_pagezero(prv_CADDR3);
i686_pagezero(PCPU_GET(prv_CADDR3));
else
#endif
bzero((char *)prv_CADDR3 + off, size);
bzero((char *)PCPU_GET(prv_CADDR3) + off, size);

*(int *) prv_CMAP3 = 0;
*(int *) PCPU_GET(prv_CMAP3) = 0;
#else
if (*(int *) CMAP2)
panic("pmap_zero_page: CMAP2 busy");
@@ -2753,21 +2753,22 @@ pmap_copy_page(src, dst)
vm_offset_t dst;
{
#ifdef SMP
if (*(int *) prv_CMAP1)
if (*(int *) PCPU_GET(prv_CMAP1))
panic("pmap_copy_page: prv_CMAP1 busy");
if (*(int *) prv_CMAP2)
if (*(int *) PCPU_GET(prv_CMAP2))
panic("pmap_copy_page: prv_CMAP2 busy");

*(int *) prv_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
*(int *) prv_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
*(int *) PCPU_GET(prv_CMAP1) = PG_V | (src & PG_FRAME) | PG_A;
*(int *) PCPU_GET(prv_CMAP2) = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;

cpu_invlpg(prv_CADDR1);
cpu_invlpg(prv_CADDR2);
cpu_invlpg(PCPU_GET(prv_CADDR1));
cpu_invlpg(PCPU_GET(prv_CADDR2));

bcopy(prv_CADDR1, prv_CADDR2, PAGE_SIZE);
bcopy(PCPU_GET(prv_CADDR1), PCPU_GET(prv_CADDR2), PAGE_SIZE);

*(int *) PCPU_GET(prv_CMAP1) = 0;
*(int *) PCPU_GET(prv_CMAP2) = 0;

*(int *) prv_CMAP1 = 0;
*(int *) prv_CMAP2 = 0;
#else
if (*(int *) CMAP1 || *(int *) CMAP2)
panic("pmap_copy_page: CMAP busy");
@@ -3294,7 +3295,7 @@ pmap_activate(struct proc *p)

pmap = vmspace_pmap(p->p_vmspace);
#if defined(SMP)
pmap->pm_active |= 1 << cpuid;
pmap->pm_active |= 1 << PCPU_GET(cpuid);
#else
pmap->pm_active |= 1;
#endif
@@ -252,12 +252,12 @@ set_user_ldt(struct pcb *pcb)
{
struct pcb_ldt *pcb_ldt;

if (pcb != curpcb)
if (pcb != PCPU_GET(curpcb))
return;

pcb_ldt = pcb->pcb_ldt;
#ifdef SMP
gdt[cpuid * NGDT + GUSERLDT_SEL].sd = pcb_ldt->ldt_sd;
gdt[PCPU_GET(cpuid) * NGDT + GUSERLDT_SEL].sd = pcb_ldt->ldt_sd;
#else
gdt[GUSERLDT_SEL].sd = pcb_ldt->ldt_sd;
#endif
@@ -308,7 +308,7 @@ user_ldt_free(struct pcb *pcb)
if (pcb_ldt == NULL)
return;

if (pcb == curpcb) {
if (pcb == PCPU_GET(curpcb)) {
lldt(_default_ldt);
PCPU_SET(currentldt, _default_ldt);
}
@@ -480,7 +480,7 @@ trap(frame)
if (in_vm86call)
break;

if (intr_nesting_level != 0)
if (PCPU_GET(intr_nesting_level) != 0)
break;

/*
@@ -493,7 +493,7 @@ trap(frame)
* a signal.
*/
if (frame.tf_eip == (int)cpu_switch_load_gs) {
curpcb->pcb_gs = 0;
PCPU_GET(curpcb)->pcb_gs = 0;
psignal(p, SIGBUS);
goto out;
}
@@ -519,13 +519,15 @@ trap(frame)
if (frame.tf_eip == (int)doreti_popl_es) {
frame.tf_eip = (int)doreti_popl_es_fault;
goto out;
}
}
if (frame.tf_eip == (int)doreti_popl_fs) {
frame.tf_eip = (int)doreti_popl_fs_fault;
goto out;
}
if (curpcb && curpcb->pcb_onfault) {
frame.tf_eip = (int)curpcb->pcb_onfault;
if (PCPU_GET(curpcb) != NULL &&
PCPU_GET(curpcb)->pcb_onfault != NULL) {
frame.tf_eip =
(int)PCPU_GET(curpcb)->pcb_onfault;
goto out;
}
break;
@@ -685,8 +687,9 @@ trap_pfault(frame, usermode, eva)

if (p == NULL ||
(!usermode && va < VM_MAXUSER_ADDRESS &&
(intr_nesting_level != 0 || curpcb == NULL ||
curpcb->pcb_onfault == NULL))) {
(PCPU_GET(intr_nesting_level) != 0 ||
PCPU_GET(curpcb) == NULL ||
PCPU_GET(curpcb)->pcb_onfault == NULL))) {
trap_fatal(frame, eva);
return (-1);
}
@@ -748,8 +751,10 @@ trap_pfault(frame, usermode, eva)
return (0);
nogo:
if (!usermode) {
if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) {
frame->tf_eip = (int)curpcb->pcb_onfault;
if (PCPU_GET(intr_nesting_level) == 0 &&
PCPU_GET(curpcb) != NULL &&
PCPU_GET(curpcb)->pcb_onfault != NULL) {
frame->tf_eip = (int)PCPU_GET(curpcb)->pcb_onfault;
return (0);
}
trap_fatal(frame, eva);
@@ -853,8 +858,10 @@ trap_pfault(frame, usermode, eva)
return (0);
nogo:
if (!usermode) {
if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) {
frame->tf_eip = (int)curpcb->pcb_onfault;
if (PCPU_GET(intr_nesting_level) == 0 &&
PCPU_GET(curpcb) != NULL &&
PCPU_GET(curpcb)->pcb_onfault != NULL) {
frame->tf_eip = (int)PCPU_GET(curpcb)->pcb_onfault;
return (0);
}
trap_fatal(frame, eva);
@@ -886,7 +893,7 @@ trap_fatal(frame, eva)
ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
/* two seperate prints in case of a trap on an unmapped page */
printf("cpuid = %d; ", cpuid);
printf("cpuid = %d; ", PCPU_GET(cpuid));
printf("lapic.id = %08x\n", lapic.id);
#endif
if (type == T_PAGEFLT) {
@@ -964,12 +971,12 @@ void
dblfault_handler()
{
printf("\nFatal double fault:\n");
printf("eip = 0x%x\n", common_tss.tss_eip);
printf("esp = 0x%x\n", common_tss.tss_esp);
printf("ebp = 0x%x\n", common_tss.tss_ebp);
printf("eip = 0x%x\n", PCPU_GET(common_tss.tss_eip));
printf("esp = 0x%x\n", PCPU_GET(common_tss.tss_esp));
printf("ebp = 0x%x\n", PCPU_GET(common_tss.tss_ebp));
#ifdef SMP
/* two seperate prints in case of a trap on an unmapped page */
printf("cpuid = %d; ", cpuid);
printf("cpuid = %d; ", PCPU_GET(cpuid));
printf("lapic.id = %08x\n", lapic.id);
#endif
panic("double fault");
@@ -143,7 +143,7 @@ cpu_fork(p1, p2, flags)

#if NNPX > 0
/* Ensure that p1's pcb is up to date. */
if (npxproc == p1)
if (PCPU_GET(npxproc) == p1)
npxsave(&p1->p_addr->u_pcb.pcb_savefpu);
#endif

@@ -442,23 +442,23 @@ cpu_reset()

u_int map;
int cnt;
printf("cpu_reset called on cpu#%d\n",cpuid);
printf("cpu_reset called on cpu#%d\n", PCPU_GET(cpuid));

map = other_cpus & ~ stopped_cpus;
map = PCPU_GET(other_cpus) & ~ stopped_cpus;

if (map != 0) {
printf("cpu_reset: Stopping other CPUs\n");
stop_cpus(map); /* Stop all other CPUs */
}

if (cpuid == 0) {
if (PCPU_GET(cpuid) == 0) {
DELAY(1000000);
cpu_reset_real();
/* NOTREACHED */
} else {
/* We are not BSP (CPU #0) */

cpu_reset_proxyid = cpuid;
cpu_reset_proxyid = PCPU_GET(cpuid);
cpustop_restartfunc = cpu_reset_proxy;
cpu_reset_proxy_active = 0;
printf("cpu_reset: Restarting BSP\n");
@@ -62,7 +62,7 @@
#define CLKF_USERMODE(framep) \
((ISPL((framep)->cf_cs) == SEL_UPL) || (framep->cf_eflags & PSL_VM))

#define CLKF_INTR(framep) (intr_nesting_level >= 2)
#define CLKF_INTR(framep) (PCPU_GET(intr_nesting_level) >= 2)
#define CLKF_PC(framep) ((framep)->cf_eip)

/*
@@ -82,7 +82,7 @@
#define need_resched() do { \
PCPU_SET(astpending, AST_RESCHED|AST_PENDING); \
} while (0)
#define resched_wanted() (astpending & AST_RESCHED)
#define resched_wanted() (PCPU_GET(astpending) & AST_RESCHED)

/*
* Arrange to handle pending profiling ticks before returning to user mode.
@@ -105,7 +105,7 @@
*/
#define signotify(p) aston()
#define aston() do { \
PCPU_SET(astpending, astpending | AST_PENDING); \
PCPU_SET(astpending, PCPU_GET(astpending) | AST_PENDING); \
} while (0)
#define astoff()
@@ -1889,7 +1889,7 @@ ohci_abort_xfer(xfer, status)
timeout(ohci_abort_xfer_end, xfer, hz / USB_FRAMES_PER_SECOND);
} else {
#if defined(DIAGNOSTIC) && defined(__i386__) && defined(__FreeBSD__)
KASSERT(intr_nesting_level == 0,
KASSERT(PCPU_GET(intr_nesting_level) == 0,
("ohci_abort_req in interrupt context"));
#endif
usb_delay_ms(opipe->pipe.device->bus, 1);
@@ -1610,7 +1610,7 @@ uhci_abort_xfer(usbd_xfer_handle xfer, usbd_status status)
timeout(uhci_abort_xfer_end, xfer, hz / USB_FRAMES_PER_SECOND);
} else {
#if defined(DIAGNOSTIC) && defined(__i386__) && defined(__FreeBSD__)
KASSERT(intr_nesting_level == 0,
KASSERT(PCPU_GET(intr_nesting_level) == 0,
("ohci_abort_req in interrupt context"));
#endif
usb_delay_ms(xfer->pipe->device->bus, 1);
@@ -931,7 +931,7 @@ usbd_do_request_flags(dev, req, data, flags, actlen)

#ifdef DIAGNOSTIC
#if defined(__i386__) && defined(__FreeBSD__)
KASSERT(intr_nesting_level == 0,
KASSERT(PCPU_GET(intr_nesting_level) == 0,
("usbd_do_request: in interrupt context"));
#endif
if (dev->bus->intr_context) {
@@ -86,7 +86,7 @@ caddr_t MMalloc (int size, char *, int);
void FFree (void *mem, char *, int);
#define LOCKDRIVE(d) lockdrive (d, __FILE__, __LINE__)
#else
#define Malloc(x) malloc((x), M_DEVBUF, intr_nesting_level == 0)
#define Malloc(x) malloc((x), M_DEVBUF, PCPU_GET(intr_nesting_level) == 0)
#define Free(x) free((x), M_DEVBUF)
#define LOCKDRIVE(d) lockdrive (d)
#endif
@@ -151,7 +151,7 @@ MMalloc(int size, char *file, int line)
return 0; /* can't continue */
}
/* Wait for malloc if we can */
result = malloc(size, M_DEVBUF, intr_nesting_level == 0 ? M_WAITOK : M_NOWAIT);
result = malloc(size, M_DEVBUF, PCPU_GET(intr_nesting_level) == 0 ? M_WAITOK : M_NOWAIT);
if (result == NULL)
log(LOG_ERR, "vinum: can't allocate %d bytes from %s:%d\n", size, file, line);
else {
@@ -62,7 +62,7 @@ apic_initialize(void)
/* setup LVT1 as ExtINT */
temp = lapic.lvt_lint0;
temp &= ~(APIC_LVT_M | APIC_LVT_TM | APIC_LVT_IIPP | APIC_LVT_DM);
if (cpuid == 0)
if (PCPU_GET(cpuid) == 0)
temp |= 0x00000700; /* process ExtInts */
else
temp |= 0x00010700; /* mask ExtInts */
@@ -94,7 +94,7 @@ apic_initialize(void)
temp |= (XSPURIOUSINT_OFFSET & APIC_SVR_VEC_PROG);

#if defined(TEST_TEST1)
if (cpuid == GUARD_CPU) {
if (PCPU_GET(cpuid) == GUARD_CPU) {
temp &= ~APIC_SVR_SWEN; /* software DISABLE APIC */
}
#endif /** TEST_TEST1 */
@@ -112,7 +112,7 @@ apic_initialize(void)
void
apic_dump(char* str)
{
printf("SMP: CPU%d %s:\n", cpuid, str);
printf("SMP: CPU%d %s:\n", PCPU_GET(cpuid), str);
printf(" lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
lapic.lvt_lint0, lapic.lvt_lint1, lapic.tpr, lapic.svr);
}
@ -453,7 +453,7 @@ pmap_set_opt(void)
{
if (pseflag && (cpu_feature & CPUID_PSE)) {
load_cr4(rcr4() | CR4_PSE);
if (pdir4mb && cpuid == 0) { /* only on BSP */
if (pdir4mb && PCPU_GET(cpuid) == 0) { /* only on BSP */
kernel_pmap->pm_pdir[KPTDI] =
PTD[KPTDI] = (pd_entry_t)pdir4mb;
cpu_invltlb();
@ -581,9 +581,9 @@ static __inline void
pmap_TLB_invalidate(pmap_t pmap, vm_offset_t va)
{
#if defined(SMP)
if (pmap->pm_active & (1 << cpuid))
if (pmap->pm_active & (1 << PCPU_GET(cpuid)))
cpu_invlpg((void *)va);
if (pmap->pm_active & other_cpus)
if (pmap->pm_active & PCPU_GET(other_cpus))
smp_invltlb();
#else
if (pmap->pm_active)
@ -595,9 +595,9 @@ static __inline void
pmap_TLB_invalidate_all(pmap_t pmap)
{
#if defined(SMP)
if (pmap->pm_active & (1 << cpuid))
if (pmap->pm_active & (1 << PCPU_GET(cpuid)))
cpu_invltlb();
if (pmap->pm_active & other_cpus)
if (pmap->pm_active & PCPU_GET(other_cpus))
smp_invltlb();
#else
if (pmap->pm_active)
@ -652,11 +652,11 @@ pmap_pte_quick(pmap, va)
}
newpf = pde & PG_FRAME;
#ifdef SMP
if ( ((* (unsigned *) prv_PMAP1) & PG_FRAME) != newpf) {
* (unsigned *) prv_PMAP1 = newpf | PG_RW | PG_V;
cpu_invlpg(prv_PADDR1);
if ( ((* (unsigned *) PCPU_GET(prv_PMAP1)) & PG_FRAME) != newpf) {
* (unsigned *) PCPU_GET(prv_PMAP1) = newpf | PG_RW | PG_V;
cpu_invlpg(PCPU_GET(prv_PADDR1));
}
return (unsigned *)(prv_PADDR1 + (index & (NPTEPG - 1)));
return (unsigned *)(PCPU_GET(prv_PADDR1) + (index & (NPTEPG - 1)));
#else
if ( ((* (unsigned *) PMAP1) & PG_FRAME) != newpf) {
* (unsigned *) PMAP1 = newpf | PG_RW | PG_V;
@ -1985,11 +1985,11 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pmap->pm_pdir[PTDPTDI], origpte, va);
}
if (smp_active) {
pdeaddr = (vm_offset_t *) IdlePTDS[cpuid];
pdeaddr = (vm_offset_t *) IdlePTDS[PCPU_GET(cpuid)];
if (((newpte = pdeaddr[va >> PDRSHIFT]) & PG_V) == 0) {
if ((vm_offset_t) my_idlePTD != (vm_offset_t) vtophys(pdeaddr))
printf("pde mismatch: %x, %x\n", my_idlePTD, pdeaddr);
printf("cpuid: %d, pdeaddr: 0x%x\n", cpuid, pdeaddr);
printf("cpuid: %d, pdeaddr: 0x%x\n", PCPU_GET(cpuid), pdeaddr);
panic("pmap_enter: invalid kernel page table page(1), pdir=%p, npde=%p, pde=%p, va=%p\n",
pmap->pm_pdir[PTDPTDI], newpte, origpte, va);
}
@ -2048,7 +2048,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
*pte |= PG_RW;
#ifdef SMP
cpu_invlpg((void *)va);
if (pmap->pm_active & other_cpus)
if (pmap->pm_active & PCPU_GET(other_cpus))
smp_invltlb();
#else
invltlb_1pg(va);
@ -2122,7 +2122,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
/*if (origpte)*/ {
#ifdef SMP
cpu_invlpg((void *)va);
if (pmap->pm_active & other_cpus)
if (pmap->pm_active & PCPU_GET(other_cpus))
smp_invltlb();
#else
invltlb_1pg(va);
@ -2666,20 +2666,20 @@ pmap_zero_page(phys)
vm_offset_t phys;
{
#ifdef SMP
if (*(int *) prv_CMAP3)
if (*(int *) PCPU_GET(prv_CMAP3))
panic("pmap_zero_page: prv_CMAP3 busy");

*(int *) prv_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
cpu_invlpg(prv_CADDR3);
*(int *) PCPU_GET(prv_CMAP3) = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
cpu_invlpg(PCPU_GET(prv_CADDR3));

#if defined(I686_CPU)
if (cpu_class == CPUCLASS_686)
i686_pagezero(prv_CADDR3);
i686_pagezero(PCPU_GET(prv_CADDR3));
else
#endif
bzero(prv_CADDR3, PAGE_SIZE);
bzero(PCPU_GET(prv_CADDR3), PAGE_SIZE);

*(int *) prv_CMAP3 = 0;
*(int *) PCPU_GET(prv_CMAP3) = 0;
#else
if (*(int *) CMAP2)
panic("pmap_zero_page: CMAP2 busy");
@ -2710,20 +2710,20 @@ pmap_zero_page_area(phys, off, size)
int size;
{
#ifdef SMP
if (*(int *) prv_CMAP3)
if (*(int *) PCPU_GET(prv_CMAP3))
panic("pmap_zero_page: prv_CMAP3 busy");

*(int *) prv_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
cpu_invlpg(prv_CADDR3);
*(int *) PCPU_GET(prv_CMAP3) = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
cpu_invlpg(PCPU_GET(prv_CADDR3));

#if defined(I686_CPU)
if (cpu_class == CPUCLASS_686 && off == 0 && size == PAGE_SIZE)
i686_pagezero(prv_CADDR3);
i686_pagezero(PCPU_GET(prv_CADDR3));
else
#endif
bzero((char *)prv_CADDR3 + off, size);
bzero((char *)PCPU_GET(prv_CADDR3) + off, size);

*(int *) prv_CMAP3 = 0;
*(int *) PCPU_GET(prv_CMAP3) = 0;
#else
if (*(int *) CMAP2)
panic("pmap_zero_page: CMAP2 busy");
@ -2753,21 +2753,22 @@ pmap_copy_page(src, dst)
vm_offset_t dst;
{
#ifdef SMP
if (*(int *) prv_CMAP1)
if (*(int *) PCPU_GET(prv_CMAP1))
panic("pmap_copy_page: prv_CMAP1 busy");
if (*(int *) prv_CMAP2)
if (*(int *) PCPU_GET(prv_CMAP2))
panic("pmap_copy_page: prv_CMAP2 busy");

*(int *) prv_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
*(int *) prv_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
*(int *) PCPU_GET(prv_CMAP1) = PG_V | (src & PG_FRAME) | PG_A;
*(int *) PCPU_GET(prv_CMAP2) = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;

cpu_invlpg(prv_CADDR1);
cpu_invlpg(prv_CADDR2);
cpu_invlpg(PCPU_GET(prv_CADDR1));
cpu_invlpg(PCPU_GET(prv_CADDR2));

bcopy(prv_CADDR1, prv_CADDR2, PAGE_SIZE);
bcopy(PCPU_GET(prv_CADDR1), PCPU_GET(prv_CADDR2), PAGE_SIZE);

*(int *) PCPU_GET(prv_CMAP1) = 0;
*(int *) PCPU_GET(prv_CMAP2) = 0;

*(int *) prv_CMAP1 = 0;
*(int *) prv_CMAP2 = 0;
#else
if (*(int *) CMAP1 || *(int *) CMAP2)
panic("pmap_copy_page: CMAP busy");
@ -3294,7 +3295,7 @@ pmap_activate(struct proc *p)

pmap = vmspace_pmap(p->p_vmspace);
#if defined(SMP)
pmap->pm_active |= 1 << cpuid;
pmap->pm_active |= 1 << PCPU_GET(cpuid);
#else
pmap->pm_active |= 1;
#endif
@ -252,12 +252,12 @@ set_user_ldt(struct pcb *pcb)
{
struct pcb_ldt *pcb_ldt;

if (pcb != curpcb)
if (pcb != PCPU_GET(curpcb))
return;

pcb_ldt = pcb->pcb_ldt;
#ifdef SMP
gdt[cpuid * NGDT + GUSERLDT_SEL].sd = pcb_ldt->ldt_sd;
gdt[PCPU_GET(cpuid) * NGDT + GUSERLDT_SEL].sd = pcb_ldt->ldt_sd;
#else
gdt[GUSERLDT_SEL].sd = pcb_ldt->ldt_sd;
#endif
@ -308,7 +308,7 @@ user_ldt_free(struct pcb *pcb)
if (pcb_ldt == NULL)
return;

if (pcb == curpcb) {
if (pcb == PCPU_GET(curpcb)) {
lldt(_default_ldt);
PCPU_SET(currentldt, _default_ldt);
}
@ -480,7 +480,7 @@ trap(frame)
if (in_vm86call)
break;

if (intr_nesting_level != 0)
if (PCPU_GET(intr_nesting_level) != 0)
break;

/*
@ -493,7 +493,7 @@ trap(frame)
* a signal.
*/
if (frame.tf_eip == (int)cpu_switch_load_gs) {
curpcb->pcb_gs = 0;
PCPU_GET(curpcb)->pcb_gs = 0;
psignal(p, SIGBUS);
goto out;
}
@ -519,13 +519,15 @@ trap(frame)
if (frame.tf_eip == (int)doreti_popl_es) {
frame.tf_eip = (int)doreti_popl_es_fault;
goto out;
}
}
if (frame.tf_eip == (int)doreti_popl_fs) {
frame.tf_eip = (int)doreti_popl_fs_fault;
goto out;
}
if (curpcb && curpcb->pcb_onfault) {
frame.tf_eip = (int)curpcb->pcb_onfault;
if (PCPU_GET(curpcb) != NULL &&
PCPU_GET(curpcb)->pcb_onfault != NULL) {
frame.tf_eip =
(int)PCPU_GET(curpcb)->pcb_onfault;
goto out;
}
break;
@ -685,8 +687,9 @@ trap_pfault(frame, usermode, eva)

if (p == NULL ||
(!usermode && va < VM_MAXUSER_ADDRESS &&
(intr_nesting_level != 0 || curpcb == NULL ||
curpcb->pcb_onfault == NULL))) {
(PCPU_GET(intr_nesting_level) != 0 ||
PCPU_GET(curpcb) == NULL ||
PCPU_GET(curpcb)->pcb_onfault == NULL))) {
trap_fatal(frame, eva);
return (-1);
}
@ -748,8 +751,10 @@ trap_pfault(frame, usermode, eva)
return (0);
nogo:
if (!usermode) {
if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) {
frame->tf_eip = (int)curpcb->pcb_onfault;
if (PCPU_GET(intr_nesting_level) == 0 &&
PCPU_GET(curpcb) != NULL &&
PCPU_GET(curpcb)->pcb_onfault != NULL) {
frame->tf_eip = (int)PCPU_GET(curpcb)->pcb_onfault;
return (0);
}
trap_fatal(frame, eva);
@ -853,8 +858,10 @@ trap_pfault(frame, usermode, eva)
return (0);
nogo:
if (!usermode) {
if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) {
frame->tf_eip = (int)curpcb->pcb_onfault;
if (PCPU_GET(intr_nesting_level) == 0 &&
PCPU_GET(curpcb) != NULL &&
PCPU_GET(curpcb)->pcb_onfault != NULL) {
frame->tf_eip = (int)PCPU_GET(curpcb)->pcb_onfault;
return (0);
}
trap_fatal(frame, eva);
@ -886,7 +893,7 @@ trap_fatal(frame, eva)
ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
/* two seperate prints in case of a trap on an unmapped page */
printf("cpuid = %d; ", cpuid);
printf("cpuid = %d; ", PCPU_GET(cpuid));
printf("lapic.id = %08x\n", lapic.id);
#endif
if (type == T_PAGEFLT) {
@ -964,12 +971,12 @@ void
dblfault_handler()
{
printf("\nFatal double fault:\n");
printf("eip = 0x%x\n", common_tss.tss_eip);
printf("esp = 0x%x\n", common_tss.tss_esp);
printf("ebp = 0x%x\n", common_tss.tss_ebp);
printf("eip = 0x%x\n", PCPU_GET(common_tss.tss_eip));
printf("esp = 0x%x\n", PCPU_GET(common_tss.tss_esp));
printf("ebp = 0x%x\n", PCPU_GET(common_tss.tss_ebp));
#ifdef SMP
/* two seperate prints in case of a trap on an unmapped page */
printf("cpuid = %d; ", cpuid);
printf("cpuid = %d; ", PCPU_GET(cpuid));
printf("lapic.id = %08x\n", lapic.id);
#endif
panic("double fault");
@ -143,9 +143,9 @@ vm86_emulate(vmf)
* the extension is not present. (This check should not be needed,
* as we can't enter vm86 mode until we set up an extension area)
*/
if (curpcb->pcb_ext == 0)
if (PCPU_GET(curpcb)->pcb_ext == 0)
return (SIGBUS);
vm86 = &curpcb->pcb_ext->ext_vm86;
vm86 = &PCPU_GET(curpcb)->pcb_ext->ext_vm86;

if (vmf->vmf_eflags & PSL_T)
retcode = SIGTRAP;
@ -507,7 +507,7 @@ static void
vm86_initflags(struct vm86frame *vmf)
{
int eflags = vmf->vmf_eflags;
struct vm86_kernel *vm86 = &curpcb->pcb_ext->ext_vm86;
struct vm86_kernel *vm86 = &PCPU_GET(curpcb)->pcb_ext->ext_vm86;

if (vm86->vm86_has_vme) {
eflags = (vmf->vmf_eflags & ~VME_USERCHANGE) |
@ -143,7 +143,7 @@ cpu_fork(p1, p2, flags)

#if NNPX > 0
/* Ensure that p1's pcb is up to date. */
if (npxproc == p1)
if (PCPU_GET(npxproc) == p1)
npxsave(&p1->p_addr->u_pcb.pcb_savefpu);
#endif

@ -442,23 +442,23 @@ cpu_reset()

u_int map;
int cnt;
printf("cpu_reset called on cpu#%d\n",cpuid);
printf("cpu_reset called on cpu#%d\n", PCPU_GET(cpuid));

map = other_cpus & ~ stopped_cpus;
map = PCPU_GET(other_cpus) & ~ stopped_cpus;

if (map != 0) {
printf("cpu_reset: Stopping other CPUs\n");
stop_cpus(map); /* Stop all other CPUs */
}

if (cpuid == 0) {
if (PCPU_GET(cpuid) == 0) {
DELAY(1000000);
cpu_reset_real();
/* NOTREACHED */
} else {
/* We are not BSP (CPU #0) */

cpu_reset_proxyid = cpuid;
cpu_reset_proxyid = PCPU_GET(cpuid);
cpustop_restartfunc = cpu_reset_proxy;
cpu_reset_proxy_active = 0;
printf("cpu_reset: Restarting BSP\n");
@ -62,7 +62,7 @@
#define CLKF_USERMODE(framep) \
((ISPL((framep)->cf_cs) == SEL_UPL) || (framep->cf_eflags & PSL_VM))

#define CLKF_INTR(framep) (intr_nesting_level >= 2)
#define CLKF_INTR(framep) (PCPU_GET(intr_nesting_level) >= 2)
#define CLKF_PC(framep) ((framep)->cf_eip)

/*
@ -82,7 +82,7 @@
#define need_resched() do { \
PCPU_SET(astpending, AST_RESCHED|AST_PENDING); \
} while (0)
#define resched_wanted() (astpending & AST_RESCHED)
#define resched_wanted() (PCPU_GET(astpending) & AST_RESCHED)

/*
* Arrange to handle pending profiling ticks before returning to user mode.
@ -105,7 +105,7 @@
*/
#define signotify(p) aston()
#define aston() do { \
PCPU_SET(astpending, astpending | AST_PENDING); \
PCPU_SET(astpending, PCPU_GET(astpending) | AST_PENDING); \
} while (0)
#define astoff()
@ -484,11 +484,11 @@ init_secondary(void)

gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
common_tss.tss_esp0 = 0; /* not used until after switch */
common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
common_tss.tss_ioopt = (sizeof common_tss) << 16;
tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd;
common_tssd = *tss_gdt;
PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
ltr(gsel_tss);

pmap_set_opt();
@ -2045,7 +2045,7 @@ start_all_aps(u_int boot_addr)
}

/* build our map of 'other' CPUs */
other_cpus = all_cpus & ~(1 << cpuid);
PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));

/* fill in our (BSP) APIC version */
cpu_apic_versions[0] = lapic.version;
@ -2398,9 +2398,9 @@ ap_init(void)
#endif

/* Build our map of 'other' CPUs. */
other_cpus = all_cpus & ~(1 << cpuid);
PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));

printf("SMP: AP CPU #%d Launched!\n", cpuid);
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

/* set up CPU registers and state */
cpu_setregs();
@ -493,8 +493,8 @@ npxinit(control)
npxsave(&dummy);
stop_emulating();
fldcw(&control);
if (curpcb != NULL)
fnsave(&curpcb->pcb_savefpu);
if (PCPU_GET(curpcb) != NULL)
fnsave(&PCPU_GET(curpcb)->pcb_savefpu);
start_emulating();
}

@ -506,14 +506,14 @@ npxexit(p)
struct proc *p;
{

if (p == npxproc)
npxsave(&curpcb->pcb_savefpu);
if (p == PCPU_GET(npxproc))
npxsave(&PCPU_GET(curpcb)->pcb_savefpu);
#ifdef NPX_DEBUG
if (npx_exists) {
u_int masked_exceptions;

masked_exceptions = curpcb->pcb_savefpu.sv_env.en_cw
& curpcb->pcb_savefpu.sv_env.en_sw & 0x7f;
masked_exceptions = PCPU_GET(curpcb)->pcb_savefpu.sv_env.en_cw
&PCPU_GET(curpcb)->pcb_savefpu.sv_env.en_sw & 0x7f;
/*
* Log exceptions that would have trapped with the old
* control word (overflow, divide by 0, and invalid operand).
@ -722,19 +722,19 @@ npx_intr(dummy)
u_short control;
struct intrframe *frame;

if (npxproc == NULL || !npx_exists) {
if (PCPU_GET(npxproc) == NULL || !npx_exists) {
printf("npxintr: npxproc = %p, curproc = %p, npx_exists = %d\n",
npxproc, curproc, npx_exists);
PCPU_GET(npxproc), curproc, npx_exists);
panic("npxintr from nowhere");
}
if (npxproc != curproc) {
if (PCPU_GET(npxproc) != curproc) {
printf("npxintr: npxproc = %p, curproc = %p, npx_exists = %d\n",
npxproc, curproc, npx_exists);
PCPU_GET(npxproc), curproc, npx_exists);
panic("npxintr from non-current process");
}

outb(0xf0, 0);
fnstsw(&curpcb->pcb_savefpu.sv_ex_sw);
fnstsw(&PCPU_GET(curpcb)->pcb_savefpu.sv_ex_sw);
fnstcw(&control);
fnclex();

@ -760,8 +760,8 @@ npx_intr(dummy)
* this exception.
*/
code =
fpetable[(curpcb->pcb_savefpu.sv_ex_sw & ~control & 0x3f) |
(curpcb->pcb_savefpu.sv_ex_sw & 0x40)];
fpetable[(PCPU_GET(curpcb)->pcb_savefpu.sv_ex_sw & ~control & 0x3f) |
(PCPU_GET(curpcb)->pcb_savefpu.sv_ex_sw & 0x40)];
trapsignal(curproc, SIGFPE, code);
} else {
/*
@ -794,9 +794,9 @@ npxdna()
{
if (!npx_exists)
return (0);
if (npxproc != NULL) {
if (PCPU_GET(npxproc) != NULL) {
printf("npxdna: npxproc = %p, curproc = %p\n",
npxproc, curproc);
PCPU_GET(npxproc), curproc);
panic("npxdna");
}
stop_emulating();
@ -804,7 +804,7 @@ npxdna()
* Record new context early in case frstor causes an IRQ13.
*/
PCPU_SET(npxproc, CURPROC);
curpcb->pcb_savefpu.sv_ex_sw = 0;
PCPU_GET(curpcb)->pcb_savefpu.sv_ex_sw = 0;
/*
* The following frstor may cause an IRQ13 when the state being
* restored has a pending error. The error will appear to have been
@ -817,7 +817,7 @@ npxdna()
* fnsave are broken, so our treatment breaks fnclex if it is the
* first FPU instruction after a context switch.
*/
frstor(&curpcb->pcb_savefpu);
frstor(&PCPU_GET(curpcb)->pcb_savefpu);

return (1);
}
@ -78,7 +78,7 @@ static u_int schedclk2;
void
interrupt(u_int64_t vector, struct trapframe *framep)
{
atomic_add_int(&PCPU_GET(intr_nesting_level), 1);
atomic_add_int(PCPU_PTR(intr_nesting_level), 1);

switch (vector) {
case 240: /* clock interrupt */
@ -104,7 +104,7 @@ interrupt(u_int64_t vector, struct trapframe *framep)
panic("unexpected interrupt: vec %ld\n", vector);
/* NOTREACHED */
}
atomic_subtract_int(&PCPU_GET(intr_nesting_level), 1);
atomic_subtract_int(PCPU_PTR(intr_nesting_level), 1);
}
@ -337,7 +337,7 @@ forward_statclock(int pscnt)

map = 0;
for (id = 0; id < mp_ncpus; id++) {
if (id == cpuid)
if (id == PCPU_GET(cpuid))
continue;
if (((1 << id) & checkstate_probed_cpus) == 0)
continue;
@ -415,7 +415,7 @@ forward_hardclock(int pscnt)

map = 0;
for (id = 0; id < mp_ncpus; id++) {
if (id == cpuid)
if (id == PCPU_GET(cpuid))
continue;
if (((1 << id) & checkstate_probed_cpus) == 0)
continue;
@ -741,7 +741,7 @@ smp_handle_ipi(struct trapframe *frame)

do {
ipis = PCPU_GET(pending_ipis);
} while (atomic_cmpset_64(&PCPU_GET(pending_ipis), ipis, 0));
} while (atomic_cmpset_64(PCPU_PTR(pending_ipis), ipis, 0));

CTR1(KTR_SMP, "smp_handle_ipi(), ipis=%lx", ipis);
while (ipis) {
@ -415,7 +415,7 @@ proc0_post(void *dummy __unused)
p->p_runtime = 0;
}
ALLPROC_LOCK(AP_RELEASE);
microuptime(&switchtime);
microuptime(PCPU_PTR(switchtime));
PCPU_SET(switchticks, ticks);

/*
@ -160,7 +160,7 @@ hardclock(frame)
int need_softclock = 0;

p = curproc;
if (p != idleproc) {
if (p != PCPU_GET(idleproc)) {
register struct pstats *pstats;

/*
@ -400,7 +400,7 @@ statclock(frame)
cp_time[CP_INTR]++;
} else {
p->p_sticks++;
if (p != idleproc)
if (p != PCPU_GET(idleproc))
cp_time[CP_SYS]++;
else
cp_time[CP_IDLE]++;
@ -305,8 +305,8 @@ exit1(p, rv)
* directly. Set it now so that the rest of the exit time gets
* counted somewhere if possible.
*/
microuptime(&switchtime);
switchticks = ticks;
microuptime(PCPU_PTR(switchtime));
PCPU_SET(switchticks, ticks);

/*
* notify interested parties of our demise.
@ -61,7 +61,7 @@
#endif

#ifdef SMP
#define KTR_CPU cpuid
#define KTR_CPU PCPU_GET(cpuid)
#else
#define KTR_CPU 0
#endif
@ -147,7 +147,7 @@ malloc(size, type, flags)

#if defined(INVARIANTS) && defined(__i386__)
if (flags == M_WAITOK)
KASSERT(intr_nesting_level == 0,
KASSERT(PCPU_GET(intr_nesting_level) == 0,
("malloc(M_WAITOK) in interrupt context"));
#endif
indx = BUCKETINDX(size);
@ -844,7 +844,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
if (m->mtx_recurse != 0)
return;
mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
i = witness_spin_check;
i = PCPU_GET(witness_spin_check);
if (i != 0 && w->w_level < i) {
mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
panic("mutex_enter(%s:%x, MTX_SPIN) out of order @"
@ -977,7 +977,8 @@ witness_exit(struct mtx *m, int flags, const char *file, int line)
if (m->mtx_recurse != 0)
return;
mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
PCPU_SET(witness_spin_check, witness_spin_check & ~w->w_level);
PCPU_SET(witness_spin_check,
PCPU_GET(witness_spin_check) & ~w->w_level);
mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
return;
}
@ -1011,7 +1012,8 @@ witness_try_enter(struct mtx *m, int flags, const char *file, int line)
if (m->mtx_recurse != 0)
return;
mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
PCPU_SET(witness_spin_check, witness_spin_check | w->w_level);
PCPU_SET(witness_spin_check,
PCPU_GET(witness_spin_check) | w->w_level);
mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
w->w_file = file;
w->w_line = line;
@ -534,13 +534,14 @@ calcru(p, up, sp, ip)
* quantum, which is much greater than the sampling error.
*/
microuptime(&tv);
if (timevalcmp(&tv, &switchtime, <))
if (timevalcmp(&tv, PCPU_PTR(switchtime), <))
printf("microuptime() went backwards (%ld.%06ld -> %ld.%06ld)\n",
switchtime.tv_sec, switchtime.tv_usec,
PCPU_GET(switchtime.tv_sec), PCPU_GET(switchtime.tv_usec),
tv.tv_sec, tv.tv_usec);
else
tu += (tv.tv_usec - switchtime.tv_usec) +
(tv.tv_sec - switchtime.tv_sec) * (int64_t)1000000;
tu += (tv.tv_usec - PCPU_GET(switchtime.tv_usec)) +
(tv.tv_sec - PCPU_GET(switchtime.tv_sec)) *
(int64_t)1000000;
}
ptu = p->p_uu + p->p_su + p->p_iu;
if (tu < ptu || (int64_t)tu < 0) {
@ -209,7 +209,7 @@ boot(int howto)

#ifdef SMP
if (smp_active)
printf("boot() called on cpu#%d\n", cpuid);
printf("boot() called on cpu#%d\n", PCPU_GET(cpuid));
#endif
/*
* Do any callouts that should be done BEFORE syncing the filesystems.
@ -557,7 +557,7 @@ panic(const char *fmt, ...)
printf("panic: %s\n", buf);
#ifdef SMP
/* two seperate prints in case of an unmapped page and trap */
printf("cpuid = %d; ", cpuid);
printf("cpuid = %d; ", PCPU_GET(cpuid));
#ifdef APIC_IO
printf("lapic.id = %08x\n", lapic.id);
#endif
@ -92,7 +92,7 @@ uiomove(cp, n, uio)

case UIO_USERSPACE:
case UIO_USERISPACE:
if (ticks - switchticks >= hogticks)
if (ticks - PCPU_GET(switchticks) >= hogticks)
uio_yield();
if (uio->uio_rw == UIO_READ)
error = copyout(cp, iov->iov_base, cnt);
@ -154,7 +154,7 @@ uiomoveco(cp, n, uio, obj)

case UIO_USERSPACE:
case UIO_USERISPACE:
if (ticks - switchticks >= hogticks)
if (ticks - PCPU_GET(switchticks) >= hogticks)
uio_yield();
if (uio->uio_rw == UIO_READ) {
#ifdef ENABLE_VFS_IOOPT
@ -236,7 +236,7 @@ uioread(n, uio, obj, nread)

cnt &= ~PAGE_MASK;

if (ticks - switchticks >= hogticks)
if (ticks - PCPU_GET(switchticks) >= hogticks)
uio_yield();
error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
uio->uio_offset, cnt,
@ -232,12 +232,12 @@ chooseproc(void)
} else {
CTR1(KTR_PROC, "chooseproc: idleproc, schedlock %lx",
(long)sched_lock.mtx_lock);
return idleproc;
return PCPU_GET(idleproc);
}
p = TAILQ_FIRST(q);
#ifdef SMP
/* wander down the current run queue for this pri level for a match */
id = cpuid;
id = PCPU_GET(cpuid);
while (p->p_lastcpu != id) {
p = TAILQ_NEXT(p, p_procq);
if (p == NULL) {
@ -142,7 +142,7 @@ maybe_resched(chk)
* standard process becomes runaway cpu-bound, the system can lockup
* due to idle-scheduler processes in wakeup never getting any cpu.
*/
if (p == idleproc) {
if (p == PCPU_GET(idleproc)) {
#if 0
need_resched();
#endif
@ -176,7 +176,7 @@ roundrobin(arg)
need_resched();
forward_roundrobin();
#else
if (p == idleproc || RTP_PRIO_NEED_RR(p->p_rtprio.type))
if (p == PCPU_GET(idleproc) || RTP_PRIO_NEED_RR(p->p_rtprio.type))
need_resched();
#endif

@ -925,14 +925,15 @@ mi_switch()
* process was running, and add that to its total so far.
*/
microuptime(&new_switchtime);
if (timevalcmp(&new_switchtime, &switchtime, <)) {
if (timevalcmp(&new_switchtime, PCPU_PTR(switchtime), <)) {
printf("microuptime() went backwards (%ld.%06ld -> %ld.%06ld)\n",
switchtime.tv_sec, switchtime.tv_usec,
PCPU_GET(switchtime.tv_sec), PCPU_GET(switchtime.tv_usec),
new_switchtime.tv_sec, new_switchtime.tv_usec);
new_switchtime = switchtime;
new_switchtime = PCPU_GET(switchtime);
} else {
p->p_runtime += (new_switchtime.tv_usec - switchtime.tv_usec) +
(new_switchtime.tv_sec - switchtime.tv_sec) * (int64_t)1000000;
p->p_runtime += (new_switchtime.tv_usec - PCPU_GET(switchtime.tv_usec)) +
(new_switchtime.tv_sec - PCPU_GET(switchtime.tv_sec)) *
(int64_t)1000000;
}

/*
@ -959,15 +960,15 @@ mi_switch()
* Pick a new current process and record its start time.
*/
cnt.v_swtch++;
switchtime = new_switchtime;
PCPU_SET(switchtime, new_switchtime);
CTR4(KTR_PROC, "mi_switch: old proc %p (pid %d, %s), schedlock %p",
p, p->p_pid, p->p_comm, (void *) sched_lock.mtx_lock);
cpu_switch();
CTR4(KTR_PROC, "mi_switch: new proc %p (pid %d, %s), schedlock %p",
p, p->p_pid, p->p_comm, (void *) sched_lock.mtx_lock);
if (switchtime.tv_sec == 0)
microuptime(&switchtime);
switchticks = ticks;
if (PCPU_GET(switchtime.tv_sec) == 0)
microuptime(PCPU_PTR(switchtime));
PCPU_SET(switchticks, ticks);
splx(x);
}
@ -113,7 +113,7 @@ uprintf(const char *fmt, ...)
struct putchar_arg pca;
int retval = 0;

if (p && p != idleproc && p->p_flag & P_CONTROLT &&
if (p && p != PCPU_GET(idleproc) && p->p_flag & P_CONTROLT &&
p->p_session->s_ttyvp) {
va_start(ap, fmt);
pca.tty = p->p_session->s_ttyp;
@ -510,7 +510,7 @@ bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,

/* if not found in cache, do some I/O */
if ((bp->b_flags & B_CACHE) == 0) {
if (curproc != idleproc)
if (curproc != PCPU_GET(idleproc))
curproc->p_stats->p_ru.ru_inblock++;
KASSERT(!(bp->b_flags & B_ASYNC), ("bread: illegal async bp %p", bp));
bp->b_iocmd = BIO_READ;
@ -547,7 +547,7 @@ breadn(struct vnode * vp, daddr_t blkno, int size,

/* if not found in cache, do some I/O */
if ((bp->b_flags & B_CACHE) == 0) {
if (curproc != idleproc)
if (curproc != PCPU_GET(idleproc))
curproc->p_stats->p_ru.ru_inblock++;
bp->b_iocmd = BIO_READ;
bp->b_flags &= ~B_INVAL;
@ -568,7 +568,7 @@ breadn(struct vnode * vp, daddr_t blkno, int size,
rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

if ((rabp->b_flags & B_CACHE) == 0) {
if (curproc != idleproc)
if (curproc != PCPU_GET(idleproc))
curproc->p_stats->p_ru.ru_inblock++;
rabp->b_flags |= B_ASYNC;
rabp->b_flags &= ~B_INVAL;
@ -695,7 +695,7 @@ bwrite(struct buf * bp)

bp->b_vp->v_numoutput++;
vfs_busy_pages(bp, 1);
if (curproc != idleproc)
if (curproc != PCPU_GET(idleproc))
curproc->p_stats->p_ru.ru_oublock++;
splx(s);
if (oldflags & B_ASYNC)
@ -2107,7 +2107,7 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
* XXX remove if 0 sections (clean this up after its proven)
*/
if (numfreebuffers == 0) {
if (curproc == idleproc)
if (curproc == PCPU_GET(idleproc))
return NULL;
needsbuffer |= VFS_BIO_NEED_ANY;
}
@ -318,7 +318,7 @@ BUF_KERNPROC(struct buf *bp)
{
struct proc *p = curproc;

if (p != idleproc && bp->b_lock.lk_lockholder == p->p_pid)
if (p != PCPU_GET(idleproc) && bp->b_lock.lk_lockholder == p->p_pid)
p->p_locks--;
bp->b_lock.lk_lockholder = LK_KERNPROC;
}