Initial support for SMP on PowerPC MPC85xx.

Tested with Freescale dual-core MPC8572DS development system.

Obtained from:	Freescale, Semihalf
Commit 28bb01e5ba (parent 2a476ed9ed)

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=192532
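The heart of the change is the path by which the boot CPU (BSP) releases a secondary core (AP) from its hold-off state and points it at a dedicated boot page. Below is a condensed, hedged C sketch of that start-up path, distilled from the bare_smp_start_cpu() hunk further down; error handling and the surrounding platform glue are simplified, so treat it as an illustration rather than a drop-in copy. All names used (OCP85XX_EEBPCR, OCP85XX_BPTR, ccsr_read4/ccsr_write4, __boot_page, kernload, ap_pcpu) come from the diff itself.

/*
 * Sketch of the AP start-up sequence (simplified from bare_smp_start_cpu()).
 */
static int
start_ap_sketch(struct pcpu *pc)
{
	uint32_t bptr, eebpcr;
	int timeout;

	/* The per-core hold-off bits live in the top byte of EEBPCR. */
	eebpcr = ccsr_read4(OCP85XX_EEBPCR);
	if ((eebpcr & (pc->pc_cpumask << 24)) != 0)
		return (ENXIO);		/* already released */

	/* Publish this AP's pcpu pointer for the early AP boot code. */
	ap_pcpu = pc;
	__asm __volatile("msync; isync");

	/* Point BPTR at the physical address of __boot_page and enable it. */
	bptr = ((uint32_t)__boot_page - KERNBASE) + kernload;
	ccsr_write4(OCP85XX_BPTR, (bptr >> 12) | 0x80000000);

	/* Release the AP; it fetches its reset vector from the boot page. */
	eebpcr |= (pc->pc_cpumask << 24);
	ccsr_write4(OCP85XX_EEBPCR, eebpcr);
	__asm __volatile("isync; msync");

	/* Wait up to ~500 ms for the AP to mark itself awake. */
	timeout = 500;
	while (!pc->pc_awake && timeout--)
		DELAY(1000);
	return (pc->pc_awake ? 0 : EBUSY);
}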
@@ -91,6 +91,7 @@ powerpc/booke/copyinout.c optional e500
powerpc/booke/interrupt.c optional e500
powerpc/booke/locore.S optional e500 no-obj
powerpc/booke/machdep.c optional e500
powerpc/booke/mp_cpudep.c optional e500 smp
powerpc/booke/platform_bare.c optional mpc85xx
powerpc/booke/pmap.c optional e500
powerpc/booke/swtch.S optional e500

@@ -63,6 +63,8 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/bus.h>
#include <sys/ktr.h>
#include <sys/pcpu.h>
#include <sys/timetc.h>
#include <sys/interrupt.h>

@@ -97,7 +99,6 @@ static struct timecounter decr_timecounter = {
void
decr_intr(struct trapframe *frame)
{
u_long msr;

/*
* Check whether we are initialized.
@@ -111,13 +112,17 @@ decr_intr(struct trapframe *frame)
*/
mtspr(SPR_TSR, TSR_DIS);

/*
* Reenable interrupts
*/
msr = mfmsr();
mtmsr(msr | PSL_EE);
CTR1(KTR_INTR, "%s: DEC interrupt", __func__);

if (PCPU_GET(cpuid) == 0)
hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
else
hardclock_cpu(TRAPF_USERMODE(frame));

statclock(TRAPF_USERMODE(frame));
if (profprocs != 0)
profclock(TRAPF_USERMODE(frame), TRAPF_PC(frame));

hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
}

void
@@ -125,10 +130,12 @@ cpu_initclocks(void)
{

decr_tc_init();
stathz = hz;
profhz = hz;
}

void
decr_init (void)
decr_init(void)
{
struct cpuref cpu;
unsigned int msr;
@@ -148,9 +155,24 @@ decr_init (void)
mtspr(SPR_DECAR, ticks_per_intr);
mtspr(SPR_TCR, mfspr(SPR_TCR) | TCR_DIE | TCR_ARE);

set_cputicker(mftb, ticks_per_sec, 0);

mtmsr(msr);
}

#ifdef SMP
void
decr_ap_init(void)
{

/* Set auto-reload value and enable DEC interrupts in TCR */
mtspr(SPR_DECAR, ticks_per_intr);
mtspr(SPR_TCR, mfspr(SPR_TCR) | TCR_DIE | TCR_ARE);

CTR2(KTR_INTR, "%s: set TCR=%p", __func__, mfspr(SPR_TCR));
}
#endif

void
decr_tc_init(void)
{

@@ -1,5 +1,5 @@
/*-
* Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski <raj@semihalf.com>
* Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
* Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
* All rights reserved.
*
@@ -28,6 +28,8 @@

#include "assym.s"

#include <sys/mutex.h>

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
@@ -162,6 +164,9 @@ __start:

lis %r3, KERNBASE@h
ori %r3, %r3, KERNBASE@l /* EPN = KERNBASE */
#ifdef SMP
ori %r3, %r3, MAS2_M@l /* WIMGE = 0b00100 */
#endif
mtspr SPR_MAS2, %r3
isync

@@ -201,6 +206,17 @@ __start:
lis %r3, kernload@ha
addi %r3, %r3, kernload@l
stw %r28, 0(%r3)
#ifdef SMP
/*
* APs need a separate copy of kernload info within the __boot_page
* area so they can access this value very early, before their TLBs
* are fully set up and the kernload global location is available.
*/
lis %r3, kernload_ap@ha
addi %r3, %r3, kernload_ap@l
stw %r28, 0(%r3)
msync
#endif

/*
* Setup a temporary stack
@@ -236,6 +252,168 @@ __start:
/* NOT REACHED */
5: b 5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
.text
.globl __boot_page
.align 12
__boot_page:
bl 1f

kernload_ap:
.long 0

/*
* Initial configuration
*/
1:
/* Set HIDs */
lis %r3, HID0_E500_DEFAULT_SET@h
ori %r3, %r3, HID0_E500_DEFAULT_SET@l
mtspr SPR_HID0, %r3
isync
lis %r3, HID1_E500_DEFAULT_SET@h
ori %r3, %r3, HID1_E500_DEFAULT_SET@l
mtspr SPR_HID1, %r3
isync

/* Enable branch prediction */
li %r3, BUCSR_BPEN
mtspr SPR_BUCSR, %r3
isync

/* Invalidate all entries in TLB0 */
li %r3, 0
bl tlb_inval_all

/*
* Find TLB1 entry which is translating us now
*/
bl 2f
2: mflr %r3
bl tlb1_find_current /* the entry number found is in r30 */

bl tlb1_inval_all_but_current
/*
* Create temporary translation in AS=1 and switch to it
*/
bl tlb1_temp_mapping_as1

mfmsr %r3
ori %r3, %r3, (PSL_IS | PSL_DS)
bl 3f
3: mflr %r4
addi %r4, %r4, 20
mtspr SPR_SRR0, %r4
mtspr SPR_SRR1, %r3
rfi /* Switch context */

/*
* Invalidate initial entry
*/
mr %r3, %r30
bl tlb1_inval_entry

/*
* Setup final mapping in TLB1[1] and switch to it
*/
/* Final kernel mapping, map in 16 MB of RAM */
lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
li %r4, 1 /* Entry 1 */
rlwimi %r3, %r4, 16, 4, 15
mtspr SPR_MAS0, %r3
isync

li %r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
oris %r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
mtspr SPR_MAS1, %r3 /* note TS was not filled, so it's TS=0 */
isync

lis %r3, KERNBASE@h
ori %r3, %r3, KERNBASE@l /* EPN = KERNBASE */
#if SMP
ori %r3, %r3, MAS2_M@l /* WIMGE = 0b00100 */
#endif
mtspr SPR_MAS2, %r3
isync

/* Retrieve kernel load [physical] address from kernload_ap */
bl 4f
4: mflr %r3
rlwinm %r3, %r3, 0, 0, 19
lis %r4, kernload_ap@h
ori %r4, %r4, kernload_ap@l
lis %r5, __boot_page@h
ori %r5, %r5, __boot_page@l
sub %r4, %r4, %r5 /* offset of kernload_ap within __boot_page */
lwzx %r3, %r4, %r3

/* Set RPN and protection */
ori %r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
mtspr SPR_MAS3, %r3
isync
tlbwe
isync
msync

/* Switch to the final mapping */
bl 5f
5: mflr %r3
rlwinm %r3, %r3, 0, 0xfff /* Offset from boot page start */
add %r3, %r3, %r5 /* Make this virtual address */
addi %r3, %r3, 32
li %r4, 0 /* Note AS=0 */
mtspr SPR_SRR0, %r3
mtspr SPR_SRR1, %r4
rfi

/*
* At this point we're running at virtual addresses KERNBASE and beyond so
* it's allowed to directly access all locations the kernel was linked
* against.
*/

/*
* Invalidate temp mapping
*/
mr %r3, %r29
bl tlb1_inval_entry

/*
* Setup a temporary stack
*/
lis %r1, tmpstack@ha
addi %r1, %r1, tmpstack@l
addi %r1, %r1, (TMPSTACKSZ - 8)

/*
* Initialise exception vector offsets
*/
bl ivor_setup

/*
* Assign our pcpu instance
*/
lis %r3, ap_pcpu@h
ori %r3, %r3, ap_pcpu@l
lwz %r3, 0(%r3)
mtsprg0 %r3

bl pmap_bootstrap_ap

bl cpudep_ap_bootstrap
/* Switch to the idle thread's kstack */
mr %r1, %r3

bl machdep_ap_bootstrap

/* NOT REACHED */
6: b 6b
#endif /* SMP */

/*
* Invalidate all entries in the given TLB.
*
@@ -369,6 +547,18 @@ tlb1_inval_all_but_current:
bne 1b
blr

#ifdef SMP
__boot_page_padding:
/*
* Boot page needs to be exactly 4K, with the last word of this page
* acting as the reset vector, so we need to stuff the remainder.
* Upon release from holdoff CPU fetches the last word of the boot
* page.
*/
.space 4092 - (__boot_page_padding - __boot_page)
b __boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

@@ -378,6 +378,9 @@ e500_init(u_int32_t startkernel, u_int32_t endkernel, void *mdp)
/* Initialize TLB1 handling */
tlb1_init(bootinfo->bi_bar_base);

/* Reset Time Base */
mttb(0);

/* Init params/tunables that can be overridden by the loader. */
init_param1();

@@ -408,6 +411,11 @@ e500_init(u_int32_t startkernel, u_int32_t endkernel, void *mdp)
debugf(" MSR = 0x%08x\n", mfmsr());
debugf(" HID0 = 0x%08x\n", mfspr(SPR_HID0));
debugf(" HID1 = 0x%08x\n", mfspr(SPR_HID1));
debugf(" BUCSR = 0x%08x\n", mfspr(SPR_BUCSR));

__asm __volatile("msync; isync");
csr = ccsr_read4(OCP85XX_L2CTL);
debugf(" L2CTL = 0x%08x\n", csr);

print_bootinfo();
print_kernel_section_addr();
@@ -479,12 +487,25 @@ e500_init(u_int32_t startkernel, u_int32_t endkernel, void *mdp)
return (((uintptr_t)thread0.td_pcb - 16) & ~15);
}

#define RES_GRANULE 32
extern uint32_t tlb0_miss_locks[];

/* Initialise a struct pcpu. */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{

pcpu->pc_tid_next = TID_MIN;

#ifdef SMP
uint32_t *ptr;
int words_per_gran = RES_GRANULE / sizeof(uint32_t);

ptr = &tlb0_miss_locks[cpuid * words_per_gran];
pcpu->pc_booke_tlb_lock = ptr;
*ptr = MTX_UNOWNED;
*(ptr + 1) = 0; /* recurse counter */
#endif
}

/* Set set up registers on exec. */

sys/powerpc/booke/mp_cpudep.c (new file, 80 lines)
@@ -0,0 +1,80 @@
/*-
* Copyright (c) 2008-2009 Semihalf, Rafal Jaworowski
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>

#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/spr.h>

extern void dcache_enable(void);
extern void dcache_inval(void);
extern void icache_enable(void);
extern void icache_inval(void);

volatile void *ap_pcpu;

uint32_t
cpudep_ap_bootstrap()
{
uint32_t msr, sp, csr;

/* Enable L1 caches */
csr = mfspr(SPR_L1CSR0);
if ((csr & L1CSR0_DCE) == 0) {
dcache_inval();
dcache_enable();
}

csr = mfspr(SPR_L1CSR1);
if ((csr & L1CSR1_ICE) == 0) {
icache_inval();
icache_enable();
}

/* Set MSR */
msr = PSL_ME;
mtmsr(msr);

/* Assign pcpu fields, return ptr to this AP's idle thread kstack */
pcpup->pc_curthread = pcpup->pc_idlethread;
pcpup->pc_curpcb = pcpup->pc_curthread->td_pcb;
sp = pcpup->pc_curpcb->pcb_sp;

/* XXX shouldn't the pcb_sp be checked/forced for alignment here?? */

return (sp);
}

@@ -50,6 +50,12 @@ __FBSDID("$FreeBSD$");

#include "platform_if.h"

#ifdef SMP
extern void *ap_pcpu;
extern uint8_t __boot_page[]; /* Boot page body */
extern uint32_t kernload; /* Kernel physical load address */
#endif

static int cpu;

static int bare_probe(platform_t);
@@ -179,7 +185,40 @@ bare_smp_get_bsp(platform_t plat, struct cpuref *cpuref)
static int
bare_smp_start_cpu(platform_t plat, struct pcpu *pc)
{
#ifdef SMP
uint32_t bptr, eebpcr;
int timeout;

eebpcr = ccsr_read4(OCP85XX_EEBPCR);
if ((eebpcr & (pc->pc_cpumask << 24)) != 0) {
printf("%s: CPU=%d already out of hold-off state!\n",
__func__, pc->pc_cpuid);
return (ENXIO);
}

ap_pcpu = pc;
__asm __volatile("msync; isync");

/*
* Set BPTR to the physical address of the boot page
*/
bptr = ((uint32_t)__boot_page - KERNBASE) + kernload;
ccsr_write4(OCP85XX_BPTR, (bptr >> 12) | 0x80000000);

/*
* Release AP from hold-off state
*/
eebpcr |= (pc->pc_cpumask << 24);
ccsr_write4(OCP85XX_EEBPCR, eebpcr);
__asm __volatile("isync; msync");

timeout = 500;
while (!pc->pc_awake && timeout--)
DELAY(1000); /* wait 1ms */

return ((pc->pc_awake) ? 0 : EBUSY);
#else
/* No SMP support */
return (ENXIO);
#endif
}

@@ -1,5 +1,5 @@
/*-
* Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski <raj@semihalf.com>
* Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
* Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
* All rights reserved.
*
@@ -63,6 +63,7 @@ __FBSDID("$FreeBSD$");
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
@@ -266,6 +267,8 @@ static vm_offset_t ptbl_buf_pool_vabase;
/* Pointer to ptbl_buf structures. */
static struct ptbl_buf *ptbl_bufs;

void pmap_bootstrap_ap(volatile uint32_t *);

/*
* Kernel MMU interface
*/
@@ -387,6 +390,54 @@ static mmu_def_t booke_mmu = {
};
MMU_DEF(booke_mmu);

static inline void
tlb_miss_lock(void)
{
#ifdef SMP
struct pcpu *pc;

if (!smp_started)
return;

SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
if (pc != pcpup) {

CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
"tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock);

KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
("tlb_miss_lock: tried to lock self"));

tlb_lock(pc->pc_booke_tlb_lock);

CTR1(KTR_PMAP, "%s: locked", __func__);
}
}
#endif
}

static inline void
tlb_miss_unlock(void)
{
#ifdef SMP
struct pcpu *pc;

if (!smp_started)
return;

SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
if (pc != pcpup) {
CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
__func__, pc->pc_cpuid);

tlb_unlock(pc->pc_booke_tlb_lock);

CTR1(KTR_PMAP, "%s: unlocked", __func__);
}
}
#endif
}

/* Return number of entries in TLB0. */
static __inline void
tlb0_get_tlbconf(void)
@@ -552,9 +603,11 @@ ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
* don't attempt to look up the page tables we are releasing.
*/
mtx_lock_spin(&tlbivax_mutex);
tlb_miss_lock();

pmap->pm_pdir[pdir_idx] = NULL;

tlb_miss_unlock();
mtx_unlock_spin(&tlbivax_mutex);

for (i = 0; i < PTBL_PAGES; i++) {
@@ -778,11 +831,13 @@ pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
}

mtx_lock_spin(&tlbivax_mutex);
tlb_miss_lock();

tlb0_flush_entry(va);
pte->flags = 0;
pte->rpn = 0;

tlb_miss_unlock();
mtx_unlock_spin(&tlbivax_mutex);

pmap->pm_stats.resident_count--;
@@ -849,6 +904,7 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
pmap->pm_stats.resident_count++;

mtx_lock_spin(&tlbivax_mutex);
tlb_miss_lock();

tlb0_flush_entry(va);
if (pmap->pm_pdir[pdir_idx] == NULL) {
@@ -862,6 +918,7 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK;
pte->flags |= (PTE_VALID | flags);

tlb_miss_unlock();
mtx_unlock_spin(&tlbivax_mutex);
}

@@ -1189,6 +1246,27 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
debugf("mmu_booke_bootstrap: exit\n");
}

void
pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
{
int i;

/*
* Finish TLB1 configuration: the BSP already set up its TLB1 and we
* have the snapshot of its contents in the s/w tlb1[] table, so use
* these values directly to (re)program AP's TLB1 hardware.
*/
for (i = 0; i < tlb1_idx; i ++) {
/* Skip invalid entries */
if (!(tlb1[i].mas1 & MAS1_VALID))
continue;

tlb1_write_entry(i);
}

set_mas4_defaults();
}

/*
* Get the physical page address for the given pmap/virtual address.
*/
@@ -1303,6 +1381,7 @@ mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

mtx_lock_spin(&tlbivax_mutex);
tlb_miss_lock();

if (PTE_ISVALID(pte)) {

@@ -1324,6 +1403,7 @@ mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
__syncicache((void *)va, PAGE_SIZE);
}

tlb_miss_unlock();
mtx_unlock_spin(&tlbivax_mutex);
}

@@ -1353,12 +1433,14 @@ mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
}

mtx_lock_spin(&tlbivax_mutex);
tlb_miss_lock();

/* Invalidate entry in TLB0, update PTE. */
tlb0_flush_entry(va);
pte->flags = 0;
pte->rpn = 0;

tlb_miss_unlock();
mtx_unlock_spin(&tlbivax_mutex);
}

@@ -1527,10 +1609,12 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
* update the PTE.
*/
mtx_lock_spin(&tlbivax_mutex);
tlb_miss_lock();

tlb0_flush_entry(va);
pte->flags = flags;

tlb_miss_unlock();
mtx_unlock_spin(&tlbivax_mutex);

} else {
@@ -1821,6 +1905,7 @@ mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
m = PHYS_TO_VM_PAGE(PTE_PA(pte));

mtx_lock_spin(&tlbivax_mutex);
tlb_miss_lock();

/* Handle modified pages. */
if (PTE_ISMODIFIED(pte))
@@ -1834,6 +1919,7 @@ mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED |
PTE_REFERENCED);

tlb_miss_unlock();
mtx_unlock_spin(&tlbivax_mutex);
}
}
@@ -1863,6 +1949,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
m = PHYS_TO_VM_PAGE(PTE_PA(pte));

mtx_lock_spin(&tlbivax_mutex);
tlb_miss_lock();

/* Handle modified pages. */
if (PTE_ISMODIFIED(pte))
@@ -1876,6 +1963,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED |
PTE_REFERENCED);

tlb_miss_unlock();
mtx_unlock_spin(&tlbivax_mutex);
}
}
@@ -2085,6 +2173,7 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
goto make_sure_to_unlock;

mtx_lock_spin(&tlbivax_mutex);
tlb_miss_lock();

if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
tlb0_flush_entry(pv->pv_va);
@@ -2092,6 +2181,7 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
PTE_REFERENCED);
}

tlb_miss_unlock();
mtx_unlock_spin(&tlbivax_mutex);
}
make_sure_to_unlock:
@@ -2129,10 +2219,12 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)

if (PTE_ISREFERENCED(pte)) {
mtx_lock_spin(&tlbivax_mutex);
tlb_miss_lock();

tlb0_flush_entry(pv->pv_va);
pte->flags &= ~PTE_REFERENCED;

tlb_miss_unlock();
mtx_unlock_spin(&tlbivax_mutex);

if (++count > 4) {
@@ -2168,10 +2260,12 @@ mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)

if (PTE_ISREFERENCED(pte)) {
mtx_lock_spin(&tlbivax_mutex);
tlb_miss_lock();

tlb0_flush_entry(pv->pv_va);
pte->flags &= ~PTE_REFERENCED;

tlb_miss_unlock();
mtx_unlock_spin(&tlbivax_mutex);
}
}
@@ -2884,7 +2978,9 @@ set_mas4_defaults(void)
/* Defaults: TLB0, PID0, TSIZED=4K */
mas4 = MAS4_TLBSELD0;
mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;

#ifdef SMP
mas4 |= MAS4_MD;
#endif
mtspr(SPR_MAS4, mas4);
__asm __volatile("isync");
}

@@ -1,5 +1,5 @@
/*-
* Copyright (C) 2006-2008 Semihalf, Rafal Jaworowski <raj@semihalf.com>
* Copyright (C) 2006-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
* Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
* Copyright (C) 2006 Juniper Networks, Inc.
* All rights reserved.
*
@@ -75,12 +75,17 @@
* SPRG1 - all interrupts except TLB miss, critical, machine check
* SPRG2 - critical
* SPRG3 - machine check
* SPRG4-6 - scratch
*
*/

/* Get the per-CPU data structure */
#define GET_CPUINFO(r) mfsprg0 r

#define RES_GRANULE 32
#define RES_LOCK 0 /* offset to the 'lock' word */
#define RES_RECURSE 4 /* offset to the 'recurse' word */

/*
* Standard interrupt prolog
*
@@ -265,7 +270,7 @@
/* calculate TLB nesting level and TLBSAVE instance address */ \
GET_CPUINFO(%r1); /* Per-cpu structure */ \
lwz %r28, PC_BOOKE_TLB_LEVEL(%r1); \
rlwinm %r29, %r28, 6, 24, 25; /* 4 x TLBSAVE_LEN */ \
rlwinm %r29, %r28, 6, 23, 25; /* 4 x TLBSAVE_LEN */ \
addi %r28, %r28, 1; \
stw %r28, PC_BOOKE_TLB_LEVEL(%r1); \
addi %r29, %r29, PC_BOOKE_TLBSAVE@l; \
@@ -300,7 +305,7 @@
lwz %r28, PC_BOOKE_TLB_LEVEL(%r1); \
subi %r28, %r28, 1; \
stw %r28, PC_BOOKE_TLB_LEVEL(%r1); \
rlwinm %r29, %r28, 6, 24, 25; /* 4 x TLBSAVE_LEN */ \
rlwinm %r29, %r28, 6, 23, 25; /* 4 x TLBSAVE_LEN */ \
addi %r29, %r29, PC_BOOKE_TLBSAVE@l; \
add %r1, %r1, %r29; \
\
@@ -318,6 +323,55 @@
lmw %r20, (TLBSAVE_BOOKE_R20)(%r1); \
mfsprg4 %r1

#ifdef SMP
#define TLB_LOCK \
GET_CPUINFO(%r20); \
lwz %r21, PC_CURTHREAD(%r20); \
lwz %r22, PC_BOOKE_TLB_LOCK(%r20); \
\
1: lwarx %r23, 0, %r22; \
cmpwi %r23, MTX_UNOWNED; \
beq 2f; \
\
/* check if this is recursion */ \
cmplw cr0, %r21, %r23; \
bne- 1b; \
\
2: /* try to acquire lock */ \
stwcx. %r21, 0, %r22; \
bne- 1b; \
\
/* got it, update recursion counter */ \
lwz %r21, RES_RECURSE(%r22); \
addi %r21, %r21, 1; \
stw %r21, RES_RECURSE(%r22); \
isync; \
msync

#define TLB_UNLOCK \
GET_CPUINFO(%r20); \
lwz %r21, PC_CURTHREAD(%r20); \
lwz %r22, PC_BOOKE_TLB_LOCK(%r20); \
\
/* update recursion counter */ \
lwz %r23, RES_RECURSE(%r22); \
subi %r23, %r23, 1; \
stw %r23, RES_RECURSE(%r22); \
\
cmpwi %r23, 0; \
bne 1f; \
isync; \
msync; \
\
/* release the lock */ \
li %r23, MTX_UNOWNED; \
stw %r23, 0(%r22); \
1: isync; \
msync
#else
#define TLB_LOCK
#define TLB_UNLOCK
#endif /* SMP */

#define INTERRUPT(label) \
.globl label; \
@@ -461,6 +515,7 @@ INTERRUPT(int_watchdog)
****************************************************************************/
INTERRUPT(int_data_tlb_error)
TLB_PROLOG
TLB_LOCK

mfdear %r31

@@ -503,6 +558,7 @@ tlb_miss_handle:
bl tlb_fill_entry

tlb_miss_return:
TLB_UNLOCK
TLB_RESTORE
rfi

@@ -648,6 +704,7 @@ tlb_fill_entry:
****************************************************************************/
INTERRUPT(int_inst_tlb_error)
TLB_PROLOG
TLB_LOCK

mfsrr0 %r31 /* faulting address */

@@ -796,3 +853,36 @@ dbleave:
FRAME_LEAVE(SPR_SRR0, SPR_SRR1)
rfi
#endif /* KDB */

#ifdef SMP
ENTRY(tlb_lock)
GET_CPUINFO(%r5)
lwz %r5, PC_CURTHREAD(%r5)
1: lwarx %r4, 0, %r3
cmpwi %r4, MTX_UNOWNED
bne 1b
stwcx. %r5, 0, %r3
bne- 1b
isync
msync
blr

ENTRY(tlb_unlock)
isync
msync
li %r4, MTX_UNOWNED
stw %r4, 0(%r3)
isync
msync
blr
/*
* TLB miss spin locks. For each CPU we have a reservation granule (32 bytes);
* only a single word from this granule will actually be used as a spin lock
* for mutual exclusion between TLB miss handler and pmap layer that
* manipulates page table contents.
*/
.data
.align 5
GLOBAL(tlb0_miss_locks)
.space RES_GRANULE * MAXCPU
#endif

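The trap_subr.S hunks above and the pmap.c hunks before them together implement a small per-CPU spin lock protocol for TLB handling: each CPU owns one 32-byte reservation granule in tlb0_miss_locks[], whose first word holds the owner (curthread or MTX_UNOWNED) and whose second word is a recursion counter. A CPU's own TLB miss handler takes only its own lock (TLB_LOCK/TLB_UNLOCK), while the pmap layer takes every other CPU's lock before rewriting page tables. A hedged C rendering of the pmap side, using only names that appear in the diff, may help tie the pieces together; the tracing and assertions of the real tlb_miss_lock() are omitted.

/*
 * Minimal sketch of the pmap side of the TLB miss lock protocol.
 * tlb_lock()/tlb_unlock() are the asm primitives defined above.
 */
static void
tlb_miss_lock_sketch(void)
{
	struct pcpu *pc;

	if (!smp_started)
		return;
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {
			/* Stall the other CPU's TLB miss handler. */
			tlb_lock(pc->pc_booke_tlb_lock);
		}
	}
	/* Page table entries and TLB0 can now be modified safely. */
}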
@@ -180,7 +180,7 @@ cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
p1 = td1->td_proc;

pcb = (struct pcb *)((td2->td_kstack +
td2->td_kstack_pages * PAGE_SIZE - sizeof(struct pcb)) & ~0x2fU);
td2->td_kstack_pages * PAGE_SIZE - sizeof(struct pcb)) & ~0x3fU);
td2->td_pcb = pcb;

/* Copy the pcb */
@@ -403,7 +403,7 @@ cpu_thread_alloc(struct thread *td)
struct pcb *pcb;

pcb = (struct pcb *)((td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
sizeof(struct pcb)) & ~0x2fU);
sizeof(struct pcb)) & ~0x3fU);
td->td_pcb = pcb;
td->td_frame = (struct trapframe *)pcb - 1;
}
@@ -469,7 +469,8 @@ cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,

tf = td->td_frame;
/* align stack and alloc space for frame ptr and saved LR */
sp = ((uint32_t)stack->ss_sp + stack->ss_size - 2 * sizeof(u_int32_t)) & ~0x1f;
sp = ((uint32_t)stack->ss_sp + stack->ss_size -
2 * sizeof(u_int32_t)) & ~0x3f;
bzero(tf, sizeof(struct trapframe));

tf->fixreg[1] = (register_t)sp;

@@ -32,6 +32,7 @@
#ifndef _MACHINE_MUTEX_H_
#define _MACHINE_MUTEX_H_

#if 0
#ifdef LOCORE

/*
@@ -62,4 +63,5 @@

#endif /* !LOCORE */

#endif
#endif /* __MACHINE_MUTEX_H */

@@ -31,6 +31,7 @@
#define _MACHINE_PCPU_H_

#include <machine/cpufunc.h>
#include <machine/tlb.h>

struct pmap;
#define CPUSAVE_LEN 8
@@ -61,6 +62,7 @@ struct pmap;
register_t pc_booke_mchksave[CPUSAVE_LEN]; \
register_t pc_booke_tlbsave[BOOKE_TLBSAVE_LEN]; \
register_t pc_booke_tlb_level; \
uint32_t *pc_booke_tlb_lock; \
int pc_tid_next;

/* Definitions for register offsets within the exception tmp save areas */

@@ -667,6 +667,9 @@
#define L1CSR1_ICFI 0x00000002 /* Instruction Cache Flash Invalidate */
#define L1CSR1_ICE 0x00000001 /* Instruction Cache Enable */

#define SPR_BUCSR 0x3F5 /* ..8 Branch Unit Control and Status Register */
#define BUCSR_BPEN 0x00000001 /* Branch Prediction Enable */

#endif /* #elif defined(E500) */

#endif /* !_POWERPC_SPR_H_ */

@@ -31,7 +31,18 @@
#define _MACHINE_OCP85XX_H_

/*
* Local access registers.
* Configuration control and status registers
*/
#define OCP85XX_CCSRBAR (CCSRBAR_VA + 0x0)
#define OCP85XX_BPTR (CCSRBAR_VA + 0x20)

/*
* E500 Coherency Module registers
*/
#define OCP85XX_EEBPCR (CCSRBAR_VA + 0x1010)

/*
* Local access registers
*/
#define OCP85XX_LAWBAR(n) (CCSRBAR_VA + 0xc08 + 0x20 * (n))
#define OCP85XX_LAWSR(n) (CCSRBAR_VA + 0xc10 + 0x20 * (n))
@@ -46,7 +57,12 @@
#define OCP85XX_TGTIF_RAM2 22

/*
* Power-On Reset configuration.
* L2 cache registers
*/
#define OCP85XX_L2CTL (CCSRBAR_VA + 0x20000)

/*
* Power-On Reset configuration
*/
#define OCP85XX_PORDEVSR (CCSRBAR_VA + 0xe000c)
#define OCP85XX_PORDEVSR2 (CCSRBAR_VA + 0xe0014)
@@ -61,7 +77,7 @@
*/
#define OCP85XX_I2C0_OFF 0x03000
#define OCP85XX_I2C1_OFF 0x03100
#define OCP85XX_I2C_SIZE 0x15
#define OCP85XX_I2C_SIZE 0x16
#define OCP85XX_UART0_OFF 0x04500
#define OCP85XX_UART1_OFF 0x04600
#define OCP85XX_UART_SIZE 0x10

@@ -67,6 +67,7 @@ ASSYM(PC_BOOKE_CRITSAVE, offsetof(struct pcpu, pc_booke_critsave));
ASSYM(PC_BOOKE_MCHKSAVE, offsetof(struct pcpu, pc_booke_mchksave));
ASSYM(PC_BOOKE_TLBSAVE, offsetof(struct pcpu, pc_booke_tlbsave));
ASSYM(PC_BOOKE_TLB_LEVEL, offsetof(struct pcpu, pc_booke_tlb_level));
ASSYM(PC_BOOKE_TLB_LOCK, offsetof(struct pcpu, pc_booke_tlb_lock));
#endif

ASSYM(CPUSAVE_R28, CPUSAVE_R28*4);

@@ -249,6 +249,8 @@ powerpc_ipi_handler(void *arg)
uint32_t ipimask;
int msg;

CTR2(KTR_SMP, "%s: MSR 0x%08x", __func__, mfmsr());

ipimask = atomic_readandclear_32(&(pcpup->pc_ipimask));
if (ipimask == 0)
return (FILTER_STRAY);
@@ -257,14 +259,18 @@ powerpc_ipi_handler(void *arg)
ipi_msg_cnt[msg]++;
switch (msg) {
case IPI_AST:
CTR1(KTR_SMP, "%s: IPI_AST", __func__);
break;
case IPI_PREEMPT:
CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
sched_preempt(curthread);
break;
case IPI_RENDEZVOUS:
CTR1(KTR_SMP, "%s: IPI_RENDEZVOUS", __func__);
smp_rendezvous_action();
break;
case IPI_STOP:
CTR1(KTR_SMP, "%s: IPI_STOP (stop)", __func__);
self = PCPU_GET(cpumask);
savectx(PCPU_GET(curpcb));
atomic_set_int(&stopped_cpus, self);
@@ -272,6 +278,7 @@ powerpc_ipi_handler(void *arg)
cpu_spinwait();
atomic_clear_int(&started_cpus, self);
atomic_clear_int(&stopped_cpus, self);
CTR1(KTR_SMP, "%s: IPI_STOP (restart)", __func__);
break;
}
}
@@ -283,8 +290,13 @@ static void
ipi_send(struct pcpu *pc, int ipi)
{

CTR4(KTR_SMP, "%s: pc=%p, targetcpu=%d, IPI=%d", __func__,
pc, pc->pc_cpuid, ipi);

atomic_set_32(&pc->pc_ipimask, (1 << ipi));
PIC_IPI(pic, pc->pc_cpuid);

CTR1(KTR_SMP, "%s: sent", __func__);
}

/* Send an IPI to a set of cpus. */

@@ -206,6 +206,8 @@ openpic_dispatch(device_t dev, struct trapframe *tf)
struct openpic_softc *sc;
u_int cpuid, vector;

CTR1(KTR_INTR, "%s: got interrupt", __func__);

cpuid = PCPU_GET(cpuid);
sc = device_get_softc(dev);
