
FreeBSD currently supports at most 32 CPUs on all architectures.

With the arrival of 128+ core machines it is necessary to handle more
than that.  One of the first things to change is the cpumask_t support,
which needs to be able to represent masks wider than 32 bits (which is
what is happening now).  Some places, however, still assume that
cpumask_t is a 32-bit mask.
Fix that by always using cpumask_t correctly where needed.

While here, remove the STOP_NMI code path for the Xen support, as it
is broken in any case.

Additionally, make ipi_nmi_pending static.
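
To illustrate the mask-width problem described above, here is a minimal,
self-contained sketch (not code from the tree; the 64-bit cpumask_t
typedef and the function names are hypothetical, used only for
illustration) of how a prototype taking a plain unsigned int silently
truncates a wide CPU mask, while a cpumask_t parameter preserves it:

#include <inttypes.h>
#include <stdio.h>

typedef uint64_t cpumask_t;		/* assumed widened mask type */

/* Old-style prototype: bits above 31 are silently truncated. */
static void
ipi_selected_old(unsigned int cpus)
{
	printf("old prototype sees mask 0x%08x\n", cpus);
}

/* Fixed prototype: the full mask reaches the callee. */
static void
ipi_selected_new(cpumask_t cpus)
{
	printf("new prototype sees mask 0x%016" PRIx64 "\n", cpus);
}

int
main(void)
{
	cpumask_t mask = (cpumask_t)1 << 40;	/* only CPU 40 set */

	ipi_selected_old(mask);		/* prints 0x00000000: CPU 40 is lost */
	ipi_selected_new(mask);		/* prints 0x0000010000000000 */
	return (0);
}

The old-style call reports an empty mask even though CPU 40 was
requested; switching the prototypes to cpumask_t is what prevents this
kind of silent breakage as the mask grows.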

Reviewed by:	jhb, kmacy
Tested by:	Giovanni Trematerra <giovanni dot trematerra at gmail dot com>
Attilio Rao 2009-05-14 17:43:00 +00:00
parent b7ced94c8c
commit 120b18d86f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=192114
7 changed files with 35 additions and 93 deletions

View File

@@ -114,9 +114,9 @@ volatile int smp_tlb_wait;
extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
#ifdef STOP_NMI
volatile cpumask_t ipi_nmi_pending;
static volatile cpumask_t ipi_nmi_pending;
static void ipi_nmi_selected(u_int32_t cpus);
static void ipi_nmi_selected(cpumask_t cpus);
#endif
/*
@@ -1016,7 +1016,7 @@ smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
}
static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
int ncpu, othercpus;
@@ -1090,7 +1090,7 @@ smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
}
void
smp_masked_invltlb(u_int mask)
smp_masked_invltlb(cpumask_t mask)
{
if (smp_started) {
@@ -1099,7 +1099,7 @@ smp_masked_invltlb(u_int mask)
}
void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
{
if (smp_started) {
@@ -1108,7 +1108,7 @@ smp_masked_invlpg(u_int mask, vm_offset_t addr)
}
void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
smp_masked_invlpg_range(cpumask_t mask, vm_offset_t addr1, vm_offset_t addr2)
{
if (smp_started) {
@@ -1143,7 +1143,7 @@ ipi_bitmap_handler(struct trapframe frame)
* send an IPI to a set of cpus.
*/
void
ipi_selected(u_int32_t cpus, u_int ipi)
ipi_selected(cpumask_t cpus, u_int ipi)
{
int cpu;
u_int bitmap = 0;
@@ -1206,8 +1206,8 @@ ipi_all_but_self(u_int ipi)
#define BEFORE_SPIN 1000000
void
ipi_nmi_selected(u_int32_t cpus)
static void
ipi_nmi_selected(cpumask_t cpus)
{
int cpu;
register_t icrlo;
@@ -1331,7 +1331,7 @@ SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
u_int mask;
cpumask_t mask;
int error;
mask = hlt_cpus_mask;

View File

@@ -52,19 +52,19 @@ void cpu_add(u_int apic_id, char boot_cpu);
void cpustop_handler(void);
void cpususpend_handler(void);
void init_secondary(void);
void ipi_selected(u_int cpus, u_int ipi);
void ipi_selected(cpumask_t cpus, u_int ipi);
void ipi_all_but_self(u_int ipi);
void ipi_bitmap_handler(struct trapframe frame);
u_int mp_bootaddress(u_int);
int mp_grab_cpu_hlt(void);
void smp_cache_flush(void);
void smp_invlpg(vm_offset_t addr);
void smp_masked_invlpg(u_int mask, vm_offset_t addr);
void smp_masked_invlpg(cpumask_t mask, vm_offset_t addr);
void smp_invlpg_range(vm_offset_t startva, vm_offset_t endva);
void smp_masked_invlpg_range(u_int mask, vm_offset_t startva,
void smp_masked_invlpg_range(cpumask_t mask, vm_offset_t startva,
vm_offset_t endva);
void smp_invltlb(void);
void smp_masked_invltlb(u_int mask);
void smp_masked_invltlb(cpumask_t mask);
#ifdef STOP_NMI
int ipi_nmi_handler(void);

View File

@@ -75,6 +75,5 @@ extern int get_thread_id(void);
#endif
#define ASSERT_ALWAYS(EX) ((EX)?((void)0):assfail(#EX, __FILE__, __LINE__))
#define debug_stop_all_cpus(param) /* param is "cpumask_t *" */
#endif /* __XFS_SUPPORT_DEBUG_H__ */

View File

@@ -155,9 +155,9 @@ vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;
#ifdef STOP_NMI
volatile cpumask_t ipi_nmi_pending;
static volatile cpumask_t ipi_nmi_pending;
static void ipi_nmi_selected(u_int32_t cpus);
static void ipi_nmi_selected(cpumask_t cpus);
#endif
#ifdef COUNT_IPIS
@@ -1146,7 +1146,7 @@ smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
}
static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
int ncpu, othercpus;
@@ -1231,7 +1231,7 @@ smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
}
void
smp_masked_invltlb(u_int mask)
smp_masked_invltlb(cpumask_t mask)
{
if (smp_started) {
@@ -1243,7 +1243,7 @@ smp_masked_invltlb(u_int mask)
}
void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
{
if (smp_started) {
@@ -1255,7 +1255,7 @@ smp_masked_invlpg(u_int mask, vm_offset_t addr)
}
void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
smp_masked_invlpg_range(cpumask_t mask, vm_offset_t addr1, vm_offset_t addr2)
{
if (smp_started) {
@@ -1303,7 +1303,7 @@ ipi_bitmap_handler(struct trapframe frame)
* send an IPI to a set of cpus.
*/
void
ipi_selected(u_int32_t cpus, u_int ipi)
ipi_selected(cpumask_t cpus, u_int ipi)
{
int cpu;
u_int bitmap = 0;
@@ -1367,7 +1367,7 @@ ipi_all_but_self(u_int ipi)
#define BEFORE_SPIN 1000000
void
ipi_nmi_selected(u_int32_t cpus)
ipi_nmi_selected(cpumask_t cpus)
{
int cpu;
register_t icrlo;
@@ -1456,7 +1456,7 @@ SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
u_int mask;
cpumask_t mask;
int error;
mask = hlt_cpus_mask;

View File

@@ -1624,7 +1624,7 @@ pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
* Deal with a SMP shootdown of other users of the pmap that we are
* trying to dispose of. This can be a bit hairy.
*/
static u_int *lazymask;
static cpumask_t *lazymask;
static u_int lazyptd;
static volatile u_int lazywait;
@@ -1633,7 +1633,7 @@ void pmap_lazyfix_action(void);
void
pmap_lazyfix_action(void)
{
u_int mymask = PCPU_GET(cpumask);
cpumask_t mymask = PCPU_GET(cpumask);
#ifdef COUNT_IPIS
(*ipi_lazypmap_counts[PCPU_GET(cpuid)])++;
@@ -1645,7 +1645,7 @@ pmap_lazyfix_action(void)
}
static void
pmap_lazyfix_self(u_int mymask)
pmap_lazyfix_self(cpumask_t mymask)
{
if (rcr3() == lazyptd)
@@ -1657,8 +1657,7 @@ pmap_lazyfix_self(u_int mymask)
static void
pmap_lazyfix(pmap_t pmap)
{
u_int mymask;
u_int mask;
cpumask_t mymask, mask;
u_int spins;
while ((mask = pmap->pm_active) != 0) {

View File

@@ -69,12 +69,12 @@ u_int mp_bootaddress(u_int);
int mp_grab_cpu_hlt(void);
void smp_cache_flush(void);
void smp_invlpg(vm_offset_t addr);
void smp_masked_invlpg(u_int mask, vm_offset_t addr);
void smp_masked_invlpg(cpumask_t mask, vm_offset_t addr);
void smp_invlpg_range(vm_offset_t startva, vm_offset_t endva);
void smp_masked_invlpg_range(u_int mask, vm_offset_t startva,
void smp_masked_invlpg_range(cpumask_t mask, vm_offset_t startva,
vm_offset_t endva);
void smp_invltlb(void);
void smp_masked_invltlb(u_int mask);
void smp_masked_invltlb(cpumask_t mask);
#ifdef STOP_NMI
int ipi_nmi_handler(void);

View File

@@ -993,7 +993,7 @@ smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
}
static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
int ncpu, othercpus;
struct _call_data data;
@@ -1072,7 +1072,7 @@ smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
}
void
smp_masked_invltlb(u_int mask)
smp_masked_invltlb(cpumask_t mask)
{
if (smp_started) {
@@ -1081,7 +1081,7 @@ smp_masked_invltlb(u_int mask)
}
void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
{
if (smp_started) {
@@ -1090,7 +1090,7 @@ smp_masked_invlpg(u_int mask, vm_offset_t addr)
}
void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
smp_masked_invlpg_range(cpumask_t mask, vm_offset_t addr1, vm_offset_t addr2)
{
if (smp_started) {
@@ -1102,7 +1102,7 @@ smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
* send an IPI to a set of cpus.
*/
void
ipi_selected(uint32_t cpus, u_int ipi)
ipi_selected(cpumask_t cpus, u_int ipi)
{
int cpu;
u_int bitmap = 0;
@@ -1114,12 +1114,6 @@ ipi_selected(uint32_t cpus, u_int ipi)
ipi = IPI_BITMAP_VECTOR;
}
#ifdef STOP_NMI
if (ipi == IPI_STOP && stop_cpus_with_nmi) {
ipi_nmi_selected(cpus);
return;
}
#endif
CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
while ((cpu = ffs(cpus)) != 0) {
cpu--;
@@ -1160,56 +1154,6 @@ ipi_all_but_self(u_int ipi)
ipi_selected(PCPU_GET(other_cpus), ipi);
}
#ifdef STOP_NMI
/*
* send NMI IPI to selected CPUs
*/
#define BEFORE_SPIN 1000000
void
ipi_nmi_selected(u_int32_t cpus)
{
int cpu;
register_t icrlo;
icrlo = APIC_DELMODE_NMI | APIC_DESTMODE_PHY | APIC_LEVEL_ASSERT
| APIC_TRIGMOD_EDGE;
CTR2(KTR_SMP, "%s: cpus: %x nmi", __func__, cpus);
atomic_set_int(&ipi_nmi_pending, cpus);
while ((cpu = ffs(cpus)) != 0) {
cpu--;
cpus &= ~(1 << cpu);
KASSERT(cpu_apic_ids[cpu] != -1,
("IPI NMI to non-existent CPU %d", cpu));
/* Wait for an earlier IPI to finish. */
if (!lapic_ipi_wait(BEFORE_SPIN))
panic("ipi_nmi_selected: previous IPI has not cleared");
lapic_ipi_raw(icrlo, cpu_apic_ids[cpu]);
}
}
int
ipi_nmi_handler(void)
{
int cpumask = PCPU_GET(cpumask);
if (!(ipi_nmi_pending & cpumask))
return 1;
atomic_clear_int(&ipi_nmi_pending, cpumask);
cpustop_handler();
return 0;
}
#endif /* STOP_NMI */
/*
* Handle an IPI_STOP by saving our current context and spinning until we
* are resumed.