Mirror of https://git.FreeBSD.org/src.git (synced 2024-12-28 11:57:28 +00:00)

Commit 5fdd34ee20
field. Perform vcpu enumeration for Xen PV and HVM environments and
convert all Xen drivers to use vcpu_id instead of a hard coded assumption
of the mapping algorithm (acpi or apic ID) in use.

Submitted by:	Roger Pau Monné
Sponsored by:	Citrix Systems R&D
Reviewed by:	gibbs
Approved by:	re (blanket Xen)

amd64/include/pcpu.h:
i386/include/pcpu.h:
	Add vcpu_id to the amd64 and i386 pcpu structures.

dev/xen/timer/timer.c:
x86/xen/xen_intr.c:
	Use new vcpu_id instead of assuming acpi_id == vcpu_id.

i386/xen/mp_machdep.c:
i386/xen/mptable.c:
x86/xen/hvm.c:
	Perform Xen HVM and Xen full PV vcpu_id mapping.

x86/xen/hvm.c:
x86/acpica/madt.c:
	Change SYSINIT ordering of acpi CPU enumeration so that it is
	guaranteed to be available at the time of Xen HVM vcpu id mapping.
250 lines, 8.0 KiB, C
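
The commit adds a per-CPU vcpu_id field (see PCPU_MD_FIELDS in the header below) so that Xen code can look up the hypervisor's vCPU number directly instead of assuming it equals the ACPI or APIC ID. The following is a minimal sketch of how a kernel consumer might read the new field through the standard per-CPU accessors; the function name is a hypothetical placeholder and is not taken from the tree:

/*
 * Illustrative sketch only: read the enumerated Xen vCPU ID for the
 * CPU the calling thread is currently running on.
 */
#include <sys/param.h>
#include <sys/pcpu.h>

static u_int
xen_example_current_vcpu(void)
{

	/* pc_vcpu_id is filled in during Xen PV/HVM vCPU enumeration. */
	return (PCPU_GET(vcpu_id));
}
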
/*-
 * Copyright (c) Peter Wemm <peter@netplex.com.au>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_PCPU_H_
#define	_MACHINE_PCPU_H_

#ifndef _SYS_CDEFS_H_
#error "sys/cdefs.h is a prerequisite for this file"
#endif

/*
 * The SMP parts are setup in pmap.c and locore.s for the BSP, and
 * mp_machdep.c sets up the data for the AP's to "see" when they awake.
 * The reason for doing it via a struct is so that an array of pointers
 * to each CPU's data can be set up for things like "check curproc on all
 * other processors"
 */
#define	PCPU_MD_FIELDS							\
	char	pc_monitorbuf[128] __aligned(128); /* cache line */	\
	struct	pcpu *pc_prvspace;	/* Self-reference */		\
	struct	pmap *pc_curpmap;					\
	struct	amd64tss *pc_tssp;	/* TSS segment active on CPU */	\
	struct	amd64tss *pc_commontssp;/* Common TSS for the CPU */	\
	register_t pc_rsp0;						\
	register_t pc_scratch_rsp;	/* User %rsp in syscall */	\
	u_int	pc_apic_id;						\
	u_int	pc_acpi_id;		/* ACPI CPU id */		\
	/* Pointer to the CPU %fs descriptor */				\
	struct user_segment_descriptor	*pc_fs32p;			\
	/* Pointer to the CPU %gs descriptor */				\
	struct user_segment_descriptor	*pc_gs32p;			\
	/* Pointer to the CPU LDT descriptor */				\
	struct system_segment_descriptor *pc_ldt;			\
	/* Pointer to the CPU TSS descriptor */				\
	struct system_segment_descriptor *pc_tss;			\
	uint64_t pc_pm_save_cnt;					\
	u_int	pc_cmci_mask;		/* MCx banks for CMCI */	\
	uint64_t pc_dbreg[16];		/* ddb debugging regs */	\
	int	pc_dbreg_cmd;		/* ddb debugging reg cmd */	\
	u_int	pc_vcpu_id;		/* Xen vCPU ID */		\
	char	__pad[157]		/* be divisor of PAGE_SIZE	\
					   after cache alignment */

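/*
 * Illustrative sketch (not part of this header): the per-CPU data of any
 * processor can be reached through the pcpu pointer array maintained by
 * the MI code, e.g. to "check curproc on all other processors" as the
 * comment above suggests.  pcpu_find() and CPU_FOREACH() come from
 * <sys/pcpu.h> and <sys/smp.h>; this is a sketch, not code from the tree.
 *
 *	u_int cpu;
 *
 *	CPU_FOREACH(cpu) {
 *		struct pcpu *pc = pcpu_find(cpu);
 *		printf("cpu%u: curthread %p vcpu_id %u\n",
 *		    cpu, pc->pc_curthread, pc->pc_vcpu_id);
 *	}
 */
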
#define	PC_DBREG_CMD_NONE	0
#define	PC_DBREG_CMD_LOAD	1

#ifdef _KERNEL

#ifdef lint

extern struct pcpu *pcpup;

#define	PCPU_GET(member)	(pcpup->pc_ ## member)
#define	PCPU_ADD(member, val)	(pcpup->pc_ ## member += (val))
#define	PCPU_INC(member)	PCPU_ADD(member, 1)
#define	PCPU_PTR(member)	(&pcpup->pc_ ## member)
#define	PCPU_SET(member, val)	(pcpup->pc_ ## member = (val))

#elif defined(__GNUCLIKE_ASM) && defined(__GNUCLIKE___TYPEOF)

/*
 * Evaluates to the byte offset of the per-cpu variable name.
 */
#define	__pcpu_offset(name)						\
	__offsetof(struct pcpu, name)

/*
 * Evaluates to the type of the per-cpu variable name.
 */
#define	__pcpu_type(name)						\
	__typeof(((struct pcpu *)0)->name)

/*
 * Evaluates to the address of the per-cpu variable name.
 */
#define	__PCPU_PTR(name) __extension__ ({				\
	__pcpu_type(name) *__p;						\
									\
	__asm __volatile("movq %%gs:%1,%0; addq %2,%0"			\
	    : "=r" (__p)						\
	    : "m" (*(struct pcpu *)(__pcpu_offset(pc_prvspace))),	\
	      "i" (__pcpu_offset(name)));				\
									\
	__p;								\
})

/*
 * Evaluates to the value of the per-cpu variable name.
 */
#define	__PCPU_GET(name) __extension__ ({				\
	__pcpu_type(name) __res;					\
	struct __s {							\
		u_char	__b[MIN(sizeof(__pcpu_type(name)), 8)];		\
	} __s;								\
									\
	if (sizeof(__res) == 1 || sizeof(__res) == 2 ||			\
	    sizeof(__res) == 4 || sizeof(__res) == 8) {			\
		__asm __volatile("mov %%gs:%1,%0"			\
		    : "=r" (__s)					\
		    : "m" (*(struct __s *)(__pcpu_offset(name))));	\
		*(struct __s *)(void *)&__res = __s;			\
	} else {							\
		__res = *__PCPU_PTR(name);				\
	}								\
	__res;								\
})

/*
 * Adds the value to the per-cpu counter name.  The implementation
 * must be atomic with respect to interrupts.
 */
#define	__PCPU_ADD(name, val) do {					\
	__pcpu_type(name) __val;					\
	struct __s {							\
		u_char	__b[MIN(sizeof(__pcpu_type(name)), 8)];		\
	} __s;								\
									\
	__val = (val);							\
	if (sizeof(__val) == 1 || sizeof(__val) == 2 ||			\
	    sizeof(__val) == 4 || sizeof(__val) == 8) {			\
		__s = *(struct __s *)(void *)&__val;			\
		__asm __volatile("add %1,%%gs:%0"			\
		    : "=m" (*(struct __s *)(__pcpu_offset(name)))	\
		    : "r" (__s));					\
	} else								\
		*__PCPU_PTR(name) += __val;				\
} while (0)

/*
 * Increments the value of the per-cpu counter name.  The implementation
 * must be atomic with respect to interrupts.
 */
#define	__PCPU_INC(name) do {						\
	CTASSERT(sizeof(__pcpu_type(name)) == 1 ||			\
	    sizeof(__pcpu_type(name)) == 2 ||				\
	    sizeof(__pcpu_type(name)) == 4 ||				\
	    sizeof(__pcpu_type(name)) == 8);				\
	if (sizeof(__pcpu_type(name)) == 1) {				\
		__asm __volatile("incb %%gs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	} else if (sizeof(__pcpu_type(name)) == 2) {			\
		__asm __volatile("incw %%gs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	} else if (sizeof(__pcpu_type(name)) == 4) {			\
		__asm __volatile("incl %%gs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	} else if (sizeof(__pcpu_type(name)) == 8) {			\
		__asm __volatile("incq %%gs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	}								\
} while (0)

/*
 * Sets the value of the per-cpu variable name to value val.
 */
#define	__PCPU_SET(name, val) {						\
	__pcpu_type(name) __val;					\
	struct __s {							\
		u_char	__b[MIN(sizeof(__pcpu_type(name)), 8)];		\
	} __s;								\
									\
	__val = (val);							\
	if (sizeof(__val) == 1 || sizeof(__val) == 2 ||			\
	    sizeof(__val) == 4 || sizeof(__val) == 8) {			\
		__s = *(struct __s *)(void *)&__val;			\
		__asm __volatile("mov %1,%%gs:%0"			\
		    : "=m" (*(struct __s *)(__pcpu_offset(name)))	\
		    : "r" (__s));					\
	} else {							\
		*__PCPU_PTR(name) = __val;				\
	}								\
}

#define	PCPU_GET(member)	__PCPU_GET(pc_ ## member)
#define	PCPU_ADD(member, val)	__PCPU_ADD(pc_ ## member, val)
#define	PCPU_INC(member)	__PCPU_INC(pc_ ## member)
#define	PCPU_PTR(member)	__PCPU_PTR(pc_ ## member)
#define	PCPU_SET(member, val)	__PCPU_SET(pc_ ## member, val)

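/*
 * Usage sketch (illustrative only, not part of this header): the PCPU_*()
 * accessors expand to single %gs-relative instructions for the 1-, 2-, 4-
 * and 8-byte cases, so accesses to the current CPU's private data are
 * cheap, and PCPU_ADD()/PCPU_INC() are atomic with respect to interrupts.
 * All member names below come from PCPU_MD_FIELDS above.
 *
 *	u_int vcpu = PCPU_GET(vcpu_id);	   current CPU's Xen vCPU ID
 *	PCPU_SET(curpmap, pmap);	   record the active pmap
 *	PCPU_INC(pm_save_cnt);		   bump a per-CPU 64-bit counter
 */
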
#define	OFFSETOF_CURTHREAD	0
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnull-dereference"
#endif
static __inline __pure2 struct thread *
__curthread(void)
{
	struct thread *td;

	__asm("movq %%gs:%1,%0" : "=r" (td)
	    : "m" (*(char *)OFFSETOF_CURTHREAD));
	return (td);
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#define	curthread	(__curthread())

#define	OFFSETOF_CURPCB	32
static __inline __pure2 struct pcb *
__curpcb(void)
{
	struct pcb *pcb;

	__asm("movq %%gs:%1,%0" : "=r" (pcb) : "m" (*(char *)OFFSETOF_CURPCB));
	return (pcb);
}
#define	curpcb		(__curpcb())

#define	IS_BSP()	(PCPU_GET(cpuid) == 0)

#else /* !lint || defined(__GNUCLIKE_ASM) && defined(__GNUCLIKE___TYPEOF) */

#error "this file needs to be ported to your compiler"

#endif /* lint, etc. */

#endif /* _KERNEL */

#endif /* !_MACHINE_PCPU_H_ */