1
0
mirror of https://git.FreeBSD.org/src.git synced 2024-12-17 10:26:15 +00:00

Bring back r313037, with fixes for mips:

Implement get_pcpu() for amd64/sparc64/mips/powerpc, and use it to
replace pcpu_find(curcpu) in MI code.

Reviewed by:	andreast, kan, lidl
Tested by:	lidl(mips, sparc64), andreast(powerpc)
Differential Revision:	https://reviews.freebsd.org/D9587
This commit is contained in:
Jason A. Harmening 2017-02-19 02:03:09 +00:00
parent 5c5df0d99b
commit e2a8d17887
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=313930
8 changed files with 34 additions and 15 deletions

View File

@ -78,6 +78,7 @@
extern struct pcpu *pcpup;
#define get_pcpu() (pcpup)
#define PCPU_GET(member) (pcpup->pc_ ## member)
#define PCPU_ADD(member, val) (pcpup->pc_ ## member += (val))
#define PCPU_INC(member) PCPU_ADD(member, 1)
@ -203,6 +204,15 @@ extern struct pcpu *pcpup;
} \
}
/*
 * get_pcpu(): return a pointer to this CPU's struct pcpu.  On amd64 the
 * per-CPU region is addressed via the %gs segment, so we load the
 * globally-unique self pointer stored at the %gs-relative offset of the
 * pc_prvspace member (GCC/Clang statement-expression extension).
 */
#define get_pcpu() __extension__ ({ \
	struct pcpu *__pc; \
	\
	__asm __volatile("movq %%gs:%1,%0" \
	    : "=r" (__pc) \
	    : "m" (*(struct pcpu *)(__pcpu_offset(pc_prvspace)))); \
	__pc; \
})
#define PCPU_GET(member) __PCPU_GET(pc_ ## member)
#define PCPU_ADD(member, val) __PCPU_ADD(pc_ ## member, val)
#define PCPU_INC(member) __PCPU_INC(pc_ ## member)

View File

@ -156,7 +156,7 @@ unlock_rm(struct lock_object *lock)
*/
critical_enter();
td = curthread;
pc = pcpu_find(curcpu);
pc = get_pcpu();
for (queue = pc->pc_rm_queue.rmq_next;
queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
tracker = (struct rm_priotracker *)queue;
@ -258,7 +258,7 @@ rm_cleanIPI(void *arg)
struct rmlock *rm = arg;
struct rm_priotracker *tracker;
struct rm_queue *queue;
pc = pcpu_find(curcpu);
pc = get_pcpu();
for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
queue = queue->rmq_next) {
@ -355,7 +355,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
struct pcpu *pc;
critical_enter();
pc = pcpu_find(curcpu);
pc = get_pcpu();
/* Check if we just need to do a proper critical_exit. */
if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
@ -416,7 +416,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
}
critical_enter();
pc = pcpu_find(curcpu);
pc = get_pcpu();
CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
rm_tracker_add(pc, tracker);
sched_pin();
@ -641,7 +641,7 @@ _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
#ifdef INVARIANTS
if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
critical_enter();
KASSERT(rm_trackers_present(pcpu_find(curcpu), rm,
KASSERT(rm_trackers_present(get_pcpu(), rm,
curthread) == 0,
("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
rm->lock_object.lo_name, file, line));
@ -771,7 +771,7 @@ _rm_assert(const struct rmlock *rm, int what, const char *file, int line)
}
critical_enter();
count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
count = rm_trackers_present(get_pcpu(), rm, curthread);
critical_exit();
if (count == 0)
@ -797,7 +797,7 @@ _rm_assert(const struct rmlock *rm, int what, const char *file, int line)
rm->lock_object.lo_name, file, line);
critical_enter();
count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
count = rm_trackers_present(get_pcpu(), rm, curthread);
critical_exit();
if (count != 0)

View File

@ -39,16 +39,17 @@
struct pmap *pc_curpmap; /* pmap of curthread */ \
u_int32_t pc_next_asid; /* next ASID to alloc */ \
u_int32_t pc_asid_generation; /* current ASID generation */ \
u_int pc_pending_ipis; /* IPIs pending to this CPU */
u_int pc_pending_ipis; /* IPIs pending to this CPU */ \
struct pcpu *pc_self; /* globally-unique self pointer */
#ifdef __mips_n64
#define PCPU_MD_MIPS64_FIELDS \
PCPU_MD_COMMON_FIELDS \
char __pad[61]
char __pad[53]
#else
#define PCPU_MD_MIPS32_FIELDS \
PCPU_MD_COMMON_FIELDS \
char __pad[193]
char __pad[189]
#endif
#ifdef __mips_n64
@ -65,6 +66,13 @@ extern char pcpu_space[MAXCPU][PAGE_SIZE * 2];
extern struct pcpu *pcpup;
#define PCPUP pcpup
/*
* Since we use a wired TLB entry to map the same VA to a different
* physical page for each CPU, get_pcpu() must use the pc_self
* field to obtain a globally-unique pointer.
*/
#define get_pcpu() (PCPUP->pc_self)
#define PCPU_ADD(member, value) (PCPUP->pc_ ## member += (value))
#define PCPU_GET(member) (PCPUP->pc_ ## member)
#define PCPU_INC(member) PCPU_ADD(member, 1)

View File

@ -475,6 +475,7 @@ cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
pcpu->pc_next_asid = 1;
pcpu->pc_asid_generation = 1;
pcpu->pc_self = pcpu;
#ifdef SMP
if ((vm_offset_t)pcpup >= VM_MIN_KERNEL_ADDRESS &&
(vm_offset_t)pcpup <= VM_MAX_KERNEL_ADDRESS) {

View File

@ -1268,9 +1268,7 @@ netisr_start_swi(u_int cpuid, struct pcpu *pc)
static void
netisr_init(void *arg)
{
#ifdef EARLY_AP_STARTUP
struct pcpu *pc;
#endif
NETISR_LOCK_INIT();
if (netisr_maxthreads == 0 || netisr_maxthreads < -1 )
@ -1308,7 +1306,8 @@ netisr_init(void *arg)
netisr_start_swi(pc->pc_cpuid, pc);
}
#else
netisr_start_swi(curcpu, pcpu_find(curcpu));
pc = get_pcpu();
netisr_start_swi(pc->pc_cpuid, pc);
#endif
}
SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL);

View File

@ -201,7 +201,7 @@ intr_restore(register_t msr)
}
static __inline struct pcpu *
powerpc_get_pcpup(void)
get_pcpu(void)
{
struct pcpu *ret;

View File

@ -142,7 +142,7 @@ struct pvo_entry;
#ifdef _KERNEL
#define pcpup ((struct pcpu *) powerpc_get_pcpup())
#define pcpup (get_pcpu())
static __inline __pure2 struct thread *
__curthread(void)

View File

@ -74,6 +74,7 @@ struct pcpu;
register struct pcb *curpcb __asm__(__XSTRING(PCB_REG));
register struct pcpu *pcpup __asm__(__XSTRING(PCPU_REG));
#define get_pcpu() (pcpup)
#define PCPU_GET(member) (pcpup->pc_ ## member)
static __inline __pure2 struct thread *