
The POWER7 has only 32 SLB slots, instead of the 64 found on other supported 64-bit PowerPC CPUs. Add infrastructure to support a variable number of SLB slots and move the user slot from 63 to 0, so that it is always available.
Nathan Whitehorn 2011-06-02 14:25:52 +00:00
parent d63d020e22
commit 17763042e4
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=222620
4 changed files with 52 additions and 44 deletions
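To make the shape of the change easier to follow, below is a small, self-contained C sketch of the slot-allocation policy the diff introduces: the SLB slot count becomes a runtime variable (n_slbs, 32 on POWER7 and 64 on the other supported 64-bit CPUs), and the user copyin/out slot is pinned at index 0 so it exists regardless of how many slots the hardware implements. The struct layout, the helper name pick_kernel_slot(), the SLBE_VALID placeholder value, and the rand()-based eviction are illustrative stand-ins, not the kernel code shown in the diffs below.

/*
 * Illustrative sketch only: mirrors the slot-selection policy of
 * slb_insert_kernel() with a variable slot count and the user slot
 * pinned at index 0.  Names and the eviction source are simplified.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define USER_SLB_SLOT	0			/* was 63; slot 0 exists on every CPU */
#define SLBE_VALID	0x0000000008000000UL	/* placeholder valid bit for the sketch */

struct slb_entry {
	uint64_t slbv;
	uint64_t slbe;
};

static int n_slbs = 64;				/* 32 on POWER7, 64 elsewhere */

/* Pick a slot for a new kernel segment, never touching the user slot. */
static int
pick_kernel_slot(const struct slb_entry *cache)
{
	int i;

	/* Prefer any currently invalid slot other than the user slot. */
	for (i = 0; i < n_slbs; i++) {
		if (i == USER_SLB_SLOT)
			continue;
		if (!(cache[i].slbe & SLBE_VALID))
			return (i);
	}

	/* Everything is full: evict a pseudo-random victim, skipping slot 0. */
	do {
		i = rand() % n_slbs;
	} while (i == USER_SLB_SLOT);
	return (i);
}

int
main(void)
{
	struct slb_entry cache[64] = { { 0, 0 } };

	n_slbs = 32;				/* pretend this is a POWER7 */
	printf("chose slot %d; user slot %d stays reserved\n",
	    pick_kernel_slot(cache), USER_SLB_SLOT);
	return (0);
}

Keeping the user slot at index 0 rather than at the last index means its position no longer depends on the slot count, which is what lets the trap code below skip it with a single compare against USER_SLB_SLOT.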

View File

@@ -132,6 +132,7 @@ extern vm_offset_t ksym_start, ksym_end;
 int cold = 1;
 #ifdef __powerpc64__
+extern int n_slbs;
 int cacheline_size = 128;
 #else
 int cacheline_size = 32;
@@ -337,13 +338,13 @@ powerpc_init(vm_offset_t startkernel, vm_offset_t endkernel,
 	kdb_init();
-	/*
-	 * PowerPC 970 CPUs have a misfeature requested by Apple that makes
-	 * them pretend they have a 32-byte cacheline. Turn this off
-	 * before we measure the cacheline size.
-	 */
+	/* Various very early CPU fix ups */
 	switch (mfpvr() >> 16) {
+		/*
+		 * PowerPC 970 CPUs have a misfeature requested by Apple that
+		 * makes them pretend they have a 32-byte cacheline. Turn this
+		 * off before we measure the cacheline size.
+		 */
 	case IBM970:
 	case IBM970FX:
 	case IBM970MP:
@@ -352,6 +353,12 @@ powerpc_init(vm_offset_t startkernel, vm_offset_t endkernel,
 		scratch &= ~HID5_970_DCBZ_SIZE_HI;
 		mtspr(SPR_HID5, scratch);
 		break;
+#ifdef __powerpc64__
+	case IBMPOWER7:
+		/* XXX: get from ibm,slb-size in device tree */
+		n_slbs = 32;
+		break;
+#endif
 	}
/*
@@ -367,7 +374,6 @@ powerpc_init(vm_offset_t startkernel, vm_offset_t endkernel,
 	msr = mfmsr();
 	mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);
-	isync();
 	/*
 	 * Measure the cacheline size using dcbz
@@ -502,7 +508,6 @@ powerpc_init(vm_offset_t startkernel, vm_offset_t endkernel,
 	 * Restore MSR
 	 */
 	mtmsr(msr);
-	isync();
 	/* Warn if cachline size was not determined */
 	if (cacheline_warn == 1) {
@@ -527,7 +532,6 @@ powerpc_init(vm_offset_t startkernel, vm_offset_t endkernel,
 	pmap_bootstrap(startkernel, endkernel);
 	mtmsr(PSL_KERNSET & ~PSL_EE);
-	isync();
 	/*
 	 * Initialize params/tunables that are derived from memsize

View File

@@ -51,8 +51,9 @@ uintptr_t moea64_get_unique_vsid(void);
 void moea64_release_vsid(uint64_t vsid);
 static void slb_zone_init(void *);
-uma_zone_t slbt_zone;
-uma_zone_t slb_cache_zone;
+static uma_zone_t slbt_zone;
+static uma_zone_t slb_cache_zone;
+int n_slbs = 64;
 SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);
@@ -426,16 +427,18 @@ slb_insert_kernel(uint64_t slbe, uint64_t slbv)
 	/* Check for an unused slot, abusing the user slot as a full flag */
 	if (slbcache[USER_SLB_SLOT].slbe == 0) {
-		for (i = 0; i < USER_SLB_SLOT; i++) {
+		for (i = 0; i < n_slbs; i++) {
+			if (i == USER_SLB_SLOT)
+				continue;
 			if (!(slbcache[i].slbe & SLBE_VALID))
 				goto fillkernslb;
 		}
-		if (i == USER_SLB_SLOT)
+		if (i == n_slbs)
 			slbcache[USER_SLB_SLOT].slbe = 1;
 	}
-	for (i = mftb() % 64, j = 0; j < 64; j++, i = (i+1) % 64) {
+	for (i = mftb() % n_slbs, j = 0; j < n_slbs; j++, i = (i+1) % n_slbs) {
 		if (i == USER_SLB_SLOT)
 			continue;
@@ -443,9 +446,11 @@ slb_insert_kernel(uint64_t slbe, uint64_t slbv)
 			break;
 	}
-	KASSERT(j < 64, ("All kernel SLB slots locked!"));
+	KASSERT(j < n_slbs, ("All kernel SLB slots locked!"));
 fillkernslb:
+	KASSERT(i != USER_SLB_SLOT,
+	    ("Filling user SLB slot with a kernel mapping"));
 	slbcache[i].slbv = slbv;
 	slbcache[i].slbe = slbe | (uint64_t)i;
@@ -466,11 +471,11 @@ slb_insert_user(pmap_t pm, struct slb *slb)
 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
-	if (pm->pm_slb_len < 64) {
+	if (pm->pm_slb_len < n_slbs) {
 		i = pm->pm_slb_len;
 		pm->pm_slb_len++;
 	} else {
-		i = mftb() % 64;
+		i = mftb() % n_slbs;
 	}
 	/* Note that this replacement is atomic with respect to trap_subr */
@@ -521,8 +526,9 @@ slb_zone_init(void *dummy)
 	slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
-	slb_cache_zone = uma_zcreate("SLB cache", 64*sizeof(struct slb *),
-	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
+	slb_cache_zone = uma_zcreate("SLB cache",
+	    (n_slbs + 1)*sizeof(struct slb *), NULL, NULL, NULL, NULL,
+	    UMA_ALIGN_PTR, UMA_ZONE_VM);
 	if (platform_real_maxaddr() != VM_MAX_ADDRESS) {
 		uma_zone_set_allocf(slb_cache_zone, slb_uma_real_alloc);

View File

@@ -53,55 +53,53 @@
  * User SRs are loaded through a pointer to the current pmap.
  */
 restore_usersrs:
-	GET_CPUINFO(%r28);
-	ld	%r28,PC_USERSLB(%r28);
+	GET_CPUINFO(%r28)
+	ld	%r28,PC_USERSLB(%r28)
 	li	%r29, 0			/* Set the counter to zero */
 	slbia
 	slbmfee	%r31,%r29
 	clrrdi	%r31,%r31,28
 	slbie	%r31
-instuserslb:
-	ld	%r31, 0(%r28);		/* Load SLB entry pointer */
-	cmpli	0, %r31, 0;		/* If NULL, stop */
-	beqlr;
+1:	ld	%r31, 0(%r28)		/* Load SLB entry pointer */
+	cmpli	0, %r31, 0		/* If NULL, stop */
+	beqlr
 	ld	%r30, 0(%r31)		/* Load SLBV */
 	ld	%r31, 8(%r31)		/* Load SLBE */
 	or	%r31, %r31, %r29	/* Set SLBE slot */
-	slbmte	%r30, %r31;		/* Install SLB entry */
+	slbmte	%r30, %r31		/* Install SLB entry */
-	addi	%r28, %r28, 8;		/* Advance pointer */
-	addi	%r29, %r29, 1;
-	cmpli	0, %r29, 64;		/* Repeat if we are not at the end */
-	blt	instuserslb;
-	blr;
+	addi	%r28, %r28, 8		/* Advance pointer */
+	addi	%r29, %r29, 1
+	b	1b			/* Repeat */
 /*
  * Kernel SRs are loaded directly from the PCPU fields
  */
 restore_kernsrs:
-	GET_CPUINFO(%r28);
-	addi	%r28,%r28,PC_KERNSLB;
+	GET_CPUINFO(%r28)
+	addi	%r28,%r28,PC_KERNSLB
 	li	%r29, 0			/* Set the counter to zero */
 	slbia
 	slbmfee	%r31,%r29
 	clrrdi	%r31,%r31,28
 	slbie	%r31
-instkernslb:
-	ld	%r31, 8(%r28);		/* Load SLBE */
+1:	cmpli	0, %r29, USER_SLB_SLOT	/* Skip the user slot */
+	beq-	2f
-	cmpli	0, %r31, 0;		/* If SLBE is not valid, stop */
-	beqlr;
+	ld	%r31, 8(%r28)		/* Load SLBE */
+	cmpli	0, %r31, 0		/* If SLBE is not valid, stop */
+	beqlr
 	ld	%r30, 0(%r28)		/* Load SLBV */
-	slbmte	%r30, %r31;		/* Install SLB entry */
+	slbmte	%r30, %r31		/* Install SLB entry */
-	addi	%r28, %r28, 16;		/* Advance pointer */
-	addi	%r29, %r29, 1;
-	cmpli	0, %r29, USER_SLB_SLOT;	/* Repeat if we are not at the end */
-	blt	instkernslb;
-	blr;
+2:	addi	%r28, %r28, 16		/* Advance pointer */
+	addi	%r29, %r29, 1
+	cmpli	0, %r29, 64		/* Repeat if we are not at the end */
+	blt	1b
+	blr
 /*
  * FRAME_SETUP assumes:

View File

@@ -65,7 +65,7 @@
 /*
  * User segment for copyin/out
  */
-#define	USER_SLB_SLOT	63
+#define	USER_SLB_SLOT	0
 #define	USER_SLB_SLBE	(((USER_ADDR >> ADDR_SR_SHFT) << SLBE_ESID_SHIFT) | \
 			    SLBE_VALID | USER_SLB_SLOT)