
Patch some more concurrency issues here. This expands the page table
lock to cover the PVOs, and removes the scratchpage PTEs from the PVOs
entirely to avoid the system trying to be helpful and rewriting them.
This commit is contained in:
Nathan Whitehorn 2010-03-04 06:39:58 +00:00
parent 98dc296435
commit 9fcd9ccb86
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=204694
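
The locking half of this change follows one pattern in every function touched
below: the page table lock moves from inside each loop iteration to around the
entire walk of a page's PVO list, so the list linkage and the cached PTE image
in each PVO are read and modified in a single critical section. A minimal
sketch of the before/after shape (pvo_head stands for the per-page PVO list
head; the loop bodies are elided):

	/*
	 * Before: lock taken and dropped per PVO; the list and the
	 * PVO contents can change between iterations.
	 */
	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
		LOCK_TABLE();
		/* ... inspect or rewrite pvo->pvo_pte and the real PTE ... */
		UNLOCK_TABLE();
	}

	/* After: one critical section covers the walk and every PVO in it. */
	LOCK_TABLE();
	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
		/* ... same work, now safe against concurrent removal ... */
	}
	UNLOCK_TABLE();

The scratchpage half removes the two page-zeroing/copying mappings from the
PVO layer entirely and pins their PTEs directly in the page table, as the
first two hunks show.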


@@ -327,7 +327,6 @@ SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
     &moea64_pvo_remove_calls, 0, "");
 vm_offset_t	moea64_scratchpage_va[2];
-struct	pvo_entry *moea64_scratchpage_pvo[2];
 struct	lpte *moea64_scratchpage_pte[2];
 struct	mtx	moea64_scratchpage_mtx;
@@ -965,22 +964,36 @@ moea64_bridge_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
 	PMAP_UNLOCK(kernel_pmap);
 	/*
-	 * Allocate some things for page zeroing
+	 * Allocate some things for page zeroing. We put this directly
+	 * in the page table, marked with LPTE_LOCKED, to avoid any
+	 * of the PVO book-keeping or other parts of the VM system
+	 * from even knowing that this hack exists.
 	 */
 	mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL, MTX_DEF);
 	for (i = 0; i < 2; i++) {
+		struct lpte pt;
+		uint64_t vsid;
+		int pteidx, ptegidx;
 		moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
 		virtual_end -= PAGE_SIZE;
-		moea64_kenter(mmup,moea64_scratchpage_va[i],0);
 		LOCK_TABLE();
-		moea64_scratchpage_pvo[i] = moea64_pvo_find_va(kernel_pmap,
-		    moea64_scratchpage_va[i],&j);
-		moea64_scratchpage_pte[i] = moea64_pvo_to_pte(
-		    moea64_scratchpage_pvo[i],j);
-		moea64_scratchpage_pte[i]->pte_hi |= LPTE_LOCKED;
+		vsid = va_to_vsid(kernel_pmap, moea64_scratchpage_va[i]);
+		moea64_pte_create(&pt, vsid, moea64_scratchpage_va[i],
+		    LPTE_NOEXEC);
+		pt.pte_hi |= LPTE_LOCKED;
+		ptegidx = va_to_pteg(vsid, moea64_scratchpage_va[i]);
+		pteidx = moea64_pte_insert(ptegidx, &pt);
+		if (pt.pte_hi & LPTE_HID)
+			ptegidx ^= moea64_pteg_mask;
+		moea64_scratchpage_pte[i] =
+		    &moea64_pteg_table[ptegidx].pt[pteidx];
 		UNLOCK_TABLE();
 	}
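
Two details of this hunk are worth spelling out. moea64_pte_insert() may place
the entry in the secondary hash group when the primary PTEG is full, and it
reports that by setting LPTE_HID in the caller's PTE image, which is why
ptegidx is XORed with moea64_pteg_mask before the slot address is taken. And
LPTE_LOCKED is what makes the mapping permanent: any eviction scan that makes
room in a full PTEG has to skip locked slots. A hedged sketch of such a scan,
not the actual body of moea64_pte_insert() (the eight-slot group is the
standard 64-bit hashed-page-table layout):

	int k;
	struct lpte *slot;

	for (k = 0; k < 8; k++) {	/* 8 PTEs per PTEG */
		slot = &moea64_pteg_table[ptegidx].pt[k];
		if (slot->pte_hi & LPTE_LOCKED)
			continue;	/* pinned: never an eviction victim */
		/* ... invalidate *slot and reuse it for the new entry ... */
	}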
@@ -1088,18 +1101,16 @@ moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
 static __inline
 void moea64_set_scratchpage_pa(int which, vm_offset_t pa) {
+	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
-	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &=
-	    ~(LPTE_WIMG | LPTE_RPGN);
-	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |=
-	    moea64_calc_wimg(pa) | (uint64_t)pa;
-	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
 	moea64_scratchpage_pte[which]->pte_hi &= ~LPTE_VALID;
 	TLBIE(kernel_pmap, moea64_scratchpage_va[which]);
-	moea64_scratchpage_pte[which]->pte_lo =
-	    moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo;
+	moea64_scratchpage_pte[which]->pte_lo &=
+	    ~(LPTE_WIMG | LPTE_RPGN);
+	moea64_scratchpage_pte[which]->pte_lo |=
+	    moea64_calc_wimg(pa) | (uint64_t)pa;
 	EIEIO();
 	moea64_scratchpage_pte[which]->pte_hi |= LPTE_VALID;
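
With the PVO copy gone, moea64_set_scratchpage_pa() retargets the pinned PTE
in place, following the architecturally required order: clear LPTE_VALID,
flush the stale translation with TLBIE, rewrite the WIMG and RPGN fields, then
EIEIO to order the stores before setting LPTE_VALID again. For context, this
is roughly how the two scratch mappings are consumed by the page-copy path
elsewhere in this file (a paraphrase, not part of this diff):

	mtx_lock(&moea64_scratchpage_mtx);
	moea64_set_scratchpage_pa(0, src);	/* point scratch VA 0 at the source page */
	moea64_set_scratchpage_pa(1, dst);	/* point scratch VA 1 at the destination */
	kcopy((void *)moea64_scratchpage_va[0],
	    (void *)moea64_scratchpage_va[1], PAGE_SIZE);
	mtx_unlock(&moea64_scratchpage_mtx);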
@@ -1496,11 +1507,11 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
 		return;
 	lo = moea64_attr_fetch(m);
 	SYNC();
+	LOCK_TABLE();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		pmap = pvo->pvo_pmap;
 		PMAP_LOCK(pmap);
 		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
-			LOCK_TABLE();
 			pt = moea64_pvo_to_pte(pvo, -1);
 			pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
 			pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
@@ -1511,10 +1522,10 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
 				moea64_pte_change(pt, &pvo->pvo_pte.lpte,
 				    pvo->pvo_pmap, PVO_VADDR(pvo));
 			}
-			UNLOCK_TABLE();
 		}
 		PMAP_UNLOCK(pmap);
 	}
+	UNLOCK_TABLE();
 	if ((lo & LPTE_CHG) != 0) {
 		moea64_attr_clear(m, LPTE_CHG);
 		vm_page_dirty(m);
@@ -1651,12 +1662,14 @@ moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
 		return FALSE;
 	loops = 0;
+	LOCK_TABLE();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		if (pvo->pvo_pmap == pmap)
 			return (TRUE);
 		if (++loops >= 16)
 			break;
 	}
+	UNLOCK_TABLE();
 	return (FALSE);
 }
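
Widening a lock over a loop obliges every exit path to release it. A hedged
restatement of the loop above with the unlock made explicit on the
early-return path (illustrative, not the committed code):

	LOCK_TABLE();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap) {
			UNLOCK_TABLE();	/* balance the lock on every return */
			return (TRUE);
		}
		if (++loops >= 16)
			break;
	}
	UNLOCK_TABLE();
	return (FALSE);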
@@ -1675,9 +1688,11 @@ moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
 	if (!moea64_initialized || (m->flags & PG_FICTITIOUS) != 0)
 		return (count);
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	LOCK_TABLE();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
 		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
 			count++;
+	UNLOCK_TABLE();
 	return (count);
 }
@@ -1896,6 +1911,7 @@ moea64_remove_all(mmu_t mmu, vm_page_t m)
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	pvo_head = vm_page_to_pvoh(m);
+	LOCK_TABLE();
 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
@@ -1905,6 +1921,7 @@ moea64_remove_all(mmu_t mmu, vm_page_t m)
 		moea64_pvo_remove(pvo, -1);
 		PMAP_UNLOCK(pmap);
 	}
+	UNLOCK_TABLE();
 	if ((m->flags & PG_WRITEABLE) && moea64_is_modified(mmu, m)) {
 		moea64_attr_clear(m, LPTE_CHG);
 		vm_page_dirty(m);
@@ -2130,7 +2147,6 @@ moea64_pvo_remove(struct pvo_entry *pvo, int pteidx)
 	} else {
 		moea64_pte_overflow--;
 	}
-	UNLOCK_TABLE();
 	/*
 	 * Update our statistics.
@@ -2162,9 +2178,12 @@ moea64_pvo_remove(struct pvo_entry *pvo, int pteidx)
 	 * if we aren't going to reuse it.
 	 */
 	LIST_REMOVE(pvo, pvo_olink);
+	UNLOCK_TABLE();
 	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
 		uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
 		    moea64_upvo_zone, pvo);
 	moea64_pvo_entries--;
 	moea64_pvo_remove_calls++;
 }
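
The reordering in moea64_pvo_remove() is what lets the callers above hold the
table lock across a whole walk: the PVO is unlinked from the overflow list
while the lock is still held, so no concurrent walker can see a half-removed
entry, and only then is the lock dropped and the memory returned to its UMA
zone. Schematically (zone stands in for the managed/unmanaged zone choice in
the hunk):

	LOCK_TABLE();
	/* ... invalidate the PTE and detach the PVO ... */
	LIST_REMOVE(pvo, pvo_olink);	/* unlink under the table lock */
	UNLOCK_TABLE();			/* drop it before freeing */
	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree(zone, pvo);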
@@ -2313,6 +2332,7 @@ moea64_query_bit(vm_page_t m, u_int64_t ptebit)
 	if (moea64_attr_fetch(m) & ptebit)
 		return (TRUE);
+	LOCK_TABLE();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		MOEA_PVO_CHECK(pvo);	/* sanity check */
@@ -2322,6 +2342,7 @@ moea64_query_bit(vm_page_t m, u_int64_t ptebit)
 		 */
 		if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
 			moea64_attr_save(m, ptebit);
+			UNLOCK_TABLE();
 			MOEA_PVO_CHECK(pvo);	/* sanity check */
 			return (TRUE);
 		}
@@ -2341,7 +2362,6 @@ moea64_query_bit(vm_page_t m, u_int64_t ptebit)
 		 * REF/CHG bits from the valid PTE. If the appropriate
 		 * ptebit is set, cache it and return success.
 		 */
-		LOCK_TABLE();
 		pt = moea64_pvo_to_pte(pvo, -1);
 		if (pt != NULL) {
 			moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
@@ -2353,8 +2373,8 @@ moea64_query_bit(vm_page_t m, u_int64_t ptebit)
 				return (TRUE);
 			}
 		}
-		UNLOCK_TABLE();
 	}
+	UNLOCK_TABLE();
 	return (FALSE);
 }
@@ -2387,10 +2407,10 @@ moea64_clear_bit(vm_page_t m, u_int64_t ptebit, u_int64_t *origbit)
 	 * valid pte clear the ptebit from the valid pte.
 	 */
 	count = 0;
+	LOCK_TABLE();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		MOEA_PVO_CHECK(pvo);	/* sanity check */
-		LOCK_TABLE();
 		pt = moea64_pvo_to_pte(pvo, -1);
 		if (pt != NULL) {
 			moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
@@ -2399,11 +2419,11 @@ moea64_clear_bit(vm_page_t m, u_int64_t ptebit, u_int64_t *origbit)
 				moea64_pte_clear(pt, pvo->pvo_pmap, PVO_VADDR(pvo), ptebit);
 			}
 		}
-		UNLOCK_TABLE();
 		rv |= pvo->pvo_pte.lpte.pte_lo;
 		pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
 		MOEA_PVO_CHECK(pvo);	/* sanity check */
 	}
+	UNLOCK_TABLE();
 	if ((origbit != NULL) {
 		*origbit = rv;