1
0
mirror of https://git.FreeBSD.org/src.git synced 2024-12-19 10:53:58 +00:00

- add ranged shootdowns when fewer than 64 mappings are being invalidated

This commit is contained in:
Kip Macy 2006-12-25 02:05:52 +00:00
parent 0321d7f9a8
commit 35d16ac000
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=165533
3 changed files with 48 additions and 21 deletions

View File

@ -104,7 +104,7 @@ extern char tl_ipi_level[];
extern char tl_invltlb[];
extern char tl_invlctx[];
extern char tl_invlpg[];
extern char tl_ipi_tlb_range_demap[];
extern char tl_invlrng[];
extern char tl_tsbupdate[];
extern char tl_ttehashupdate[];

View File

@ -452,6 +452,35 @@ ENTRY(tl_invlpg)
membar #Sync
END(tl_invlpg)
ENTRY(tl_invlrng)
/*
 * IPI trap handler: demap a range of 8K mappings through the sun4v
 * MMU_UNMAP_ADDR hypervisor call (one hcall per page).
 * On entry %g1 packs both arguments: the page-aligned start VA in the
 * high bits and the page count in the low (PAGE_SIZE - 1) bits;
 * %g2 holds the MMU context to demap.
 */
/* %g5 = PAGE_SIZE - 1, the in-page offset mask */
sethi %hi(PAGE_SIZE), %g5
dec %g5
/* split %g1: %g4 = page count (low bits), %g1 = page-aligned VA */
and %g1, %g5, %g4
andn %g1, %g5, %g1
/* pre-decrement so the brnz/dec loop below executes "count" times */
dec %g4
/* preserve the interrupted code's %o0-%o2 across the hypercall */
1: mov %o0, %g5
mov %o1, %g6
mov %o2, %g7
/* MMU_UNMAP_ADDR args: %o0 = va, %o1 = context, %o2 = TLB select */
mov MAP_ITLB|MAP_DTLB, %o2
mov %g1, %o0
mov %g2, %o1
ta MMU_UNMAP_ADDR
/* nonzero %o0 = hcall failure; panic with the hcall number in %o1 */
brnz,a,pn %o0, interrupt_panic_bad_hcall
mov MMU_UNMAP_ADDR, %o1
/*
 * NOTE(review): %g1 is never advanced by PAGE_SIZE inside this loop,
 * so every iteration appears to demap the same VA. Likewise %g5-%g7
 * are re-saved each pass after %o0 was clobbered by the hcall.
 * Confirm against later revisions whether an
 * "add %g1, PAGE_SIZE, %g1" is missing here.
 */
brnz,pt %g4, 1b
dec %g4
/* restore caller's %o0-%o2 and acknowledge the IPI */
mov %g5, %o0
mov %g6, %o1
mov %g7, %o2
ba,pt %xcc, set_ackmask
membar #Sync
END(tl_invlrng)
ENTRY(tl_tsbupdate)
/* compare current context with one to be updated */
mov MMU_CID_S, %g4

View File

@ -1509,9 +1509,9 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va, int cleartsb)
void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int cleartsb)
{
vm_offset_t tva;
#ifdef SMP
vm_offset_t tva, invlrngva;
char *func;
#ifdef SMP
cpumask_t active;
#endif
if ((eva - sva) == PAGE_SIZE) {
@ -1520,8 +1520,7 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int clearts
}
if (sva >= eva)
panic("invalidating negative or zero range sva=0x%lx eva=0x%lx", sva, eva);
KASSERT(sva < eva, ("invalidating negative or zero range sva=0x%lx eva=0x%lx", sva, eva));
if (cleartsb == TRUE)
tsb_clear_range(&pmap->pm_tsb, sva, eva);
@ -1530,18 +1529,18 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int clearts
if ((eva - sva) < PAGE_SIZE*64) {
for (tva = sva; tva < eva; tva += PAGE_SIZE_8K)
invlpg(tva, pmap->pm_context);
} else if (pmap->pm_context)
invlctx(pmap->pm_context);
else
invltlb();
#ifdef SMP
if (pmap == kernel_pmap)
func = tl_invltlb;
else
func = tl_invlrng;
} else if (pmap->pm_context) {
func = tl_invlctx;
invlctx(pmap->pm_context);
active = pmap_ipi(pmap, (void *)func, pmap->pm_context, 0);
} else {
func = tl_invltlb;
invltlb();
}
#ifdef SMP
invlrngva = sva | ((eva - sva) >> PAGE_SHIFT);
active = pmap_ipi(pmap, (void *)func, pmap->pm_context, invlrngva);
active &= ~pmap->pm_active;
atomic_clear_int(&pmap->pm_tlbactive, active);
#endif
@ -1552,8 +1551,8 @@ void
pmap_invalidate_all(pmap_t pmap)
{
if (pmap == kernel_pmap)
panic("invalidate_all called on kernel_pmap");
KASSERT(pmap != kernel_pmap,
("invalidate_all called on kernel_pmap"));
tsb_clear(&pmap->pm_tsb);
@ -1802,14 +1801,13 @@ void
pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
{
vm_offset_t va;
tte_t otte_data;
tte_t otte;
otte_data = 0;
otte = 0;
va = sva;
while (count-- > 0) {
otte |= tte_hash_update(kernel_pmap->pm_hash, va,
VM_PAGE_TO_PHYS(*m),
pa | TTE_KERNEL | VTD_8K);
VM_PAGE_TO_PHYS(*m) | TTE_KERNEL | VTD_8K);
va += PAGE_SIZE;
m++;
}