mirror of https://git.FreeBSD.org/src.git
Reduce the number of global TLB shootdowns generated by pmap_qenter().

Specifically, teach pmap_qenter() to recognize the case when it is
being asked to replace a mapping with the very same mapping and not
generate a shootdown.  Unfortunately, the buffer cache commonly passes
an entire buffer to pmap_qenter() when only a subset of the mappings
are changing.  For the extension of buffers in allocbuf() this was
resulting in unnecessary shootdowns.  The addition of new pages to the
end of the buffer need not and did not trigger a shootdown, but
overwriting the initial mappings with the very same mappings was seen
as a change that necessitated a shootdown.  With this change, that is
no longer so.

For a "buildworld" on amd64, this change eliminates 14-15% of the
pmap_invalidate_range() shootdowns, and about 4% of the overall
shootdowns.

MFC after:	3 weeks
parent daf94b1b7a
commit 8155e5d561

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=209887
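Before the diff itself: the core of the change is that pmap_qenter()
now compares each existing PTE against the PTE it is about to write,
skips the store when they specify the same frame and cache attributes,
and accumulates replaced PTEs into oldpte so the final invalidation
fires only if some valid mapping actually changed.  Below is a minimal,
self-contained model of that control flow, not FreeBSD code: the
constants and the toy_ptes array are hypothetical stand-ins for the
real kernel page table.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PG_V     0x1ULL                  /* valid */
#define PG_RW    0x2ULL                  /* writable */
#define PG_FRAME 0x000ffffffffff000ULL   /* physical frame bits */

static uint64_t toy_ptes[4];             /* stand-in kernel page table */

/*
 * Enter "count" frames starting at slot 0; report whether a shootdown
 * would be needed, mirroring the rewritten pmap_qenter() control flow.
 */
static bool
toy_qenter(const uint64_t *frames, int count)
{
	uint64_t oldpte = 0;

	for (int i = 0; i < count; i++) {
		uint64_t pa = frames[i] & PG_FRAME;

		/*
		 * New behavior: skip the store (and the oldpte
		 * accumulation) when the same frame is already mapped.
		 */
		if ((toy_ptes[i] & PG_FRAME) != pa) {
			oldpte |= toy_ptes[i];
			toy_ptes[i] = pa | PG_RW | PG_V;
		}
	}
	/* Invalidate only if a previously valid entry was replaced. */
	return ((oldpte & PG_V) != 0);
}

int
main(void)
{
	uint64_t frames[2] = { 0x1000, 0x2000 };

	printf("initial fill:   shootdown=%d\n", toy_qenter(frames, 2));
	printf("same mapping:   shootdown=%d\n", toy_qenter(frames, 2));
	frames[1] = 0x3000;
	printf("one pte change: shootdown=%d\n", toy_qenter(frames, 2));
	return (0);
}

Running it prints shootdown=0 for both the initial fill and the
identical re-enter, and shootdown=1 only once a valid entry is actually
replaced, which is exactly the allocbuf() case the commit message
describes.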
sys/amd64/amd64/pmap.c:

@@ -1331,19 +1331,22 @@ pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
 void
 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
 {
-	pt_entry_t *endpte, oldpte, *pte;
+	pt_entry_t *endpte, oldpte, pa, *pte;
+	vm_page_t m;
 
 	oldpte = 0;
 	pte = vtopte(sva);
 	endpte = pte + count;
 	while (pte < endpte) {
-		oldpte |= *pte;
-		pte_store(pte, VM_PAGE_TO_PHYS(*ma) | PG_G |
-		    pmap_cache_bits((*ma)->md.pat_mode, 0) | PG_RW | PG_V);
+		m = *ma++;
+		pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
+		if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
+			oldpte |= *pte;
+			pte_store(pte, pa | PG_G | PG_RW | PG_V);
+		}
 		pte++;
-		ma++;
 	}
-	if ((oldpte & PG_V) != 0)
+	if (__predict_false((oldpte & PG_V) != 0))
 		pmap_invalidate_range(kernel_pmap, sva, sva + count *
 		    PAGE_SIZE);
 }
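One detail worth noting in the new code: the invalidation test is now
wrapped in __predict_false().  On FreeBSD this macro is a thin wrapper
around the GCC/Clang branch-prediction hint __builtin_expect(), telling
the compiler that, after this change, the common case is that no valid
mapping was replaced, so the no-shootdown path should be laid out as
straight-line code.  A tiny standalone demonstration follows; the macro
definition matches FreeBSD's <sys/cdefs.h>, while the rest is
illustrative.

#include <stdio.h>

/* As defined in FreeBSD's <sys/cdefs.h>. */
#define __predict_false(exp)	__builtin_expect((exp), 0)

int
main(void)
{
	unsigned long oldpte = 0;	/* no PG_V accumulated: nothing changed */

	/* The hint only affects code layout, never the result. */
	if (__predict_false((oldpte & 0x1) != 0))
		printf("rare path: a TLB shootdown would be issued\n");
	else
		printf("common path: no shootdown\n");
	return (0);
}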
sys/i386/i386/pmap.c:

@@ -1461,19 +1461,22 @@ pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
 void
 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
 {
-	pt_entry_t *endpte, oldpte, *pte;
+	pt_entry_t *endpte, oldpte, pa, *pte;
+	vm_page_t m;
 
 	oldpte = 0;
 	pte = vtopte(sva);
 	endpte = pte + count;
 	while (pte < endpte) {
-		oldpte |= *pte;
-		pte_store(pte, VM_PAGE_TO_PHYS(*ma) | pgeflag |
-		    pmap_cache_bits((*ma)->md.pat_mode, 0) | PG_RW | PG_V);
+		m = *ma++;
+		pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
+		if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
+			oldpte |= *pte;
+			pte_store(pte, pa | pgeflag | PG_RW | PG_V);
+		}
 		pte++;
-		ma++;
 	}
-	if ((oldpte & PG_V) != 0)
+	if (__predict_false((oldpte & PG_V) != 0))
 		pmap_invalidate_range(kernel_pmap, sva, sva + count *
 		    PAGE_SIZE);
 }
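The i386 hunk is the same change; the only difference is that the
global-page bit comes from the run-time variable pgeflag rather than
the PG_G constant, because PGE support is probed at boot on i386.
Common to both hunks is the mask used by the equality test: the old
PTE is compared under PG_FRAME | PG_PTE_CACHE, so a mapping counts as
"the very same mapping" only when the physical frame and the
cache-attribute bits match, while permission and accessed/dirty bits
are deliberately ignored (leaving the old PTE in place preserves them).
A small sketch of that masking, with bit values following the amd64
pmap headers of this era; treat the exact constants as illustrative.

#include <stdint.h>
#include <stdio.h>

#define PG_V          0x001ULL              /* valid */
#define PG_RW         0x002ULL              /* writable */
#define PG_NC_PWT     0x008ULL              /* write-through */
#define PG_NC_PCD     0x010ULL              /* cache-disable */
#define PG_A          0x020ULL              /* accessed */
#define PG_M          0x040ULL              /* modified (dirty) */
#define PG_PTE_PAT    0x080ULL              /* PAT bit of a 4KB PTE */
#define PG_PTE_CACHE  (PG_NC_PWT | PG_NC_PCD | PG_PTE_PAT)
#define PG_FRAME      0x000ffffffffff000ULL /* physical frame bits */

int
main(void)
{
	/*
	 * An existing mapping whose accessed/dirty bits have been set
	 * by hardware since it was entered.
	 */
	uint64_t oldpte = 0x1000ULL | PG_V | PG_RW | PG_A | PG_M;
	/*
	 * What pmap_qenter() would compute for the same page with the
	 * default write-back cache mode: frame bits, no cache bits.
	 */
	uint64_t pa = 0x1000ULL;

	/*
	 * A/M and permission bits are masked off, so this still counts
	 * as the same mapping and the store would be skipped.
	 */
	printf("same mapping: %d\n",
	    (oldpte & (PG_FRAME | PG_PTE_CACHE)) == pa);
	return (0);
}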