
Remove locking of the vm page queues from several pmaps, which only
protected the dirty mask updates. The dirty mask updates are handled
by atomics after r225840.

Submitted by:	alc
Tested by:	flo (sparc64)
MFC after:	2 weeks
Konstantin Belousov 2011-09-28 15:01:20 +00:00
parent abb9b935ca
commit 578113aaa3
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=225841
3 changed files with 1 addition and 6 deletions
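For illustration only, the sketch below is a minimal user-space C program (not FreeBSD kernel code) showing the pattern the commit message relies on: once the per-page dirty mask is only ever updated with atomic read-modify-write operations, code such as pmap_protect_tte() can clear the writable/modified bits, inspect the returned old value, and mark the page dirty without holding the global page queues lock. All names here (fake_tte, fake_page, protect_entry, and the TD_* bit values) are invented for the example; only the shape of the logic loosely mirrors the sparc64 hunk below.

/*
 * Stand-alone sketch using C11 atomics; the kernel uses its own
 * atomic(9) primitives rather than <stdatomic.h>.
 */
#include <stdatomic.h>
#include <stdio.h>

#define	TD_SW	0x1UL		/* software-writable bit (illustrative) */
#define	TD_W	0x2UL		/* hardware write/modified bit (illustrative) */
#define	TD_PV	0x4UL		/* managed page bit (illustrative) */

struct fake_page {
	atomic_ulong dirty;	/* dirty mask, updated only with atomics */
};

struct fake_tte {
	atomic_ulong tte_data;	/* translation entry bits */
};

/*
 * Revoke write access on one mapping and transfer any modified state to
 * the page's dirty mask.  No global lock is taken; the atomics alone keep
 * concurrent updates of tte_data and dirty consistent.
 */
static void
protect_entry(struct fake_tte *tp, struct fake_page *m)
{
	unsigned long data;

	/* Atomically clear the write bits and fetch the previous value. */
	data = atomic_fetch_and(&tp->tte_data, ~(TD_SW | TD_W));
	if ((data & (TD_PV | TD_W)) == (TD_PV | TD_W)) {
		/* Stand-in for vm_page_dirty(): mark the whole page dirty. */
		atomic_fetch_or(&m->dirty, ~0UL);
	}
}

int
main(void)
{
	struct fake_tte tp;
	struct fake_page m;

	atomic_init(&tp.tte_data, TD_PV | TD_SW | TD_W);
	atomic_init(&m.dirty, 0);

	protect_entry(&tp, &m);
	printf("dirty mask after protect: %#lx\n", atomic_load(&m.dirty));
	return (0);
}

With the dirty mask handled this way, the only synchronization left in these paths is the per-pmap lock, which is why the hunks below drop vm_page_lock_queues()/vm_page_unlock_queues() and the sparc64 hunk adds a PMAP_LOCK_ASSERT() to document the remaining requirement.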


@@ -1486,7 +1486,6 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 	if ((sva & PAGE_MASK) || (eva & PAGE_MASK))
 		panic("pmap_protect: unaligned addresses");
 
-	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
 	oldpmap = pmap_switch(pmap);
 	for ( ; sva < eva; sva += PAGE_SIZE) {
@@ -1514,7 +1513,6 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 		pmap_pte_prot(pmap, pte, prot);
 		pmap_invalidate_page(sva);
 	}
-	vm_page_unlock_queues();
 	pmap_switch(oldpmap);
 	PMAP_UNLOCK(pmap);
 }


@@ -1918,7 +1918,6 @@ mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
 	if (prot & VM_PROT_WRITE)
 		return;
 
-	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
 	for (va = sva; va < eva; va += PAGE_SIZE) {
 		if ((pte = pte_find(mmu, pmap, va)) != NULL) {
@@ -1941,7 +1940,6 @@ mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
 		}
 	}
 	PMAP_UNLOCK(pmap);
-	vm_page_unlock_queues();
 }
 
 /*


@@ -1423,6 +1423,7 @@ pmap_protect_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
 	u_long data;
 	vm_page_t m;
 
+	PMAP_LOCK_ASSERT(pm, MA_OWNED);
 	data = atomic_clear_long(&tp->tte_data, TD_SW | TD_W);
 	if ((data & (TD_PV | TD_W)) == (TD_PV | TD_W)) {
 		m = PHYS_TO_VM_PAGE(TD_PA(data));
@@ -1451,7 +1452,6 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 	if (prot & VM_PROT_WRITE)
 		return;
 
-	vm_page_lock_queues();
 	PMAP_LOCK(pm);
 	if (eva - sva > PMAP_TSB_THRESH) {
 		tsb_foreach(pm, NULL, sva, eva, pmap_protect_tte);
@@ -1463,7 +1463,6 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 		tlb_range_demap(pm, sva, eva - 1);
 	}
 	PMAP_UNLOCK(pm);
-	vm_page_unlock_queues();
 }
 
 /*