1
0
mirror of https://git.FreeBSD.org/src.git synced 2025-01-25 16:13:17 +00:00

- Push down the acquisition and release of Giant into pmap_protect() on
  those architectures without pmap locking.
 - Eliminate the acquisition and release of Giant from vm_map_protect().

(Translation: mprotect(2) runs to completion without touching Giant on
alpha, amd64, i386 and ia64.)
This commit is contained in:
Alan Cox 2004-07-30 20:38:30 +00:00
parent 9be60284a6
commit 9bb0e06861
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=132899
6 changed files with 20 additions and 2 deletions

View File

@ -3014,7 +3014,9 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
int flush;
if ((prot & VM_PROT_READ) == 0) {
mtx_lock(&Giant);
pmap_remove(pm, sva, eva);
mtx_unlock(&Giant);
return;
}
@ -3026,6 +3028,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
return;
}
mtx_lock(&Giant);
/*
* OK, at this point, we know we're doing write-protect operation.
@ -3091,6 +3094,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
pmap_tlb_flushD(pm);
}
mtx_unlock(&Giant);
}

View File

@ -1459,10 +1459,13 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
("pmap_protect: non current pmap"));
if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
mtx_lock(&Giant);
pmap_remove(pm, sva, eva);
mtx_unlock(&Giant);
return;
}
mtx_lock(&Giant);
vm_page_lock_queues();
for (; sva < eva; sva += PAGE_SIZE) {
pvo = pmap_pvo_find_va(pm, sva, &pteidx);
@ -1490,6 +1493,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
}
vm_page_unlock_queues();
mtx_unlock(&Giant);
}
/*

View File

@ -1459,10 +1459,13 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
("pmap_protect: non current pmap"));
if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
mtx_lock(&Giant);
pmap_remove(pm, sva, eva);
mtx_unlock(&Giant);
return;
}
mtx_lock(&Giant);
vm_page_lock_queues();
for (; sva < eva; sva += PAGE_SIZE) {
pvo = pmap_pvo_find_va(pm, sva, &pteidx);
@ -1490,6 +1493,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
}
vm_page_unlock_queues();
mtx_unlock(&Giant);
}
/*

View File

@ -1459,10 +1459,13 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
("pmap_protect: non current pmap"));
if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
mtx_lock(&Giant);
pmap_remove(pm, sva, eva);
mtx_unlock(&Giant);
return;
}
mtx_lock(&Giant);
vm_page_lock_queues();
for (; sva < eva; sva += PAGE_SIZE) {
pvo = pmap_pvo_find_va(pm, sva, &pteidx);
@ -1490,6 +1493,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
}
vm_page_unlock_queues();
mtx_unlock(&Giant);
}
/*

View File

@ -1208,13 +1208,16 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
pm->pm_context[PCPU_GET(cpuid)], sva, eva, prot);
if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
mtx_lock(&Giant);
pmap_remove(pm, sva, eva);
mtx_unlock(&Giant);
return;
}
if (prot & VM_PROT_WRITE)
return;
mtx_lock(&Giant);
vm_page_lock_queues();
if (eva - sva > PMAP_TSB_THRESH) {
tsb_foreach(pm, NULL, sva, eva, pmap_protect_tte);
@ -1227,6 +1230,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
tlb_range_demap(pm, sva, eva - 1);
}
vm_page_unlock_queues();
mtx_unlock(&Giant);
}
/*

View File

@ -1394,14 +1394,12 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
* here -- CHECK THIS XXX
*/
if (current->protection != old_prot) {
mtx_lock(&Giant);
#define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
VM_PROT_ALL)
pmap_protect(map->pmap, current->start,
current->end,
current->protection & MASK(current));
#undef MASK
mtx_unlock(&Giant);
}
vm_map_simplify_entry(map, current);
current = current->next;