
Use an atomic op to update the pte in pmap_protect().  This is to prevent
the loss of a page modified (PG_M) bit in a race between processors.

Quoting Tor:
	One scenario where the old code could cause a lost PG_M bit is a
	multithreaded linux program (or FreeBSD program using the
	linuxthreads port) where one thread was starting a subprocess.
	The thread doing fork() would call vmspace_fork(), which would then
	call vm_map_copy_entry() which would call pmap_protect() on an area
	possibly accessed by other threads.

Additionally, make the clearing of PG_M by pmap_protect() unconditional if
write permission is removed.  Previously, PG_M could persist on a read-only
unmanaged page.  That seems inconsistent and confusing.
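
A minimal userspace sketch of the read/modify/cmpset retry pattern the
change adopts (not kernel code; prot_demote() and the use of C11 atomics
here are illustrative assumptions, while the PG_RW/PG_M values match the
x86 pte flag bits):

/*
 * Read the old pte value, compute the new one, and store it only if the
 * pte has not changed in the meantime.  A plain "*pte = pbits" could
 * overwrite a PG_M bit set by another processor between the read and
 * the write; the compare-and-set detects that case and retries.
 */
#include <stdatomic.h>
#include <stdint.h>

#define PG_RW	0x002
#define PG_M	0x040

static void
prot_demote(_Atomic uint64_t *pte)
{
	uint64_t obits, pbits;

	do {
		obits = pbits = atomic_load(pte);
		pbits &= ~(uint64_t)(PG_RW | PG_M);	/* revoke write, clear dirty */
		if (pbits == obits)
			return;				/* nothing to change */
	} while (!atomic_compare_exchange_weak(pte, &obits, pbits));
}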

In collaboration with: tegge@

MT5 candidate
PR: 61852
Alan Cox committed 2004-09-12 20:20:40 +00:00
parent d78fc68a15
commit 031102cc7b
Notes: svn2git 2020-12-20 02:59:44 +00:00
       svn path=/head/; revision=135123
2 changed files with 20 additions and 13 deletions

Changed file 1 of 2:

@@ -1784,14 +1784,15 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 			va_next = eva;
 
 		for (; sva != va_next; sva += PAGE_SIZE) {
-			pt_entry_t pbits;
+			pt_entry_t obits, pbits;
 			pt_entry_t *pte;
 			vm_page_t m;
 
 			pte = pmap_pte(pmap, sva);
 			if (pte == NULL)
 				continue;
-			pbits = *pte;
+retry:
+			obits = pbits = *pte;
 			if (pbits & PG_MANAGED) {
 				m = NULL;
 				if (pbits & PG_A) {
@@ -1805,14 +1806,14 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 					m = PHYS_TO_VM_PAGE(pbits &
 					    PG_FRAME);
 					vm_page_dirty(m);
-					pbits &= ~PG_M;
 				}
 			}
 
-			pbits &= ~PG_RW;
+			pbits &= ~(PG_RW | PG_M);
 
-			if (pbits != *pte) {
-				pte_store(pte, pbits);
+			if (pbits != obits) {
+				if (!atomic_cmpset_long(pte, obits, pbits))
+					goto retry;
 				anychanged = 1;
 			}
 		}

Changed file 2 of 2:

@@ -1814,7 +1814,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 	sched_pin();
 	PMAP_LOCK(pmap);
 	for (; sva < eva; sva = pdnxt) {
-		unsigned pdirindex;
+		unsigned obits, pbits, pdirindex;
 
 		pdnxt = (sva + NBPDR) & ~PDRMASK;
@@ -1842,13 +1842,18 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 			pdnxt = eva;
 
 		for (; sva != pdnxt; sva += PAGE_SIZE) {
-			pt_entry_t pbits;
 			pt_entry_t *pte;
 			vm_page_t m;
 
 			if ((pte = pmap_pte_quick(pmap, sva)) == NULL)
 				continue;
-			pbits = *pte;
+retry:
+			/*
+			 * Regardless of whether a pte is 32 or 64 bits in
+			 * size, PG_RW, PG_A, and PG_M are among the least
+			 * significant 32 bits.
+			 */
+			obits = pbits = *(u_int *)pte;
 			if (pbits & PG_MANAGED) {
 				m = NULL;
 				if (pbits & PG_A) {
@@ -1861,14 +1866,15 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 					if (m == NULL)
 						m = PHYS_TO_VM_PAGE(pbits);
 					vm_page_dirty(m);
-					pbits &= ~PG_M;
 				}
 			}
 
-			pbits &= ~PG_RW;
+			pbits &= ~(PG_RW | PG_M);
 
-			if (pbits != *pte) {
-				pte_store(pte, pbits);
+			if (pbits != obits) {
+				if (!atomic_cmpset_int((u_int *)pte, obits,
+				    pbits))
+					goto retry;
 				anychanged = 1;
 			}
 		}
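
A side note on the i386 hunk above: because PG_RW, PG_A, and PG_M all
live in the low 32 bits of the pte, a 32-bit cmpset on the low word is
sufficient even when PAE makes the pte 64 bits wide.  A rough
little-endian userspace illustration (not kernel code; the constants
match the x86 pte flag bits, everything else is assumed):

/*
 * Illustration only: on little-endian x86 the low 32 bits of a 64-bit
 * PAE pte are its first four bytes in memory, and PG_RW, PG_A, and
 * PG_M all fall inside them, so masking that word alone clears the
 * flags without touching the upper physical-address bits.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define PG_RW	0x002ULL	/* writable */
#define PG_A	0x020ULL	/* accessed */
#define PG_M	0x040ULL	/* modified (dirty) */

int
main(void)
{
	/* A fake 64-bit PAE pte: frame address plus flag bits. */
	uint64_t pte = 0x123456789000ULL | PG_RW | PG_A | PG_M;
	uint32_t lo;

	memcpy(&lo, &pte, sizeof(lo));		/* low word on little-endian x86 */
	lo &= ~(uint32_t)(PG_RW | PG_M);	/* same masking the patch applies */
	memcpy(&pte, &lo, sizeof(lo));

	assert((pte & (PG_RW | PG_M)) == 0);	/* write/dirty bits cleared */
	assert((pte & PG_A) != 0);		/* accessed bit untouched */
	assert(pte >> 12 == 0x123456789ULL);	/* frame address untouched */
	return (0);
}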