Mirror of https://git.FreeBSD.org/src.git (synced 2024-12-02 08:42:48 +00:00)
Simplify both the invocation and the implementation of vm_fault() for
wiring pages.

(Note: Claims made in the comments about the handling of breakpoints in
wired pages have been false for roughly a decade. This and another bug
involving breakpoints will be fixed in coming changes.)

Reviewed by:	kib
commit 2db65ab46e (parent 12036bce1d)

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=199490
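For orientation before the hunks: the old interface made every wiring caller choose between VM_FAULT_USER_WIRE and VM_FAULT_CHANGE_WIRING and pick a matching protection, while the new one uses a single flag. A minimal before/after sketch, condensed from the vm_fault_wire() hunks below rather than a drop-in fragment:

	/* Before: the wiring flavor is encoded in both arguments. */
	rv = vm_fault(map, va,
	    user_wire ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
	    user_wire ? VM_FAULT_USER_WIRE : VM_FAULT_CHANGE_WIRING);

	/*
	 * After: one flag, no protection juggling; vm_map_lookup() now
	 * rejects entries with no access rights (see the vm_map.c hunk).
	 */
	rv = vm_fault(map, va, VM_PROT_NONE, VM_FAULT_CHANGE_WIRING);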
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -58,7 +58,7 @@ int vm_fault(vm_map_t, vm_offset_t, vm_prot_t, int);
 void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t,
     vm_ooffset_t *);
 void vm_fault_unwire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
-int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
+int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
 int vm_forkproc(struct thread *, struct proc *, struct thread *, struct vmspace *, int);
 void vm_waitproc(struct proc *);
 int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t);
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -185,7 +185,7 @@ unlock_and_deallocate(struct faultstate *fs)
  * default objects are zero-fill, there is no real pager.
  */
 #define	TRYPAGER	(fs.object->type != OBJT_DEFAULT && \
-			(((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired))
+			((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 || wired))
 
 /*
  * vm_fault:
@@ -238,31 +238,15 @@ RetryFault:;
 	result = vm_map_lookup(&fs.map, vaddr, fault_type, &fs.entry,
 	    &fs.first_object, &fs.first_pindex, &prot, &wired);
 	if (result != KERN_SUCCESS) {
-		if (result != KERN_PROTECTION_FAILURE ||
-		    (fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE) {
-			if (growstack && result == KERN_INVALID_ADDRESS &&
-			    map != kernel_map && curproc != NULL) {
-				result = vm_map_growstack(curproc, vaddr);
-				if (result != KERN_SUCCESS)
-					return (KERN_FAILURE);
-				growstack = FALSE;
-				goto RetryFault;
-			}
-			return (result);
+		if (growstack && result == KERN_INVALID_ADDRESS &&
+		    map != kernel_map) {
+			result = vm_map_growstack(curproc, vaddr);
+			if (result != KERN_SUCCESS)
+				return (KERN_FAILURE);
+			growstack = FALSE;
+			goto RetryFault;
 		}
-
-		/*
-		 * If we are user-wiring a r/w segment, and it is COW, then
-		 * we need to do the COW operation.  Note that we don't COW
-		 * currently RO sections now, because it is NOT desirable
-		 * to COW .text.  We simply keep .text from ever being COW'ed
-		 * and take the heat that one cannot debug wired .text sections.
-		 */
-		result = vm_map_lookup(&fs.map, vaddr,
-		    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_OVERRIDE_WRITE,
-		    &fs.entry, &fs.first_object, &fs.first_pindex, &prot, &wired);
-		if (result != KERN_SUCCESS)
-			return (result);
+		return (result);
 	}
 
 	map_generation = fs.map->timestamp;
@@ -919,9 +903,8 @@ RetryFault:;
 	 * won't find it (yet).
 	 */
 	pmap_enter(fs.map->pmap, vaddr, fault_type, fs.m, prot, wired);
-	if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) {
+	if ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 && wired == 0)
 		vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
-	}
 	VM_OBJECT_LOCK(fs.object);
 	vm_page_lock_queues();
 	vm_page_flag_set(fs.m, PG_REFERENCED);
@@ -930,7 +913,7 @@ RetryFault:;
 	 * If the page is not wired down, then put it where the pageout daemon
 	 * can find it.
 	 */
-	if (fault_flags & VM_FAULT_WIRE_MASK) {
+	if (fault_flags & VM_FAULT_CHANGE_WIRING) {
 		if (wired)
 			vm_page_wire(fs.m);
 		else
@@ -1048,7 +1031,7 @@ vm_fault_quick(caddr_t v, int prot)
  */
 int
 vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
-    boolean_t user_wire, boolean_t fictitious)
+    boolean_t fictitious)
 {
 	vm_offset_t va;
 	int rv;
@@ -1059,9 +1042,7 @@ vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 	 * read-only sections.
 	 */
 	for (va = start; va < end; va += PAGE_SIZE) {
-		rv = vm_fault(map, va,
-		    user_wire ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
-		    user_wire ? VM_FAULT_USER_WIRE : VM_FAULT_CHANGE_WIRING);
+		rv = vm_fault(map, va, VM_PROT_NONE, VM_FAULT_CHANGE_WIRING);
 		if (rv) {
 			if (va != start)
 				vm_fault_unwire(map, start, va, fictitious);
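Pulled together from the two vm_fault_wire() hunks above, the resulting loop reads roughly as follows. The success return and the error-unwind return value are assumptions from context, cut off by the hunk boundaries rather than shown in this diff:

int
vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    boolean_t fictitious)
{
	vm_offset_t va;
	int rv;

	for (va = start; va < end; va += PAGE_SIZE) {
		/* One wiring fault per page; no user/kernel distinction. */
		rv = vm_fault(map, va, VM_PROT_NONE, VM_FAULT_CHANGE_WIRING);
		if (rv) {
			/* Unwind the pages this call already wired. */
			if (va != start)
				vm_fault_unwire(map, start, va, fictitious);
			return (rv);		/* assumed */
		}
	}
	return (KERN_SUCCESS);			/* assumed */
}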
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -2381,7 +2381,7 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 			 */
 			vm_map_unlock(map);
 			rv = vm_fault_wire(map, saved_start, saved_end,
-			    user_wire, fictitious);
+			    fictitious);
 			vm_map_lock(map);
 			if (last_timestamp + 1 != map->timestamp) {
 				/*
@@ -3563,7 +3563,7 @@ RetryLookup:;
 	else
 		prot = entry->protection;
 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
-	if ((fault_type & prot) != fault_type) {
+	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
 		vm_map_unlock_read(map);
 		return (KERN_PROTECTION_FAILURE);
 	}
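Why vm_map_lookup() needs the extra clause: wiring faults now pass fault_type == VM_PROT_NONE, which is 0, so the subset test alone can no longer reject anything. An illustrative walk-through of both tests, with hypothetical values:

	/* fault_type = VM_PROT_NONE (0); entry with prot = VM_PROT_NONE. */
	/* Old test: (0 & 0) != 0 -> false, so the lookup would succeed.  */
	/* New test: ... || prot == VM_PROT_NONE                          */
	/*          -> the inaccessible entry fails with                  */
	/*             KERN_PROTECTION_FAILURE, as before the change.     */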
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -319,8 +319,6 @@ long vmspace_wired_count(struct vmspace *vmspace);
  */
 #define	VM_FAULT_NORMAL 0		/* Nothing special */
 #define	VM_FAULT_CHANGE_WIRING 1	/* Change the wiring as appropriate */
-#define	VM_FAULT_USER_WIRE 2		/* Likewise, but for user purposes */
-#define	VM_FAULT_WIRE_MASK (VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE)
 #define	VM_FAULT_DIRTY 8		/* Dirty the page */
 
 /*
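With VM_FAULT_USER_WIRE and VM_FAULT_WIRE_MASK retired, the surviving flags are used as follows. This summary is drawn from the hunks above, except the final comment, which reflects the usual split of responsibilities rather than anything shown in this diff:

/*
 * VM_FAULT_NORMAL        (0): ordinary fault.
 * VM_FAULT_CHANGE_WIRING (1): wire/unwire traffic; vm_fault_wire()
 *                             now passes it together with VM_PROT_NONE.
 * VM_FAULT_DIRTY         (8): dirty the page.
 *
 * User-versus-system wiring policy is handled at the vm_map_wire()
 * layer rather than inside vm_fault().
 */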