
vm_map_protect: allow setting prot and max_prot in one go.

This prevents a situation where another thread modifies map entry
permissions between setting max_prot, relocking, and then setting prot,
confusing the outcome of the operation.  E.g., you could get an error
that is not possible when the operation is performed atomically.

Also allow setting rwx for max_prot even if the map does not allow
setting effective rwx protection.

Reviewed by:	brooks, markj (previous version)
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D28117
Konstantin Belousov, 2021-01-12 14:43:39 +02:00
commit 0659df6fad, parent d89e1db5a3
8 changed files with 53 additions and 33 deletions
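For orientation, here is a minimal sketch (not part of the commit) of a kernel-side caller using the new interface to lower both the effective and the maximum protection in one call; map, start, and end are placeholders assumed to be set up elsewhere:

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <vm/vm.h>
    #include <vm/vm_map.h>

    vm_map_t map;           /* e.g. &p->p_vmspace->vm_map, as in the callers below */
    vm_offset_t start, end; /* page-aligned range chosen by the caller */
    int error;

    /*
     * Hypothetical caller: make [start, end) read-only and simultaneously
     * cap its maximum protection at read-only, so no other thread can
     * observe an intermediate state between the two updates.  With both
     * flags set, new_prot must be a subset of new_maxprot, otherwise
     * vm_map_protect() returns KERN_OUT_OF_BOUNDS.
     */
    error = vm_map_protect(map, start, end,
        VM_PROT_READ,           /* new_prot */
        VM_PROT_READ,           /* new_maxprot */
        VM_MAP_PROTECT_SET_PROT | VM_MAP_PROTECT_SET_MAXPROT);
    if (error != KERN_SUCCESS)
        printf("vm_map_protect: %d\n", error);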


@@ -158,7 +158,8 @@ exec_linux_imgact(struct image_params *imgp)
   * remove write enable on the 'text' part
   */
  error = vm_map_protect(&vmspace->vm_map, vmaddr,
-     vmaddr + a_out->a_text, VM_PROT_EXECUTE|VM_PROT_READ, TRUE);
+     vmaddr + a_out->a_text, 0, VM_PROT_EXECUTE | VM_PROT_READ,
+     VM_MAP_PROTECT_SET_MAXPROT);
  if (error)
      goto fail;
  } else {
@@ -185,7 +186,8 @@ exec_linux_imgact(struct image_params *imgp)
   * allow read/write of data
   */
  error = vm_map_protect(&vmspace->vm_map, vmaddr + a_out->a_text,
-     vmaddr + a_out->a_text + a_out->a_data, VM_PROT_ALL, FALSE);
+     vmaddr + a_out->a_text + a_out->a_data, VM_PROT_ALL, 0,
+     VM_MAP_PROTECT_SET_PROT);
  if (error)
      goto fail;


@@ -692,7 +692,7 @@ __elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset,
   */
  if ((prot & VM_PROT_WRITE) == 0)
      vm_map_protect(map, trunc_page(map_addr), round_page(map_addr +
-         map_len), prot, FALSE);
+         map_len), prot, 0, VM_MAP_PROTECT_SET_PROT);
  return (0);
  }


@@ -770,7 +770,8 @@ kern_proc_setrlimit(struct thread *td, struct proc *p, u_int which,
  addr = trunc_page(addr);
  size = round_page(size);
  (void)vm_map_protect(&p->p_vmspace->vm_map,
-     addr, addr + size, prot, FALSE);
+     addr, addr + size, prot, 0,
+     VM_MAP_PROTECT_SET_PROT);
  }
  }


@@ -1224,7 +1224,7 @@ link_elf_load_file(linker_class_t cls, const char* filename,
  error = vm_map_protect(kernel_map,
      (vm_offset_t)segbase,
      (vm_offset_t)segbase + round_page(segs[i]->p_memsz),
-     prot, FALSE);
+     prot, 0, VM_MAP_PROTECT_SET_PROT);
  if (error != KERN_SUCCESS) {
      error = ENOMEM;
      goto out;


@@ -219,7 +219,8 @@ link_elf_protect_range(elf_file_t ef, vm_offset_t start, vm_offset_t end,
  #endif
      return;
  }
- error = vm_map_protect(kernel_map, start, end, prot, FALSE);
+ error = vm_map_protect(kernel_map, start, end, prot, 0,
+     VM_MAP_PROTECT_SET_PROT);
  KASSERT(error == KERN_SUCCESS,
      ("link_elf_protect_range: vm_map_protect() returned %d", error));
  }


@@ -2733,14 +2733,12 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
  /*
   * vm_map_protect:
   *
-  * Sets the protection of the specified address
-  * region in the target map.  If "set_max" is
-  * specified, the maximum protection is to be set;
-  * otherwise, only the current protection is affected.
+  * Sets the protection and/or the maximum protection of the
+  * specified address region in the target map.
   */
  int
  vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
-     vm_prot_t new_prot, boolean_t set_max)
+     vm_prot_t new_prot, vm_prot_t new_maxprot, int flags)
  {
      vm_map_entry_t entry, first_entry, in_tran, prev_entry;
      vm_object_t obj;
@@ -2751,12 +2749,18 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
  if (start == end)
      return (KERN_SUCCESS);
+ if ((flags & (VM_MAP_PROTECT_SET_PROT | VM_MAP_PROTECT_SET_MAXPROT)) ==
+     (VM_MAP_PROTECT_SET_PROT | VM_MAP_PROTECT_SET_MAXPROT) &&
+     (new_prot & new_maxprot) != new_prot)
+         return (KERN_OUT_OF_BOUNDS);
  again:
  in_tran = NULL;
  vm_map_lock(map);
- if ((map->flags & MAP_WXORX) != 0 && (new_prot &
-     (VM_PROT_WRITE | VM_PROT_EXECUTE)) == (VM_PROT_WRITE |
+ if ((map->flags & MAP_WXORX) != 0 &&
+     (flags & VM_MAP_PROTECT_SET_PROT) != 0 &&
+     (new_prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == (VM_PROT_WRITE |
      VM_PROT_EXECUTE)) {
      vm_map_unlock(map);
      return (KERN_PROTECTION_FAILURE);
@@ -2786,7 +2790,12 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
      vm_map_unlock(map);
      return (KERN_INVALID_ARGUMENT);
  }
- if ((new_prot & entry->max_protection) != new_prot) {
+ if ((flags & VM_MAP_PROTECT_SET_PROT) == 0)
+     new_prot = entry->protection;
+ if ((flags & VM_MAP_PROTECT_SET_MAXPROT) == 0)
+     new_maxprot = entry->max_protection;
+ if ((new_prot & entry->max_protection) != new_prot ||
+     (new_maxprot & entry->max_protection) != new_maxprot) {
      vm_map_unlock(map);
      return (KERN_PROTECTION_FAILURE);
  }
@@ -2827,12 +2836,16 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
      return (rv);
  }
- if (set_max ||
+ if ((flags & VM_MAP_PROTECT_SET_PROT) == 0)
+     new_prot = entry->protection;
+ if ((flags & VM_MAP_PROTECT_SET_MAXPROT) == 0)
+     new_maxprot = entry->max_protection;
+ if ((flags & VM_MAP_PROTECT_SET_PROT) == 0 ||
      ((new_prot & ~entry->protection) & VM_PROT_WRITE) == 0 ||
      ENTRY_CHARGED(entry) ||
-     (entry->eflags & MAP_ENTRY_GUARD) != 0) {
+     (entry->eflags & MAP_ENTRY_GUARD) != 0)
          continue;
- }
  cred = curthread->td_ucred;
  obj = entry->object.vm_object;
@@ -2893,11 +2906,11 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
  old_prot = entry->protection;
- if (set_max)
-     entry->protection =
-         (entry->max_protection = new_prot) &
-         old_prot;
- else
+ if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0) {
+     entry->max_protection = new_maxprot;
+     entry->protection = new_maxprot & old_prot;
+ }
+ if ((flags & VM_MAP_PROTECT_SET_PROT) != 0)
      entry->protection = new_prot;
  /*

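The converted call sites above show the two single-flag modes of the new interface. A condensed sketch of both patterns follows; map, start, end, and the protection values are illustrative placeholders, not taken from the commit:

    /*
     * Change only the effective protection; max_protection is left as is
     * and the new_maxprot argument is ignored (the callers above pass 0).
     */
    error = vm_map_protect(map, start, end, VM_PROT_READ | VM_PROT_EXECUTE,
        0, VM_MAP_PROTECT_SET_PROT);

    /*
     * Change only the maximum protection; new_prot is ignored and the
     * entry's effective protection is clipped to the new maximum
     * (entry->protection = new_maxprot & old_prot, per the last vm_map.c
     * hunk above).
     */
    error = vm_map_protect(map, start, end, 0,
        VM_PROT_READ | VM_PROT_EXECUTE, VM_MAP_PROTECT_SET_MAXPROT);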

@@ -510,7 +510,12 @@ vm_map_entry_succ(vm_map_entry_t entry)
  for ((it) = vm_map_entry_first(map);		\
      (it) != &(map)->header;			\
      (it) = vm_map_entry_succ(it))
- int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
+ #define VM_MAP_PROTECT_SET_PROT		0x0001
+ #define VM_MAP_PROTECT_SET_MAXPROT	0x0002
+ int vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
+     vm_prot_t new_prot, vm_prot_t new_maxprot, int flags);
  int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
  void vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev,
      vm_map_entry_t entry);


@@ -653,6 +653,7 @@ kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot)
  vm_offset_t addr;
  vm_size_t pageoff;
  int vm_error, max_prot;
+ int flags;
  addr = addr0;
  if ((prot & ~(_PROT_ALL | PROT_MAX(_PROT_ALL))) != 0)
@@ -672,16 +673,11 @@ kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot)
  if (addr + size < addr)
      return (EINVAL);
- vm_error = KERN_SUCCESS;
- if (max_prot != 0) {
-     if ((max_prot & prot) != prot)
-         return (ENOTSUP);
-     vm_error = vm_map_protect(&td->td_proc->p_vmspace->vm_map,
-         addr, addr + size, max_prot, TRUE);
- }
- if (vm_error == KERN_SUCCESS)
-     vm_error = vm_map_protect(&td->td_proc->p_vmspace->vm_map,
-         addr, addr + size, prot, FALSE);
+ flags = VM_MAP_PROTECT_SET_PROT;
+ if (max_prot != 0)
+     flags |= VM_MAP_PROTECT_SET_MAXPROT;
+ vm_error = vm_map_protect(&td->td_proc->p_vmspace->vm_map,
+     addr, addr + size, prot, max_prot, flags);
  switch (vm_error) {
  case KERN_SUCCESS:
@@ -690,6 +686,8 @@ kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot)
      return (EACCES);
  case KERN_RESOURCE_SHORTAGE:
      return (ENOMEM);
+ case KERN_OUT_OF_BOUNDS:
+     return (ENOTSUP);
  }
  return (EINVAL);
  }
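At the system-call level, this change means an mprotect(2) request that carries PROT_MAX() bits is now applied by a single vm_map_protect() call instead of two. A small userland illustration (not part of the commit; assumes a FreeBSD system with PROT_MAX() support, 13.0 or later):

    #include <sys/mman.h>

    #include <err.h>
    #include <unistd.h>

    int
    main(void)
    {
        size_t len;
        char *p;

        len = getpagesize();
        p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE,
            -1, 0);
        if (p == MAP_FAILED)
            err(1, "mmap");

        /* Set prot and max_prot atomically: readable, never writable again. */
        if (mprotect(p, len, PROT_READ | PROT_MAX(PROT_READ)) == -1)
            err(1, "mprotect");

        /*
         * A request whose prot exceeds its own PROT_MAX() is rejected by
         * vm_map_protect() with KERN_OUT_OF_BOUNDS, reported as ENOTSUP.
         */
        if (mprotect(p, len, PROT_WRITE | PROT_MAX(PROT_READ)) == -1)
            warn("mprotect (expected failure)");

        return (0);
    }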