mirror of
https://git.FreeBSD.org/src.git
synced 2024-10-19 02:29:40 +00:00
Implement enforcing write XOR execute mapping policy.
It is checked in vm_map_insert() and vm_map_protect() that PROT_WRITE | PROT_EXEC are never specified together if the vm_map has the MAP_WXORX flag set. A FreeBSD feature-control flag allows a specific binary to request a W^X exemption, and there are per-ABI boolean sysctls kern.elf{32,64}.allow_wx to enable/disable the policy globally. Reviewed by: emaste, jhb Sponsored by: The FreeBSD Foundation Differential Revision: https://reviews.freebsd.org/D28050
This commit is contained in:
parent
2c52512caf
commit
2e1c94aa1f
@ -190,6 +190,11 @@ SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, sigfastblock,
|
||||
CTLFLAG_RWTUN, &__elfN(sigfastblock), 0,
|
||||
"enable sigfastblock for new processes");
|
||||
|
||||
static bool __elfN(allow_wx) = true;
|
||||
SYSCTL_BOOL(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, allow_wx,
|
||||
CTLFLAG_RWTUN, &__elfN(allow_wx), 0,
|
||||
"Allow pages to be mapped simultaneously writable and executable");
|
||||
|
||||
static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
|
||||
|
||||
#define aligned(a, t) (rounddown2((u_long)(a), sizeof(t)) == (u_long)(a))
|
||||
@ -1237,6 +1242,9 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
|
||||
imgp->map_flags |= MAP_ASLR_IGNSTART;
|
||||
}
|
||||
|
||||
if (!__elfN(allow_wx) && (fctl0 & NT_FREEBSD_FCTL_WXNEEDED) == 0)
|
||||
imgp->map_flags |= MAP_WXORX;
|
||||
|
||||
error = exec_new_vmspace(imgp, sv);
|
||||
vmspace = imgp->proc->p_vmspace;
|
||||
map = &vmspace->vm_map;
|
||||
|
@ -1074,12 +1074,12 @@ exec_new_vmspace(struct image_params *imgp, struct sysentvec *sv)
|
||||
pmap_remove_pages(vmspace_pmap(vmspace));
|
||||
vm_map_remove(map, vm_map_min(map), vm_map_max(map));
|
||||
/*
|
||||
* An exec terminates mlockall(MCL_FUTURE), ASLR state
|
||||
* must be re-evaluated.
|
||||
* An exec terminates mlockall(MCL_FUTURE).
|
||||
* ASLR and W^X states must be re-evaluated.
|
||||
*/
|
||||
vm_map_lock(map);
|
||||
vm_map_modflags(map, 0, MAP_WIREFUTURE | MAP_ASLR |
|
||||
MAP_ASLR_IGNSTART);
|
||||
MAP_ASLR_IGNSTART | MAP_WXORX);
|
||||
vm_map_unlock(map);
|
||||
} else {
|
||||
error = vmspace_exec(p, sv_minuser, sv->sv_maxuser);
|
||||
|
@ -1671,6 +1671,10 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
|
||||
if (start == end || !vm_map_range_valid(map, start, end))
|
||||
return (KERN_INVALID_ADDRESS);
|
||||
|
||||
if ((map->flags & MAP_WXORX) != 0 && (prot & (VM_PROT_WRITE |
|
||||
VM_PROT_EXECUTE)) == (VM_PROT_WRITE | VM_PROT_EXECUTE))
|
||||
return (KERN_PROTECTION_FAILURE);
|
||||
|
||||
/*
|
||||
* Find the entry prior to the proposed starting address; if it's part
|
||||
* of an existing entry, this range is bogus.
|
||||
@ -2751,6 +2755,13 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
|
||||
in_tran = NULL;
|
||||
vm_map_lock(map);
|
||||
|
||||
if ((map->flags & MAP_WXORX) != 0 && (new_prot &
|
||||
(VM_PROT_WRITE | VM_PROT_EXECUTE)) == (VM_PROT_WRITE |
|
||||
VM_PROT_EXECUTE)) {
|
||||
vm_map_unlock(map);
|
||||
return (KERN_PROTECTION_FAILURE);
|
||||
}
|
||||
|
||||
/*
|
||||
* Ensure that we are not concurrently wiring pages. vm_map_wire() may
|
||||
* need to fault pages into the map and will drop the map lock while
|
||||
|
@ -228,6 +228,7 @@ struct vm_map {
|
||||
#define MAP_ASLR 0x08 /* enabled ASLR */
|
||||
#define MAP_ASLR_IGNSTART 0x10
|
||||
#define MAP_REPLENISH 0x20
|
||||
#define MAP_WXORX 0x40 /* enforce W^X */
|
||||
|
||||
#ifdef _KERNEL
|
||||
#if defined(KLD_MODULE) && !defined(KLD_TIED)
|
||||
|
Loading…
Reference in New Issue
Block a user