Set the arm64 Execute-never bits in more places.

We need to set the Execute-never bits when mapping device memory as the
hardware may perform speculative instruction fetches.

Set the Privileged Execute-never bit on userspace memory to stop the
kernel from executing it if it is tricked into doing so.

Reviewed by:	kib
Sponsored by:	DARPA, AFRL
Differential Revision:	https://reviews.freebsd.org/D10382
Andrew Turner 2017-04-13 15:03:03 +00:00
parent d255847d9e
commit ad0b190e82
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=316761
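
The logic the diff below adds can be summarized in a small, self-contained
sketch. It is illustrative only, not FreeBSD source: the bit positions follow
the ARMv8 stage-1 page descriptor layout (UXN at bit 54, PXN at bit 53, AP[1]
at bit 6, AttrIndx at bits 4:2), while the helper name and the DEVICE_MEMORY
index value are placeholders.

#include <stdint.h>
#include <stdio.h>

#define	ATTR_UXN	(1ULL << 54)	/* Unprivileged (EL0) execute-never */
#define	ATTR_PXN	(1ULL << 53)	/* Privileged (EL1) execute-never */
#define	ATTR_XN		(ATTR_UXN | ATTR_PXN)
#define	ATTR_AP_USER	(1ULL << 6)	/* AP[1]: EL0 may access the page */
#define	ATTR_IDX(x)	((uint64_t)(x) << 2)	/* AttrIndx[2:0]: MAIR index */
#define	DEVICE_MEMORY	0		/* placeholder MAIR index for device */

/*
 * Compose the execute-never related attributes the way the patch does:
 * device memory is never a target of instruction fetches, and user
 * mappings are never executable by the kernel (PXN), even when userspace
 * itself is allowed to execute them.
 */
static uint64_t
xn_attrs(int memattr, int is_user, int is_exec)
{
	uint64_t attr = ATTR_IDX(memattr);

	/* Speculative fetches from device memory must be impossible. */
	if (!is_exec || memattr == DEVICE_MEMORY)
		attr |= ATTR_XN;
	if (is_user)
		/* The kernel must never execute user-controlled memory. */
		attr |= ATTR_AP_USER | ATTR_PXN;
	return (attr);
}

int
main(void)
{
	printf("kernel device mapping: %#jx\n",
	    (uintmax_t)xn_attrs(DEVICE_MEMORY, 0, 0));
	printf("user executable page:  %#jx\n",
	    (uintmax_t)xn_attrs(1, 1, 1));
	return (0);
}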


@@ -1127,7 +1127,7 @@ static void
 pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
 {
 	pd_entry_t *pde;
-	pt_entry_t *pte;
+	pt_entry_t *pte, attr;
 	vm_offset_t va;
 	int lvl;
@@ -1138,6 +1138,10 @@ pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
 	KASSERT((size & PAGE_MASK) == 0,
 	    ("pmap_kenter: Mapping is not page-sized"));
+	attr = ATTR_DEFAULT | ATTR_IDX(mode) | L3_PAGE;
+	if (mode == DEVICE_MEMORY)
+		attr |= ATTR_XN;
 	va = sva;
 	while (size != 0) {
 		pde = pmap_pde(kernel_pmap, va, &lvl);
@@ -1146,8 +1150,7 @@ pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
 		KASSERT(lvl == 2, ("pmap_kenter: Invalid level %d", lvl));
 		pte = pmap_l2_to_l3(pde, va);
-		pmap_load_store(pte, (pa & ~L3_OFFSET) | ATTR_DEFAULT |
-		    ATTR_IDX(mode) | L3_PAGE);
+		pmap_load_store(pte, (pa & ~L3_OFFSET) | attr);
 		PTE_SYNC(pte);
 		va += PAGE_SIZE;
@@ -1259,6 +1262,8 @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
 		m = ma[i];
 		pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_AP(ATTR_AP_RW) |
 		    ATTR_IDX(m->md.pv_memattr) | L3_PAGE;
+		if (m->md.pv_memattr == DEVICE_MEMORY)
+			pa |= ATTR_XN;
 		pte = pmap_l2_to_l3(pde, va);
 		pmap_load_store(pte, pa);
 		PTE_SYNC(pte);
@@ -2719,12 +2724,12 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	    L3_PAGE);
 	if ((prot & VM_PROT_WRITE) == 0)
 		new_l3 |= ATTR_AP(ATTR_AP_RO);
-	if ((prot & VM_PROT_EXECUTE) == 0)
+	if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY)
 		new_l3 |= ATTR_XN;
 	if ((flags & PMAP_ENTER_WIRED) != 0)
 		new_l3 |= ATTR_SW_WIRED;
 	if ((va >> 63) == 0)
-		new_l3 |= ATTR_AP(ATTR_AP_USER);
+		new_l3 |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
 	CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
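
On the (va >> 63) == 0 test above: arm64 addresses translated through
TTBR0_EL1 (user space) have their upper bits clear, while kernel addresses
translated through TTBR1_EL1 have them set, so a clear bit 63 identifies a
user mapping, which now receives ATTR_PXN alongside ATTR_AP(ATTR_AP_USER).
A tiny sketch of that check, with a hypothetical helper name:

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical helper: a VA with bit 63 clear lies in the lower (TTBR0,
 * user) half of the arm64 address space, so its mapping should carry
 * ATTR_AP(ATTR_AP_USER) and, after this change, ATTR_PXN as well.
 */
static bool
va_is_user(uint64_t va)
{

	return ((va >> 63) == 0);
}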
@@ -3127,8 +3132,10 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
 	    ATTR_AP(ATTR_AP_RO) | L3_PAGE;
-	if ((prot & VM_PROT_EXECUTE) == 0)
+	if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY)
 		pa |= ATTR_XN;
+	else if (va < VM_MAXUSER_ADDRESS)
+		pa |= ATTR_PXN;
 	/*
 	 * Now validate mapping with RO protection
@@ -4263,6 +4270,8 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
 			l3 = pmap_load(pte);
 			l3 &= ~ATTR_IDX_MASK;
 			l3 |= ATTR_IDX(mode);
+			if (mode == DEVICE_MEMORY)
+				l3 |= ATTR_XN;
 			pmap_update_entry(kernel_pmap, pte, l3, tmpva,
 			    PAGE_SIZE);
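
The last hunk covers mappings whose memory attribute is changed after the
fact: when an existing leaf entry is re-typed to DEVICE_MEMORY, the
execute-never bits have to be added along with the new attribute index. A
minimal sketch of that read-modify-write, using the same placeholder bit
definitions as in the earlier sketch (illustrative, not FreeBSD source):

#include <stdint.h>

#define	ATTR_IDX_SHIFT	2
#define	ATTR_IDX_MASK	(7ULL << ATTR_IDX_SHIFT)	/* AttrIndx[2:0] */
#define	ATTR_IDX(x)	((uint64_t)(x) << ATTR_IDX_SHIFT)
#define	ATTR_XN		((1ULL << 54) | (1ULL << 53))	/* UXN | PXN */
#define	DEVICE_MEMORY	0	/* placeholder MAIR index for device */

/*
 * Re-type an existing leaf descriptor: swap in the new memory-attribute
 * index and, when the new type is device memory, forbid instruction
 * fetches as well.  The caller would still have to write the entry back
 * and invalidate the TLB (pmap_update_entry() in the real code).
 */
static uint64_t
retype_l3(uint64_t l3, int mode)
{

	l3 &= ~ATTR_IDX_MASK;
	l3 |= ATTR_IDX(mode);
	if (mode == DEVICE_MEMORY)
		l3 |= ATTR_XN;
	return (l3);
}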