Extract the logic from pmap_kextract

This allows us to use it when we only need to check whether a kernel
virtual address is valid, for example when checking whether an address
in the DMAP region is mapped.

Reviewed by:	kib, markj
Sponsored by:	Innovate UK
Differential Revision:	https://reviews.freebsd.org/D27621
Commit f64329bcdc by Andrew Turner, 2021-01-15 18:48:43 +00:00 (parent 7c84a7405b)
3 changed files with 62 additions and 13 deletions

--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c

@@ -1365,16 +1365,42 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 	return (m);
 }
 
-vm_paddr_t
-pmap_kextract(vm_offset_t va)
+/*
+ * Walks the page tables to translate a kernel virtual address to a
+ * physical address. Returns true if the kva is valid and stores the
+ * physical address in pa if it is not NULL.
+ */
+bool
+pmap_klookup(vm_offset_t va, vm_paddr_t *pa)
 {
 	pt_entry_t *pte, tpte;
+	register_t intr;
+	uint64_t par;
+
+	/*
+	 * Disable interrupts so we don't get interrupted between asking
+	 * for address translation, and getting the result back.
+	 */
+	intr = intr_disable();
+	par = arm64_address_translate_s1e1r(va);
+	intr_restore(intr);
+
+	if (PAR_SUCCESS(par)) {
+		if (pa != NULL)
+			*pa = (par & PAR_PA_MASK) | (va & PAR_LOW_MASK);
+		return (true);
+	}
 
-	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
-		return (DMAP_TO_PHYS(va));
+	/*
+	 * Fall back to walking the page table. The address translation
+	 * instruction may fail when the page is in a break-before-make
+	 * sequence. As we only clear the valid bit in said sequence we
+	 * can walk the page table to find the physical address.
+	 */
+
 	pte = pmap_l1(kernel_pmap, va);
 	if (pte == NULL)
-		return (0);
+		return (false);
 
 	/*
 	 * A concurrent pmap_update_entry() will clear the entry's valid bit
@@ -1384,20 +1410,41 @@ pmap_kextract(vm_offset_t va)
 	 */
 	tpte = pmap_load(pte);
 	if (tpte == 0)
-		return (0);
-	if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK)
-		return ((tpte & ~ATTR_MASK) | (va & L1_OFFSET));
+		return (false);
+	if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) {
+		if (pa != NULL)
+			*pa = (tpte & ~ATTR_MASK) | (va & L1_OFFSET);
+		return (true);
+	}
 	pte = pmap_l1_to_l2(&tpte, va);
 	tpte = pmap_load(pte);
 	if (tpte == 0)
-		return (0);
-	if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK)
-		return ((tpte & ~ATTR_MASK) | (va & L2_OFFSET));
+		return (false);
+	if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) {
+		if (pa != NULL)
+			*pa = (tpte & ~ATTR_MASK) | (va & L2_OFFSET);
+		return (true);
+	}
 	pte = pmap_l2_to_l3(&tpte, va);
 	tpte = pmap_load(pte);
 	if (tpte == 0)
-		return (0);
-	return ((tpte & ~ATTR_MASK) | (va & L3_OFFSET));
+		return (false);
+	if (pa != NULL)
+		*pa = (tpte & ~ATTR_MASK) | (va & L3_OFFSET);
+	return (true);
+}
+
+vm_paddr_t
+pmap_kextract(vm_offset_t va)
+{
+	vm_paddr_t pa;
+
+	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
+		return (DMAP_TO_PHYS(va));
+
+	if (pmap_klookup(va, &pa) == false)
+		return (0);
+	return (pa);
 }
 
 /***************************************************
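
To make the resulting split concrete, here is a caller's-eye sketch.
This is not part of the commit; show_translation is a hypothetical
helper and assumes the usual kernel headers are in scope:

	static void
	show_translation(vm_offset_t va)
	{
		vm_paddr_t pa;

		/* Full translation: pmap_kextract() returns 0 when va is unmapped. */
		printf("pmap_kextract: %#lx\n", pmap_kextract(va));

		/* Probe and translate in one call; *pa is stored only on success. */
		if (pmap_klookup(va, &pa))
			printf("%#lx maps to %#lx\n", va, pa);

		/* Probe only: pass NULL when the physical address is not needed. */
		if (pmap_klookup(va, NULL))
			printf("%#lx is mapped\n", va);
	}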
@@ -6833,7 +6880,7 @@ pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
 		 * critical section. Therefore, we must check the
 		 * address without acquiring the kernel pmap's lock.
 		 */
-		if (pmap_kextract(far) != 0)
+		if (pmap_klookup(far, NULL))
 			rv = KERN_SUCCESS;
 	} else {
 		PMAP_LOCK(pmap);
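
A note on the pmap_fault() hunk above: the old test overloads a return
value of 0 as "no mapping", which in principle is ambiguous with a
mapping at physical address zero, and it computes a physical address
the caller never uses. The boolean interface avoids both. A recap of
the change in isolation:

	/* Before: 0 doubles as failure and as a physical address. */
	if (pmap_kextract(far) != 0)
		rv = KERN_SUCCESS;

	/* After: pass NULL because only validity matters here. */
	if (pmap_klookup(far, NULL))
		rv = KERN_SUCCESS;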

--- a/sys/arm64/include/armreg.h
+++ b/sys/arm64/include/armreg.h

@@ -743,6 +743,7 @@
 #define	PAR_F			(0x1 << PAR_F_SHIFT)
 #define	PAR_SUCCESS(x)		(((x) & PAR_F) == 0)
 /* When PAR_F == 0 (success) */
+#define	PAR_LOW_MASK		0xfff
 #define	PAR_SH_SHIFT		7
 #define	PAR_SH_MASK		(0x3 << PAR_SH_SHIFT)
 #define	PAR_NS_SHIFT		9
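
For context on how the new mask is used, a sketch restating the
composition done in pmap_klookup() above; par_to_pa is a hypothetical
helper and assumes PAR_SUCCESS(par) has already been checked:

	static vm_paddr_t
	par_to_pa(uint64_t par, vm_offset_t va)
	{

		/*
		 * On a successful AT S1E1R translation, PAR_EL1 reports the
		 * output address at 4K granularity, so the low 12 bits of the
		 * physical address come straight from the virtual address;
		 * PAR_LOW_MASK (0xfff) selects them.
		 */
		return ((par & PAR_PA_MASK) | (va & PAR_LOW_MASK));
	}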

--- a/sys/arm64/include/pmap.h
+++ b/sys/arm64/include/pmap.h

@@ -167,6 +167,7 @@ void	pmap_bootstrap(vm_offset_t, vm_offset_t, vm_paddr_t, vm_size_t);
 int	pmap_change_attr(vm_offset_t va, vm_size_t size, int mode);
 void	pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode);
 void	pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t);
+bool	pmap_klookup(vm_offset_t va, vm_paddr_t *pa);
 vm_paddr_t pmap_kextract(vm_offset_t va);
 void	pmap_kremove(vm_offset_t);
 void	pmap_kremove_device(vm_offset_t, vm_size_t);
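
Finally, to tie the new prototype back to the commit message's
motivating example, a minimal caller sketch; dmap_addr_is_mapped and
its KASSERT message are illustrative, not part of the commit:

	/*
	 * True if a DMAP virtual address is actually backed by a mapping.
	 * pmap_kextract() cannot answer this for DMAP addresses because it
	 * short-circuits to DMAP_TO_PHYS() without consulting the page
	 * tables, while pmap_klookup() always performs the lookup.
	 */
	static bool
	dmap_addr_is_mapped(vm_offset_t va)
	{

		KASSERT(va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS,
		    ("%s: %#lx is not a DMAP address", __func__, va));
		return (pmap_klookup(va, NULL));
	}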