mirror of https://git.FreeBSD.org/src.git

Document places we assume that physical memory is direct-mapped at zero by
using a new macro PHYS_TO_DMAP, which deliberately has the same name as the
equivalent macro on amd64. This also sets the stage for moving the direct
map to another base address.
Nathan Whitehorn 2018-01-13 23:14:53 +00:00
parent 26c1d774b5
commit 68b9c019aa
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=327950
5 changed files with 33 additions and 17 deletions
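
The macro introduced below is an identity mapping for now; the value of funneling physical addresses through it is that a later change can rebase the direct map without touching every caller. As a rough sketch of where this could go (not part of this commit; DMAP_BASE_ADDRESS and the OR/AND-NOT encoding are assumptions for illustration), the powerpc definitions might eventually look something like the amd64 ones:

/*
 * Hypothetical follow-up, for illustration only: a direct map based at
 * DMAP_BASE_ADDRESS rather than at physical address zero.  Because every
 * caller already gates on hw_direct_map and goes through these macros,
 * only the two definitions below would need to change.
 */
#define DMAP_BASE_ADDRESS	0xc000000000000000UL	/* assumed base */
#define PHYS_TO_DMAP(pa)	((pa) | DMAP_BASE_ADDRESS)
#define DMAP_TO_PHYS(va)	((va) & ~DMAP_BASE_ADDRESS)

With the identity definitions added in this commit, both macros compile away, so the conversions below cost nothing today.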

View File

@@ -540,7 +540,8 @@ moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
DISABLE_TRANS(msr);
for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
/* If this address is direct-mapped, skip remapping */
-if (hw_direct_map && translations[i].om_va == pa_base &&
+if (hw_direct_map &&
+    translations[i].om_va == PHYS_TO_DMAP(pa_base) &&
moea64_calc_wimg(pa_base + off, VM_MEMATTR_DEFAULT) == LPTE_M)
continue;
@@ -633,7 +634,7 @@ moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
pvo = alloc_pvo_entry(1 /* bootstrap */);
pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE;
-init_pvo_entry(pvo, kernel_pmap, pa);
+init_pvo_entry(pvo, kernel_pmap, PHYS_TO_DMAP(pa));
/*
* Set memory access as guarded if prefetch within
@@ -1111,7 +1112,8 @@ moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
src = VM_PAGE_TO_PHYS(msrc);
if (hw_direct_map) {
-bcopy((void *)src, (void *)dst, PAGE_SIZE);
+bcopy((void *)PHYS_TO_DMAP(src), (void *)PHYS_TO_DMAP(dst),
+    PAGE_SIZE);
} else {
mtx_lock(&moea64_scratchpage_mtx);
@@ -1136,11 +1138,13 @@ moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
while (xfersize > 0) {
a_pg_offset = a_offset & PAGE_MASK;
cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
-a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
+a_cp = (char *)PHYS_TO_DMAP(
+    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])) +
a_pg_offset;
b_pg_offset = b_offset & PAGE_MASK;
cnt = min(cnt, PAGE_SIZE - b_pg_offset);
-b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
+b_cp = (char *)PHYS_TO_DMAP(
+    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])) +
b_pg_offset;
bcopy(a_cp, b_cp, cnt);
a_offset += cnt;
@@ -1200,7 +1204,7 @@ moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
panic("moea64_zero_page: size + off > PAGE_SIZE");
if (hw_direct_map) {
-bzero((caddr_t)pa + off, size);
+bzero((caddr_t)PHYS_TO_DMAP(pa) + off, size);
} else {
mtx_lock(&moea64_scratchpage_mtx);
moea64_set_scratchpage_pa(mmu, 0, pa);
@@ -1224,7 +1228,7 @@ moea64_zero_page(mmu_t mmu, vm_page_t m)
moea64_set_scratchpage_pa(mmu, 0, pa);
va = moea64_scratchpage_va[0];
} else {
-va = pa;
+va = PHYS_TO_DMAP(pa);
}
for (off = 0; off < PAGE_SIZE; off += cacheline_size)
@@ -1241,7 +1245,7 @@ moea64_quick_enter_page(mmu_t mmu, vm_page_t m)
vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
if (hw_direct_map)
-return (pa);
+return (PHYS_TO_DMAP(pa));
/*
* MOEA64_PTE_REPLACE does some locking, so we can't just grab
@@ -1402,7 +1406,7 @@ moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
} else if (pmap == kernel_pmap) {
__syncicache((void *)va, sz);
} else if (hw_direct_map) {
-__syncicache((void *)pa, sz);
+__syncicache((void *)PHYS_TO_DMAP(pa), sz);
} else {
/* Use the scratch page to set up a temp mapping */
@@ -1565,7 +1569,7 @@ moea64_init(mmu_t mmu)
if (!hw_direct_map) {
installed_mmu = mmu;
-uma_zone_set_allocf(moea64_pvo_zone,moea64_uma_page_alloc);
+uma_zone_set_allocf(moea64_pvo_zone, moea64_uma_page_alloc);
}
#ifdef COMPAT_FREEBSD32
@@ -1855,7 +1859,7 @@ moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
if (moea64_calc_wimg(va, VM_MEMATTR_DEFAULT) != LPTE_M)
break;
if (va == pa_end)
-return (pa_start);
+return (PHYS_TO_DMAP(pa_start));
}
sva = *virt;
va = sva;

View File

@@ -497,7 +497,7 @@ slb_uma_real_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
if (m == NULL)
return (NULL);
-va = (void *) VM_PAGE_TO_PHYS(m);
+va = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
if (!hw_direct_map)
pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));

View File

@@ -238,5 +238,15 @@ struct pmap_physseg {
#define SFBUF_NOMD
#define SFBUF_OPTIONAL_DIRECT_MAP hw_direct_map
#define SFBUF_PHYS_DMAP(x) (x)
+/*
+ * We (usually) have a direct map of all physical memory. All
+ * uses of this macro must be gated by a check on hw_direct_map!
+ * The location of the direct map may not be 1:1 in future, so use
+ * of the macro is recommended; it may also grow an assert that hw_direct_map
+ * is set.
+ */
+#define PHYS_TO_DMAP(x) x
+#define DMAP_TO_PHYS(x) x
#endif /* _MACHINE_VMPARAM_H_ */
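
The comment in the new header block explicitly leaves room for the macro to grow an assert that hw_direct_map is set. A minimal sketch of what such a hardened definition could look like (not part of this commit; the statement-expression form and the KASSERT message are illustrative assumptions, modeled on how amd64 checks its DMAP bounds):

/*
 * Hypothetical assert-carrying variant: panic if the macro is used
 * while no direct map is available.  Assumes hw_direct_map is visible
 * wherever the macro is expanded.
 */
#define PHYS_TO_DMAP(x) ({						\
	KASSERT(hw_direct_map,						\
	    ("PHYS_TO_DMAP used without a direct map"));		\
	(x);								\
})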

View File

@@ -71,10 +71,12 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
if ((vm_offset_t)pa != pa)
return (NULL);
-va = (void *)(vm_offset_t)pa;
-if (!hw_direct_map)
-pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));
+if (!hw_direct_map) {
+pmap_kenter(pa, pa);
+va = (void *)(vm_offset_t)pa;
+} else {
+va = (void *)(vm_offset_t)PHYS_TO_DMAP(pa);
+}
if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
bzero(va, PAGE_SIZE);

View File

@@ -226,7 +226,7 @@ static int
ps3_smp_start_cpu(platform_t plat, struct pcpu *pc)
{
/* kernel is spinning on 0x40 == -1 right now */
-volatile uint32_t *secondary_spin_sem = (uint32_t *)(0x40);
+volatile uint32_t *secondary_spin_sem = (uint32_t *)PHYS_TO_DMAP(0x40);
int remote_pir = pc->pc_hwref;
int timeout;