
Implement pmap_change_attr() for PowerPC (Book-E only for now)

Summary:
Some drivers have special memory mapping requirements.  X86 addresses this with
the pmap_change_attr() API, which DRM uses to change the mapping attributes of
the GART and other memory regions.  Implement the same function for PowerPC.
AIM does not currently need it, but will in the future for DRM, so a default
no-op implementation is provided there to keep business as usual.  Book-E has
drivers coming that do require non-default memory coherency.  In this case, the
Datapath Acceleration Architecture (DPAA) based Ethernet controller has two
regions for its buffer portals: cache-inhibited and cache-enabled.  By default,
device memory is mapped cache-inhibited; if the cache-enabled memory regions
are mapped cache-inhibited, an alignment exception is thrown on access.
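
For illustration, a minimal driver-side sketch (not part of this change) of how
the new API might be used to remap a cache-enabled portal region; portal_pa and
PORTAL_SIZE are hypothetical placeholders:

	/*
	 * Hypothetical sketch: pmap_mapdev() maps device memory
	 * cache-inhibited by default, so flip the cache-enabled portal
	 * region to VM_MEMATTR_CACHEABLE afterwards.
	 */
	void *va;
	int error;

	va = pmap_mapdev(portal_pa, PORTAL_SIZE);
	error = pmap_change_attr((vm_offset_t)va, PORTAL_SIZE,
	    VM_MEMATTR_CACHEABLE);
	if (error != 0)
		pmap_unmapdev((vm_offset_t)va, PORTAL_SIZE);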

Test Plan:
Tested with a new driver to be added after this one (the DPAA dTSEC Ethernet
driver).  No alignment exceptions are thrown, and the driver works as expected
with this change.

Reviewed By:	nwhitehorn
Sponsored by:	Alex Perez/Inertial Computing
Differential Revision: https://reviews.freebsd.org/D5471
Author:	Justin Hibbits
Date:	2016-02-27 20:39:36 +00:00
Commit:	0f7aeab0e7 (parent f7dc5935d3)
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=296142

5 changed files with 94 additions and 0 deletions

@@ -340,6 +340,8 @@ static void mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t,
static void mmu_booke_scan_init(mmu_t);
static vm_offset_t mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m);
static void mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr);
static int mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr,
    vm_size_t sz, vm_memattr_t mode);

static mmu_method_t mmu_booke_methods[] = {
	/* pmap dispatcher interface */
@@ -392,6 +394,7 @@ static mmu_method_t mmu_booke_methods[] = {
	MMUMETHOD(mmu_kextract, mmu_booke_kextract),
/*	MMUMETHOD(mmu_kremove, mmu_booke_kremove), */
	MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev),
	MMUMETHOD(mmu_change_attr, mmu_booke_change_attr),

	/* dumpsys() support */
	MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map),
@@ -419,6 +422,8 @@ tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
		return (MAS2_I);
	case VM_MEMATTR_WRITE_THROUGH:
		return (MAS2_W | MAS2_M);
	case VM_MEMATTR_CACHEABLE:
		return (MAS2_M);
	}
}
@@ -2900,6 +2905,63 @@ mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
	return (0);
}

static int
mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
    vm_memattr_t mode)
{
	vm_offset_t va;
	pte_t *pte;
	int i, j;

	/* Check TLB1 mappings */
	for (i = 0; i < tlb1_idx; i++) {
		if (!(tlb1[i].mas1 & MAS1_VALID))
			continue;
		if (addr >= tlb1[i].virt && addr < tlb1[i].virt + tlb1[i].size)
			break;
	}
	if (i < tlb1_idx) {
		/* Only allow full mappings to be modified for now. */
		/* Validate the range. */
		for (j = i, va = addr; va < addr + sz; va += tlb1[j].size, j++) {
			if (va != tlb1[j].virt || (sz - (va - addr) < tlb1[j].size))
				return (EINVAL);
		}
		for (va = addr; va < addr + sz; va += tlb1[i].size, i++) {
			tlb1[i].mas2 &= ~MAS2_WIMGE_MASK;
			tlb1[i].mas2 |= tlb_calc_wimg(tlb1[i].phys, mode);
			/*
			 * Write it out to the TLB.  Should really re-sync with other
			 * cores.
			 */
			tlb1_write_entry(i);
		}
		return (0);
	}

	/* Not in TLB1, try through pmap */
	/* First validate the range. */
	for (va = addr; va < addr + sz; va += PAGE_SIZE) {
		pte = pte_find(mmu, kernel_pmap, va);
		if (pte == NULL || !PTE_ISVALID(pte))
			return (EINVAL);
	}
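
	/*
	 * All PTEs in the range are present and valid; update the MAS2
	 * WIMG attribute bits in each PTE under the TLB locks and flush
	 * any stale TLB0 entries so the new attribute takes effect.
	 */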
	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	for (va = addr; va < addr + sz; va += PAGE_SIZE) {
		pte = pte_find(mmu, kernel_pmap, va);
		*pte &= ~(PTE_MAS2_MASK << PTE_MAS2_SHIFT);
		*pte |= tlb_calc_wimg(PTE_PA(pte), mode) << PTE_MAS2_SHIFT;
		tlb0_flush_entry(va);
	}

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	return (0);
}

/**************************************************************************/
/* TID handling */
/**************************************************************************/

@@ -238,6 +238,7 @@ void *pmap_mapdev(vm_paddr_t, vm_size_t);
void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
void pmap_unmapdev(vm_offset_t, vm_size_t);
void pmap_page_set_memattr(vm_page_t, vm_memattr_t);
int pmap_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
void pmap_deactivate(struct thread *);
vm_paddr_t pmap_kextract(vm_offset_t);
int pmap_dev_direct_mapped(vm_paddr_t, vm_size_t);

@@ -74,6 +74,7 @@
#define	MAS2_M		0x00000004
#define	MAS2_G		0x00000002
#define	MAS2_E		0x00000001
#define	MAS2_WIMGE_MASK	0x0000001F

#define	MAS3_RPN	0xFFFFF000
#define	MAS3_RPN_SHIFT	12

@@ -124,6 +124,12 @@ CODE {
	{
		return;
	}

	static int mmu_null_change_attr(mmu_t mmu, vm_offset_t va,
	    vm_size_t sz, vm_memattr_t mode)
	{
		return (0);
	}
};
@@ -956,3 +962,20 @@ METHOD void quick_remove_page {
	vm_offset_t _va;
};

/**
 * @brief Change the specified virtual address range's memory type.
 *
 * @param _va		The virtual base address to change
 *
 * @param _sz		Size of the region to change
 *
 * @param _mode		New mode to set on the VA range
 *
 * @retval error	0 on success, EINVAL or ENOMEM on error.
 */
METHOD int change_attr {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_size_t	_sz;
	vm_memattr_t	_mode;
} DEFAULT mmu_null_change_attr;

@@ -564,6 +564,13 @@ pmap_quick_remove_page(vm_offset_t addr)
	MMU_QUICK_REMOVE_PAGE(mmu_obj, addr);
}

int
pmap_change_attr(vm_offset_t addr, vm_size_t size, vm_memattr_t mode)
{
	CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, addr, size, mode);
	return (MMU_CHANGE_ATTR(mmu_obj, addr, size, mode));
}

/*
 * MMU install routines.  Highest priority wins, equal priority also
 * overrides allowing last-set to win.