1
0
mirror of https://git.FreeBSD.org/src.git synced 2024-12-28 11:57:28 +00:00

Begin reducing code duplication in arm pmap.c and pmap-v6.c by factoring

out common code related to mapping device memory into a new devmap.c file.

Remove the growing duplication of code that used pmap_devmap_find_pa() and
then did some math with the returned results to generate a virtual address,
and likewise in reverse to get a physical address.  Now there are a pair
of functions, arm_devmap_vtop() and arm_devmap_ptov(), to do that.  The
bus_space_map() implementations are rewritten in terms of these.
This commit is contained in:
Ian Lepore 2013-11-04 19:44:37 +00:00
parent 538c3c05e7
commit 13a98c8536
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=257648
10 changed files with 255 additions and 277 deletions

View File

@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_extern.h>
#include <machine/bus.h>
#include <machine/devmap.h>
/* Prototypes for all the bus_space structure functions */
bs_protos(generic);
@ -58,36 +59,20 @@ int
generic_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags,
bus_space_handle_t *bshp)
{
const struct pmap_devmap *pd;
vm_paddr_t startpa, endpa, pa, offset;
vm_offset_t va;
pt_entry_t *pte;
if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
/* Device was statically mapped. */
*bshp = pd->pd_va + (bpa - pd->pd_pa);
return (0);
}
endpa = round_page(bpa + size);
offset = bpa & PAGE_MASK;
startpa = trunc_page(bpa);
va = kva_alloc(endpa - startpa);
if (va == 0)
return (ENOMEM);
*bshp = va + offset;
for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
pmap_kenter(va, pa);
pte = vtopte(va);
if (!(flags & BUS_SPACE_MAP_CACHEABLE)) {
*pte &= ~L2_S_CACHE_MASK;
PTE_SYNC(pte);
}
}
void *va;
/*
* Look up the address in the static device mappings. If it's not
* there, establish a new dynamic mapping.
*
* We don't even examine the passed-in flags. For ARM, the CACHEABLE
* flag doesn't make sense (we create PTE_DEVICE mappings), and the
* LINEAR flag is just implied because we use kva_alloc(size).
*/
if ((va = arm_devmap_ptov(bpa, size)) == NULL)
if ((va = pmap_mapdev(bpa, size)) == NULL)
return (ENOMEM);
*bshp = (bus_space_handle_t)va;
return (0);
}
@ -104,21 +89,13 @@ generic_bs_alloc(void *t, bus_addr_t rstart, bus_addr_t rend, bus_size_t size,
void
generic_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size)
{
vm_offset_t va, endva, origva;
if (pmap_devmap_find_va((vm_offset_t)h, size) != NULL) {
/* Device was statically mapped; nothing to do. */
return;
}
endva = round_page((vm_offset_t)h + size);
origva = va = trunc_page((vm_offset_t)h);
while (va < endva) {
pmap_kremove(va);
va += PAGE_SIZE;
}
kva_free(origva, endva - origva);
/*
* If the region is static-mapped do nothing, otherwise remove the
* dynamic mapping.
*/
if (arm_devmap_vtop((void*)h, size) == DEVMAP_PADDR_NOTFOUND)
pmap_unmapdev((vm_offset_t)h, size);
}
void

150
sys/arm/arm/devmap.c Normal file
View File

@ -0,0 +1,150 @@
/*-
* Copyright (c) 2013 Ian Lepore <ian@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Routines for mapping device memory.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <machine/devmap.h>
static const struct pmap_devmap *devmap_table;
/*
 * Establish every static mapping listed in the given devmap table, and
 * remember the table so that later translations (ptov/vtop) can consult it.
 * The table is terminated by an entry whose pd_size is zero.
 */
void
pmap_devmap_bootstrap(vm_offset_t l1pt, const struct pmap_devmap *table)
{
	int i;

	devmap_table = table;
	for (i = 0; devmap_table[i].pd_size != 0; ++i) {
		pmap_map_chunk(l1pt, devmap_table[i].pd_va,
		    devmap_table[i].pd_pa, devmap_table[i].pd_size,
		    devmap_table[i].pd_prot, devmap_table[i].pd_cache);
	}
}
/*
 * Translate a physical address to the virtual address of its static mapping.
 * Returns NULL if no single static mapping covers the whole [pa, pa+size)
 * range (or if no devmap table has been registered yet).
 */
void *
arm_devmap_ptov(vm_paddr_t pa, vm_size_t size)
{
	const struct pmap_devmap *pd;

	if (devmap_table != NULL) {
		for (pd = devmap_table; pd->pd_size != 0; ++pd) {
			if (pa < pd->pd_pa ||
			    pa + size > pd->pd_pa + pd->pd_size)
				continue;
			return ((void *)(pd->pd_va + (pa - pd->pd_pa)));
		}
	}
	return (NULL);
}
/*
 * Translate a virtual address to the physical address of its static mapping.
 * Returns DEVMAP_PADDR_NOTFOUND if no single static mapping covers the whole
 * [va, va+size) range (or if no devmap table has been registered yet).
 */
vm_paddr_t
arm_devmap_vtop(void * vpva, vm_size_t size)
{
	const struct pmap_devmap *pd;
	vm_offset_t va;

	va = (vm_offset_t)vpva;
	if (devmap_table != NULL) {
		for (pd = devmap_table; pd->pd_size != 0; ++pd) {
			if (va < pd->pd_va ||
			    va + size > pd->pd_va + pd->pd_size)
				continue;
			return ((vm_paddr_t)(pd->pd_pa + (va - pd->pd_va)));
		}
	}
	return (DEVMAP_PADDR_NOTFOUND);
}
/*
 * Map a range of physical pages into the kernel virtual address space and
 * return a pointer to the mapped range.  Intended for device memory, NOT
 * real memory.  The mapping is page-granular; the returned pointer carries
 * the sub-page offset of the original pa.  Panics if kva cannot be allocated.
 */
void *
pmap_mapdev(vm_offset_t pa, vm_size_t size)
{
	vm_offset_t va, tmpva, offset;

	/* Page-align the request, remembering the intra-page offset. */
	offset = pa & PAGE_MASK;
	pa = trunc_page(pa);
	size = round_page(size + offset);

	va = kva_alloc(size);
	if (va == 0)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

	/* Enter a device mapping for each page of the range. */
	for (tmpva = va; size != 0; size -= PAGE_SIZE) {
		pmap_kenter_device(tmpva, pa);
		tmpva += PAGE_SIZE;
		pa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}
/*
 * Unmap device memory that was mapped with pmap_mapdev() and release the
 * kva space backing it.  The va/size pair is page-aligned here exactly as
 * pmap_mapdev() aligned it, so the whole original allocation is freed.
 */
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
	vm_offset_t tmpva;
	vm_size_t offset;

	offset = va & PAGE_MASK;
	va = trunc_page(va);
	size = round_page(size + offset);

	for (tmpva = va; tmpva < va + size; tmpva += PAGE_SIZE)
		pmap_kremove(tmpva);

	/*
	 * BUG FIX: the previous code decremented 'size' to zero inside the
	 * unmap loop and then called kva_free(va, size), i.e. kva_free(va, 0),
	 * which freed nothing and leaked the entire kva region on every
	 * unmap.  Pass the full rounded size instead.
	 */
	kva_free(va, size);
}

View File

@ -90,6 +90,7 @@ __FBSDID("$FreeBSD$");
#include <machine/armreg.h>
#include <machine/atags.h>
#include <machine/cpu.h>
#include <machine/devmap.h>
#include <machine/frame.h>
#include <machine/machdep.h>
#include <machine/md_var.h>

View File

@ -137,7 +137,9 @@
/*
* Special compilation symbols
* PMAP_DEBUG - Build in pmap_debug_level code
*/
*
* Note that pmap_mapdev() and pmap_unmapdev() are implemented in arm/devmap.c
*/
/* Include header files */
#include "opt_vm.h"
@ -2423,6 +2425,17 @@ pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa)
pmap_kenter_internal(va, pa, 0);
}
/*
 * Enter a kernel mapping for a page of device memory.  Currently this is
 * identical to pmap_kenter_nocache() (both pass flags == 0 to
 * pmap_kenter_internal()); see the XXX below.
 */
void
pmap_kenter_device(vm_offset_t va, vm_paddr_t pa)
{

	/*
	 * XXX - Need a way for kenter_internal to handle PTE_DEVICE mapping as
	 * a potentially different thing than PTE_NOCACHE.
	 */
	pmap_kenter_internal(va, pa, 0);
}
void
pmap_kenter_user(vm_offset_t va, vm_paddr_t pa)
{
@ -5010,36 +5023,6 @@ pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
{
}
/*
* Map a set of physical memory pages into the kernel virtual
* address space. Return a pointer to where it is mapped. This
* routine is intended to be used for mapping device memory,
* NOT real memory.
*/
void *
pmap_mapdev(vm_offset_t pa, vm_size_t size)
{
vm_offset_t va, tmpva, offset;
offset = pa & PAGE_MASK;
size = roundup(size, PAGE_SIZE);
GIANT_REQUIRED;
va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
for (tmpva = va; size > 0;) {
pmap_kenter_internal(tmpva, pa, 0);
size -= PAGE_SIZE;
tmpva += PAGE_SIZE;
pa += PAGE_SIZE;
}
return ((void *)(va + offset));
}
/*
* pmap_map_section:
*
@ -5222,86 +5205,6 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
}
/********************** Static device map routines ***************************/
static const struct pmap_devmap *pmap_devmap_table;
/*
* Register the devmap table. This is provided in case early console
* initialization needs to register mappings created by bootstrap code
* before pmap_devmap_bootstrap() is called.
*/
void
pmap_devmap_register(const struct pmap_devmap *table)
{
pmap_devmap_table = table;
}
/*
* Map all of the static regions in the devmap table, and remember
* the devmap table so other parts of the kernel can look up entries
* later.
*/
void
pmap_devmap_bootstrap(vm_offset_t l1pt, const struct pmap_devmap *table)
{
int i;
pmap_devmap_table = table;
for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
#ifdef VERBOSE_INIT_ARM
printf("devmap: %08x -> %08x @ %08x\n",
pmap_devmap_table[i].pd_pa,
pmap_devmap_table[i].pd_pa +
pmap_devmap_table[i].pd_size - 1,
pmap_devmap_table[i].pd_va);
#endif
pmap_map_chunk(l1pt, pmap_devmap_table[i].pd_va,
pmap_devmap_table[i].pd_pa,
pmap_devmap_table[i].pd_size,
pmap_devmap_table[i].pd_prot,
pmap_devmap_table[i].pd_cache);
}
}
const struct pmap_devmap *
pmap_devmap_find_pa(vm_paddr_t pa, vm_size_t size)
{
int i;
if (pmap_devmap_table == NULL)
return (NULL);
for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
if (pa >= pmap_devmap_table[i].pd_pa &&
pa + size <= pmap_devmap_table[i].pd_pa +
pmap_devmap_table[i].pd_size)
return (&pmap_devmap_table[i]);
}
return (NULL);
}
const struct pmap_devmap *
pmap_devmap_find_va(vm_offset_t va, vm_size_t size)
{
int i;
if (pmap_devmap_table == NULL)
return (NULL);
for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
if (va >= pmap_devmap_table[i].pd_va &&
va + size <= pmap_devmap_table[i].pd_va +
pmap_devmap_table[i].pd_size)
return (&pmap_devmap_table[i]);
}
return (NULL);
}
int
pmap_dmap_iscurrent(pmap_t pmap)
{

View File

@ -134,6 +134,8 @@
/*
* Special compilation symbols
* PMAP_DEBUG - Build in pmap_debug_level code
*
* Note that pmap_mapdev() and pmap_unmapdev() are implemented in arm/devmap.c
*/
/* Include header files */
@ -2841,6 +2843,17 @@ pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa)
pmap_kenter_internal(va, pa, 0);
}
/*
 * Enter a kernel mapping for a page of device memory.  Currently this is
 * identical to pmap_kenter_nocache() (both pass flags == 0 to
 * pmap_kenter_internal()); see the XXX below.
 */
void
pmap_kenter_device(vm_offset_t va, vm_paddr_t pa)
{

	/*
	 * XXX - Need a way for kenter_internal to handle PTE_DEVICE mapping as
	 * a potentially different thing than PTE_NOCACHE.
	 */
	pmap_kenter_internal(va, pa, 0);
}
void
pmap_kenter_user(vm_offset_t va, vm_paddr_t pa)
{
@ -4690,36 +4703,6 @@ pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
{
}
/*
* Map a set of physical memory pages into the kernel virtual
* address space. Return a pointer to where it is mapped. This
* routine is intended to be used for mapping device memory,
* NOT real memory.
*/
void *
pmap_mapdev(vm_offset_t pa, vm_size_t size)
{
vm_offset_t va, tmpva, offset;
offset = pa & PAGE_MASK;
size = roundup(size, PAGE_SIZE);
GIANT_REQUIRED;
va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
for (tmpva = va; size > 0;) {
pmap_kenter_internal(tmpva, pa, 0);
size -= PAGE_SIZE;
tmpva += PAGE_SIZE;
pa += PAGE_SIZE;
}
return ((void *)(va + offset));
}
#define BOOTSTRAP_DEBUG
/*
@ -4940,86 +4923,6 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
}
/********************** Static device map routines ***************************/
static const struct pmap_devmap *pmap_devmap_table;
/*
* Register the devmap table. This is provided in case early console
* initialization needs to register mappings created by bootstrap code
* before pmap_devmap_bootstrap() is called.
*/
void
pmap_devmap_register(const struct pmap_devmap *table)
{
pmap_devmap_table = table;
}
/*
* Map all of the static regions in the devmap table, and remember
* the devmap table so other parts of the kernel can look up entries
* later.
*/
void
pmap_devmap_bootstrap(vm_offset_t l1pt, const struct pmap_devmap *table)
{
int i;
pmap_devmap_table = table;
for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
#ifdef VERBOSE_INIT_ARM
printf("devmap: %08x -> %08x @ %08x\n",
pmap_devmap_table[i].pd_pa,
pmap_devmap_table[i].pd_pa +
pmap_devmap_table[i].pd_size - 1,
pmap_devmap_table[i].pd_va);
#endif
pmap_map_chunk(l1pt, pmap_devmap_table[i].pd_va,
pmap_devmap_table[i].pd_pa,
pmap_devmap_table[i].pd_size,
pmap_devmap_table[i].pd_prot,
pmap_devmap_table[i].pd_cache);
}
}
const struct pmap_devmap *
pmap_devmap_find_pa(vm_paddr_t pa, vm_size_t size)
{
int i;
if (pmap_devmap_table == NULL)
return (NULL);
for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
if (pa >= pmap_devmap_table[i].pd_pa &&
pa + size <= pmap_devmap_table[i].pd_pa +
pmap_devmap_table[i].pd_size)
return (&pmap_devmap_table[i]);
}
return (NULL);
}
const struct pmap_devmap *
pmap_devmap_find_va(vm_offset_t va, vm_size_t size)
{
int i;
if (pmap_devmap_table == NULL)
return (NULL);
for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
if (va >= pmap_devmap_table[i].pd_va &&
va + size <= pmap_devmap_table[i].pd_va +
pmap_devmap_table[i].pd_size)
return (&pmap_devmap_table[i]);
}
return (NULL);
}
void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

View File

@ -34,9 +34,12 @@ __FBSDID("$FreeBSD$");
#include <sys/bus.h>
#include <sys/reboot.h>
#include <machine/bus.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>
#include <machine/devmap.h>
#include <arm/freescale/imx/imx6_anatopreg.h>
#include <arm/freescale/imx/imx6_anatopvar.h>
#include <arm/freescale/imx/imx_machdep.h>
@ -112,7 +115,6 @@ cpu_reset(void)
*/
u_int imx_soc_type()
{
const struct pmap_devmap *pd;
uint32_t digprog, hwsoc;
uint32_t *pcr;
const uint32_t HWSOC_MX6SL = 0x60;
@ -131,10 +133,8 @@ u_int imx_soc_type()
IMX6_ANALOG_DIGPROG_SOCTYPE_SHIFT;
/*printf("digprog = 0x%08x\n", digprog);*/
if (hwsoc == HWSOC_MX6DL) {
pd = pmap_devmap_find_pa(SCU_CONFIG_PHYSADDR, 4);
if (pd != NULL) {
pcr = (uint32_t *)(pd->pd_va +
(SCU_CONFIG_PHYSADDR - pd->pd_pa));
pcr = arm_devmap_ptov(SCU_CONFIG_PHYSADDR, 4);
if (pcr != NULL) {
/*printf("scu config = 0x%08x\n", *pcr);*/
if ((*pcr & 0x03) == 0) {
hwsoc = HWSOC_MX6SOLO;

View File

@ -39,6 +39,7 @@ __FBSDID("$FreeBSD$");
#include <machine/armreg.h>
#include <machine/bus.h>
#include <machine/devmap.h>
#include <machine/machdep.h>
#include <arm/freescale/imx/imx_machdep.h>
@ -168,7 +169,6 @@ bus_dma_get_range_nb(void)
void
imx_wdog_cpu_reset(vm_offset_t wdcr_physaddr)
{
const struct pmap_devmap *pd;
volatile uint16_t * pcr;
/*
@ -178,10 +178,9 @@ imx_wdog_cpu_reset(vm_offset_t wdcr_physaddr)
* reset) bit being set in the watchdog status register after the reset.
* This is how software can distinguish a reset from a wdog timeout.
*/
if ((pd = pmap_devmap_find_pa(wdcr_physaddr, 2)) == NULL) {
if ((pcr = arm_devmap_ptov(wdcr_physaddr, sizeof(*pcr))) == NULL) {
printf("cpu_reset() can't find its control register... locking up now.");
} else {
pcr = (uint16_t *)(pd->pd_va + (wdcr_physaddr - pd->pd_pa));
*pcr = WDOG_CR_WDE;
}
for (;;)

46
sys/arm/include/devmap.h Normal file
View File

@ -0,0 +1,46 @@
/*-
* Copyright (c) 2013 Ian Lepore <ian@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_DEVMAP_H_
#define _MACHINE_DEVMAP_H_
/*
* Routines to translate between virtual and physical addresses within a region
* that is static-mapped by the devmap code. If the given address range isn't
* static-mapped, then ptov returns NULL and vtop returns DEVMAP_PADDR_NOTFOUND.
* The latter implies that you can't vtop just the last byte of physical address
* space. This is not as limiting as it might sound, because even if a device
* occupies the end of the physical address space, you're only prevented from
* doing vtop for that single byte. If you vtop a size bigger than 1 it works.
*/
#define DEVMAP_PADDR_NOTFOUND ((vm_paddr_t)(-1))
void * arm_devmap_ptov(vm_paddr_t _pa, vm_size_t _sz);
vm_paddr_t arm_devmap_vtop(void * _va, vm_size_t _sz);
#endif

View File

@ -56,6 +56,8 @@ struct mem_region {
vm_size_t mr_size;
};
struct pmap_devmap;
int fdt_localbus_devmap(phandle_t, struct pmap_devmap *, int, int *);
int fdt_pci_devmap(phandle_t, struct pmap_devmap *devmap, vm_offset_t,
vm_offset_t);

View File

@ -254,6 +254,7 @@ void pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt);
int pmap_change_attr(vm_offset_t, vm_size_t, int);
void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa);
void pmap_kenter_device(vm_offset_t va, vm_paddr_t pa);
void *pmap_kenter_temp(vm_paddr_t pa, int i);
void pmap_kenter_user(vm_offset_t va, vm_paddr_t pa);
vm_paddr_t pmap_kextract(vm_offset_t va);
@ -707,11 +708,7 @@ struct pmap_devmap {
int pd_cache; /* cache attributes */
};
const struct pmap_devmap *pmap_devmap_find_pa(vm_paddr_t, vm_size_t);
const struct pmap_devmap *pmap_devmap_find_va(vm_offset_t, vm_size_t);
void pmap_devmap_bootstrap(vm_offset_t, const struct pmap_devmap *);
void pmap_devmap_register(const struct pmap_devmap *);
#define SECTION_CACHE 0x1
#define SECTION_PT 0x2