
Retire arm_remap_nocache() and the data and constants associated with it.

The only remaining user was the code that allocates bounce pages for armv4
busdma.  It's not clear why bounce pages would need uncached memory, but
if that ever changes, kmem_alloc_attr() would be the way to get it.
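
A minimal sketch of that suggestion, not code from this commit: it assumes the 10.x-era vmem-based kmem_alloc_attr() signature (which has since changed), a bounce zone pointer "bz" carrying the lowaddr constraint the way alloc_bounce_pages() does, and a platform-defined VM_MEMATTR_UNCACHEABLE attribute.

#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

/*
 * Hypothetical helper: allocate one wired, zeroed, uncacheable bounce
 * page below the tag's lowaddr.  Returns 0 on failure.
 */
static vm_offset_t
alloc_uncached_bounce_page(struct bounce_zone *bz)
{

	return (kmem_alloc_attr(kernel_arena, PAGE_SIZE,
	    M_NOWAIT | M_ZERO, 0, bz->lowaddr, VM_MEMATTR_UNCACHEABLE));
}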
Author: Ian Lepore
Date:   2013-10-27 03:13:26 +00:00
Parent: 6489412064
Commit: 99af02e3b6
Notes:  svn2git 2020-12-20 02:59:44 +00:00
        svn path=/head/; revision=257201

14 changed files with 18 additions and 118 deletions

View File

@@ -57,7 +57,7 @@ vm_offset_t
 initarm_lastaddr(void)
 {
-	return (DEVMAP_BOOTSTRAP_MAP_START - ARM_NOCACHE_KVA_SIZE);
+	return (DEVMAP_BOOTSTRAP_MAP_START);
 }
 
 void

View File

@@ -122,7 +122,6 @@ struct bus_dma_tag {
 struct bounce_page {
 	vm_offset_t	vaddr;		/* kva of bounce buffer */
-	vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
 	bus_addr_t	busaddr;	/* Physical address */
 	vm_offset_t	datavaddr;	/* kva of client data */
 	bus_addr_t	dataaddr;	/* client physical address */
@@ -1196,39 +1195,23 @@ _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 	STAILQ_FOREACH(bpage, &map->bpages, links) {
 		if (op & BUS_DMASYNC_PREWRITE) {
 			if (bpage->datavaddr != 0)
-				bcopy((void *)bpage->datavaddr,
-				    (void *)(bpage->vaddr_nocache != 0 ?
-				    bpage->vaddr_nocache :
-				    bpage->vaddr),
-				    bpage->datacount);
+				bcopy((void *)bpage->datavaddr,
+				    (void *)bpage->vaddr, bpage->datacount);
 			else
 				physcopyout(bpage->dataaddr,
-				    (void *)(bpage->vaddr_nocache != 0 ?
-				    bpage->vaddr_nocache :
-				    bpage->vaddr),
-				    bpage->datacount);
-			if (bpage->vaddr_nocache == 0) {
-				cpu_dcache_wb_range(bpage->vaddr,
-				    bpage->datacount);
-				cpu_l2cache_wb_range(bpage->vaddr,
-				    bpage->datacount);
-			}
+				    (void *)bpage->vaddr, bpage->datacount);
+			cpu_dcache_wb_range(bpage->vaddr, bpage->datacount);
+			cpu_l2cache_wb_range(bpage->vaddr, bpage->datacount);
 			dmat->bounce_zone->total_bounced++;
 		}
 		if (op & BUS_DMASYNC_POSTREAD) {
-			if (bpage->vaddr_nocache == 0) {
-				cpu_dcache_inv_range(bpage->vaddr,
-				    bpage->datacount);
-				cpu_l2cache_inv_range(bpage->vaddr,
-				    bpage->datacount);
-			}
+			cpu_dcache_inv_range(bpage->vaddr, bpage->datacount);
+			cpu_l2cache_inv_range(bpage->vaddr, bpage->datacount);
 			if (bpage->datavaddr != 0)
-				bcopy((void *)(bpage->vaddr_nocache != 0 ?
-				    bpage->vaddr_nocache : bpage->vaddr),
+				bcopy((void *)bpage->vaddr,
 				    (void *)bpage->datavaddr, bpage->datacount);
 			else
-				physcopyin((void *)(bpage->vaddr_nocache != 0 ?
-				    bpage->vaddr_nocache : bpage->vaddr),
+				physcopyin((void *)bpage->vaddr,
 				    bpage->dataaddr, bpage->datacount);
 			dmat->bounce_zone->total_bounced++;
 		}
@@ -1385,8 +1368,6 @@ alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
 			break;
 		}
 		bpage->busaddr = pmap_kextract(bpage->vaddr);
-		bpage->vaddr_nocache = (vm_offset_t)arm_remap_nocache(
-		    (void *)bpage->vaddr, PAGE_SIZE);
 		mtx_lock(&bounce_lock);
 		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
 		total_bpages++;

View File

@@ -1951,7 +1951,6 @@ pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt)
 	virtual_avail = round_page(virtual_avail);
 	virtual_end = vm_max_kernel_address;
 	kernel_vm_end = pmap_curmaxkvaddr;
-	arm_nocache_startaddr = vm_max_kernel_address;
 	mtx_init(&cmtx, "TMP mappings mtx", NULL, MTX_DEF);
 
 	pmap_set_pcb_pagedir(kernel_pmap, thread0.td_pcb);

View File

@@ -2423,7 +2423,6 @@ pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt)
 	virtual_avail = round_page(virtual_avail);
 	virtual_end = vm_max_kernel_address;
 	kernel_vm_end = pmap_curmaxkvaddr;
-	arm_nocache_startaddr = vm_max_kernel_address;
 	mtx_init(&cmtx, "TMP mappings mtx", NULL, MTX_DEF);
 
 #ifdef ARM_USE_SMALL_ALLOC

View File

@@ -482,80 +482,6 @@ cpu_exit(struct thread *td)
 {
 }
 
-#define BITS_PER_INT	(8 * sizeof(int))
-vm_offset_t arm_nocache_startaddr;
-static int arm_nocache_allocated[ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE *
-    BITS_PER_INT)];
-
-/*
- * Functions to map and unmap memory non-cached into KVA the kernel won't try
- * to allocate. The goal is to provide uncached memory to busdma, to honor
- * BUS_DMA_COHERENT.
- * We can allocate at most ARM_NOCACHE_KVA_SIZE bytes.
- * The allocator is rather dummy, each page is represented by a bit in
- * a bitfield, 0 meaning the page is not allocated, 1 meaning it is.
- * As soon as it finds enough contiguous pages to satisfy the request,
- * it returns the address.
- */
-void *
-arm_remap_nocache(void *addr, vm_size_t size)
-{
-	int i, j;
-
-	size = round_page(size);
-	for (i = 0; i < ARM_NOCACHE_KVA_SIZE / PAGE_SIZE; i++) {
-		if (!(arm_nocache_allocated[i / BITS_PER_INT] & (1 << (i %
-		    BITS_PER_INT)))) {
-			for (j = i; j < i + (size / (PAGE_SIZE)); j++)
-				if (arm_nocache_allocated[j / BITS_PER_INT] &
-				    (1 << (j % BITS_PER_INT)))
-					break;
-			if (j == i + (size / (PAGE_SIZE)))
-				break;
-		}
-	}
-	if (i < ARM_NOCACHE_KVA_SIZE / PAGE_SIZE) {
-		vm_offset_t tomap = arm_nocache_startaddr + i * PAGE_SIZE;
-		void *ret = (void *)tomap;
-		vm_paddr_t physaddr = vtophys((vm_offset_t)addr);
-		vm_offset_t vaddr = (vm_offset_t) addr;
-
-		vaddr = vaddr & ~PAGE_MASK;
-		for (; tomap < (vm_offset_t)ret + size; tomap += PAGE_SIZE,
-		    vaddr += PAGE_SIZE, physaddr += PAGE_SIZE, i++) {
-			cpu_idcache_wbinv_range(vaddr, PAGE_SIZE);
-#ifdef ARM_L2_PIPT
-			cpu_l2cache_wbinv_range(physaddr, PAGE_SIZE);
-#else
-			cpu_l2cache_wbinv_range(vaddr, PAGE_SIZE);
-#endif
-			pmap_kenter_nocache(tomap, physaddr);
-			cpu_tlb_flushID_SE(vaddr);
-			arm_nocache_allocated[i / BITS_PER_INT] |= 1 << (i %
-			    BITS_PER_INT);
-		}
-		return (ret);
-	}
-	return (NULL);
-}
-
-void
-arm_unmap_nocache(void *addr, vm_size_t size)
-{
-	vm_offset_t raddr = (vm_offset_t)addr;
-	int i;
-
-	size = round_page(size);
-	i = (raddr - arm_nocache_startaddr) / (PAGE_SIZE);
-	for (; size > 0; size -= PAGE_SIZE, i++) {
-		arm_nocache_allocated[i / BITS_PER_INT] &= ~(1 << (i %
-		    BITS_PER_INT));
-		pmap_kremove(raddr);
-		raddr += PAGE_SIZE;
-	}
-}
-
 #ifdef ARM_USE_SMALL_ALLOC
 
 static TAILQ_HEAD(,arm_small_page) pages_normal =
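
The comment in the code removed above describes a first-fit bitmap allocator: one bit per page, scan for the first run of clear bits long enough for the request. A standalone sketch of that scheme, in userland C with the hypothetical NPAGES and allocated[] standing in for ARM_NOCACHE_KVA_SIZE / PAGE_SIZE and arm_nocache_allocated:

#include <limits.h>
#include <stdio.h>

#define NPAGES		64		/* hypothetical pool size, in pages */
#define BITS_PER_INT	(8 * sizeof(int))

static unsigned int allocated[(NPAGES + BITS_PER_INT - 1) / BITS_PER_INT];

/*
 * Find the first run of npages free pages, mark it allocated, and
 * return its starting page index, or -1 if no run fits.
 */
static int
bitmap_alloc_run(int npages)
{
	int i, j;

	for (i = 0; i + npages <= NPAGES; i++) {
		for (j = i; j < i + npages; j++)
			if (allocated[j / BITS_PER_INT] &
			    (1U << (j % BITS_PER_INT)))
				break;
		if (j == i + npages) {
			for (j = i; j < i + npages; j++)
				allocated[j / BITS_PER_INT] |=
				    1U << (j % BITS_PER_INT);
			return (i);
		}
	}
	return (-1);
}

int
main(void)
{

	/* Back-to-back allocations land at pages 0 and 3. */
	printf("%d\n", bitmap_alloc_run(3));	/* prints 0 */
	printf("%d\n", bitmap_alloc_run(2));	/* prints 3 */
	return (0);
}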

View File

@@ -65,7 +65,7 @@ vm_offset_t
 initarm_lastaddr(void)
 {
-	return (DEVMAP_BOOTSTRAP_MAP_START - ARM_NOCACHE_KVA_SIZE);
+	return (DEVMAP_BOOTSTRAP_MAP_START);
 }
 
 void

View File

@@ -735,11 +735,6 @@ struct arm_small_page {
 #endif
 
-#define ARM_NOCACHE_KVA_SIZE 0x1000000
-extern vm_offset_t arm_nocache_startaddr;
-void *arm_remap_nocache(void *, vm_size_t);
-void arm_unmap_nocache(void *, vm_size_t);
-
 extern vm_paddr_t dump_avail[];
 
 #endif /* _KERNEL */

View File

@@ -66,7 +66,7 @@ initarm_lastaddr(void)
 		while (1);
 
 	/* Platform-specific initialisation */
-	return (fdt_immr_va - ARM_NOCACHE_KVA_SIZE);
+	return (fdt_immr_va);
 }
 
 void

View File

@@ -206,7 +206,7 @@ initarm_lastaddr(void)
 		while (1);
 
 	/* Platform-specific initialisation */
-	return (fdt_immr_va - ARM_NOCACHE_KVA_SIZE);
+	return (fdt_immr_va);
 }
 
 void

View File

@@ -49,7 +49,7 @@ vm_offset_t
 initarm_lastaddr(void)
 {
-	return (DEVMAP_BOOTSTRAP_MAP_START - ARM_NOCACHE_KVA_SIZE);
+	return (DEVMAP_BOOTSTRAP_MAP_START);
 }
 
 void

View File

@@ -109,7 +109,7 @@ initarm_lastaddr(void)
 	if (fdt_immr_addr(TEGRA2_BASE) != 0)	/* FIXME ???? */
 		while (1);
 
-	return (fdt_immr_va - ARM_NOCACHE_KVA_SIZE);
+	return (fdt_immr_va);
 }
 
 void

View File

@@ -63,7 +63,7 @@ initarm_lastaddr(void)
 {
 	ti_cpu_reset = NULL;
-	return (DEVMAP_BOOTSTRAP_MAP_START - ARM_NOCACHE_KVA_SIZE);
+	return (DEVMAP_BOOTSTRAP_MAP_START);
 }
 
 void

View File

@@ -60,7 +60,7 @@ vm_offset_t
 initarm_lastaddr(void)
 {
-	return (DEVMAP_BOOTSTRAP_MAP_START - ARM_NOCACHE_KVA_SIZE);
+	return (DEVMAP_BOOTSTRAP_MAP_START);
 }
 
 void

View File

@@ -59,7 +59,7 @@ vm_offset_t
 initarm_lastaddr(void)
 {
-	return (ZYNQ7_PSIO_VBASE - ARM_NOCACHE_KVA_SIZE);
+	return (ZYNQ7_PSIO_VBASE);
 }
 
 void