1
0
mirror of https://git.FreeBSD.org/src.git synced 2024-12-14 10:09:48 +00:00

Try to honor BUS_DMA_COHERENT: if the flag is set, allocate memory

with malloc() or contigmalloc() as usual, but then try to re-map the
allocated memory at a VA outside the KVA, non-cached, making calls to
bus_dmamap_sync() for these buffers unnecessary.
This commit is contained in:
Olivier Houchard 2006-03-01 23:04:25 +00:00
parent 6220e4db34
commit 2456c0ea88
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=156191
4 changed files with 102 additions and 2 deletions

View File

@ -93,6 +93,8 @@ struct bus_dmamap {
bus_dma_tag_t dmat;
int flags;
void *buffer;
void *origbuffer;
void *allocbuffer;
TAILQ_ENTRY(bus_dmamap) freelist;
int len;
};
@ -416,6 +418,23 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
*mapp = NULL;
return (ENOMEM);
}
if (flags & BUS_DMA_COHERENT) {
void *tmpaddr = arm_remap_nocache(
(void *)((vm_offset_t)*vaddr &~ PAGE_MASK),
dmat->maxsize + ((vm_offset_t)*vaddr & PAGE_MASK));
if (tmpaddr) {
tmpaddr = (void *)((vm_offset_t)(tmpaddr) +
((vm_offset_t)*vaddr & PAGE_MASK));
newmap->origbuffer = *vaddr;
newmap->allocbuffer = tmpaddr;
cpu_idcache_wbinv_range((vm_offset_t)*vaddr,
dmat->maxsize);
*vaddr = tmpaddr;
} else
newmap->origbuffer = newmap->allocbuffer = NULL;
} else
newmap->origbuffer = newmap->allocbuffer = NULL;
return (0);
}
@ -426,6 +445,12 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
if (map->allocbuffer) {
KASSERT(map->allocbuffer == vaddr,
("Trying to freeing the wrong DMA buffer"));
vaddr = map->origbuffer;
arm_unmap_nocache(map->allocbuffer, dmat->maxsize);
}
if (dmat->maxsize <= PAGE_SIZE)
free(vaddr, M_DEVBUF);
else {

View File

@ -2553,9 +2553,12 @@ pmap_bootstrap(vm_offset_t firstaddr, vm_offset_t lastaddr, struct pv_addr *l1pt
virtual_avail = round_page(virtual_avail);
virtual_end = lastaddr;
kernel_vm_end = pmap_curmaxkvaddr;
arm_nocache_startaddr = lastaddr;
#ifdef ARM_USE_SMALL_ALLOC
mtx_init(&smallalloc_mtx, "Small alloc page list", NULL, MTX_DEF);
alloc_firstaddr = alloc_curaddr = lastaddr;
alloc_firstaddr = alloc_curaddr = arm_nocache_startaddr +
ARM_NOCACHE_KVA_SIZE;
#endif
}
@ -2915,6 +2918,13 @@ pmap_kenter(vm_offset_t va, vm_paddr_t pa)
pmap_kenter_internal(va, pa, KENTER_CACHE);
}
void
pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa)
{
pmap_kenter_internal(va, pa, 0);
}
void
pmap_kenter_user(vm_offset_t va, vm_paddr_t pa)
{

View File

@ -368,6 +368,64 @@ cpu_exit(struct thread *td)
{
}
vm_offset_t arm_nocache_startaddr;
static int arm_nocache_allocated[ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE * 32)];
/*
* Functions to map and unmap memory non-cached into KVA the kernel won't try
* to allocate. The goal is to provide uncached memory to busdma, to honor
* BUS_DMA_COHERENT.
* We can allocate at most ARM_NOCACHE_KVA_SIZE bytes.
The allocator is deliberately simple: each page is represented by a bit in
* a bitfield, 0 meaning the page is not allocated, 1 meaning it is.
* As soon as it finds enough contiguous pages to satisfy the request,
* it returns the address.
*/
void *
arm_remap_nocache(void *addr, vm_size_t size)
{
int i, j;
size = round_page(size);
for (i = 0; i < MIN(ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE * 32),
ARM_TP_ADDRESS); i++) {
if (!(arm_nocache_allocated[i / 32] & (1 << (i % 32)))) {
for (j = i; j < i + (size / (PAGE_SIZE)); j++)
if (arm_nocache_allocated[j / 32] &
(1 << (j % 32)))
break;
if (j == i + (size / (PAGE_SIZE)))
break;
}
}
if (i < MIN(ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE * 32),
ARM_TP_ADDRESS)) {
vm_offset_t tomap = arm_nocache_startaddr + i * PAGE_SIZE;
void *ret = (void *)tomap;
vm_paddr_t physaddr = vtophys((vm_offset_t)addr);
for (; tomap < (vm_offset_t)ret + size; tomap += PAGE_SIZE,
physaddr += PAGE_SIZE, i++) {
pmap_kenter_nocache(tomap, physaddr);
arm_nocache_allocated[i / 32] |= 1 << (i % 32);
}
return (ret);
}
return (NULL);
}
void
arm_unmap_nocache(void *addr, vm_size_t size)
{
vm_offset_t raddr = (vm_offset_t)addr;
int i;
size = round_page(size);
i = (raddr - arm_nocache_startaddr) / (PAGE_SIZE);
for (; size > 0; size -= PAGE_SIZE, i++)
arm_nocache_allocated[i / 32] &= ~(1 << (i % 32));
}
#ifdef ARM_USE_SMALL_ALLOC
static TAILQ_HEAD(,arm_small_page) pages_normal =
@ -393,7 +451,7 @@ arm_add_smallalloc_pages(void *list, void *mem, int bytes, int pagetable)
{
struct arm_small_page *pg;
bytes &= ~PAGE_SIZE;
bytes &= ~PAGE_MASK;
while (bytes > 0) {
pg = (struct arm_small_page *)list;
pg->addr = mem;

View File

@ -210,6 +210,7 @@ extern vm_offset_t virtual_end;
void pmap_bootstrap(vm_offset_t, vm_offset_t, struct pv_addr *);
void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa);
void pmap_kenter_user(vm_offset_t va, vm_paddr_t pa);
void pmap_kremove(vm_offset_t);
void *pmap_mapdev(vm_offset_t, vm_size_t);
@ -525,6 +526,12 @@ struct arm_small_page {
};
#endif
#define ARM_NOCACHE_KVA_SIZE 0x600000
extern vm_offset_t arm_nocache_startaddr;
void *arm_remap_nocache(void *, vm_size_t);
void arm_unmap_nocache(void *, vm_size_t);
extern vm_paddr_t dump_avail[];
#endif /* _KERNEL */