mirror of
https://git.FreeBSD.org/src.git
synced 2024-12-17 10:26:15 +00:00
- Add bounce pages for arm, largely based on the i386 implementation.
- Add a default parent dma tag, similar to what has been done for sparc64.
- Before invalidating the dcache in POSTREAD, save the bytes which are in
  the same cachelines as our buffers, but not part of them, and restore
  them after the invalidation.
This commit is contained in:
parent 752945d6c0
commit 47010239a8
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=166063
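The third item addresses a partial-cacheline hazard: invalidating the dcache over a buffer whose start or end is not cacheline-aligned also discards unrelated bytes that live in the shared edge cachelines. Below is a minimal standalone sketch of the idea; the function name is hypothetical, while arm_dcache_align, arm_dcache_align_mask, cpu_dcache_inv_range() and the kernel memcpy() are the facilities the committed bus_dmamap_sync_buf() in the diff actually uses.

/*
 * Sketch only: preserve the bytes that share the first and last
 * cachelines with [buf, buf + len) but are not part of the buffer,
 * across a dcache invalidation that operates on whole cachelines.
 */
static void
dcache_inv_preserving_edges(vm_offset_t buf, int len)
{
        char head[arm_dcache_align], tail[arm_dcache_align];
        vm_offset_t end = buf + len;

        /* Save the leading fragment: bytes before buf in its cacheline. */
        if (buf & arm_dcache_align_mask)
                memcpy(head, (void *)(buf & ~arm_dcache_align_mask),
                    buf & arm_dcache_align_mask);
        /* Save the trailing fragment: bytes after end in its cacheline. */
        if (end & arm_dcache_align_mask)
                memcpy(tail, (void *)end,
                    arm_dcache_align - (end & arm_dcache_align_mask));

        /* The invalidation may wipe both partially owned edge lines. */
        cpu_dcache_inv_range(buf, len);

        /* Restore the unrelated neighboring bytes. */
        if (buf & arm_dcache_align_mask)
                memcpy((void *)(buf & ~arm_dcache_align_mask), head,
                    buf & arm_dcache_align_mask);
        if (end & arm_dcache_align_mask)
                memcpy((void *)end, tail,
                    arm_dcache_align - (end & arm_dcache_align_mask));
}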
sys/arm/arm/busdma_machdep.c

@@ -48,6 +48,7 @@ __FBSDID("$FreeBSD$");
#include <sys/uio.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
@@ -56,6 +57,13 @@ __FBSDID("$FreeBSD$");
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#define MAX_BPAGES 64
#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
        bus_dma_tag_t parent;
@@ -81,8 +89,47 @@ struct bus_dma_tag {
         */
        struct arm32_dma_range *ranges;
        int _nranges;
        struct bounce_zone *bounce_zone;
};

struct bounce_page {
        vm_offset_t vaddr;              /* kva of bounce buffer */
        vm_offset_t vaddr_nocache;      /* kva of bounce buffer uncached */
        bus_addr_t busaddr;             /* Physical address */
        vm_offset_t datavaddr;          /* kva of client data */
        bus_size_t datacount;           /* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
        STAILQ_ENTRY(bounce_zone) links;
        STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
        int total_bpages;
        int free_bpages;
        int reserved_bpages;
        int active_bpages;
        int total_bounced;
        int total_deferred;
        bus_size_t alignment;
        bus_size_t boundary;
        bus_addr_t lowaddr;
        char zoneid[8];
        char lowaddrid[20];
        struct sysctl_ctx_list sysctl_tree;
        struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

#define DMAMAP_LINEAR 0x1
#define DMAMAP_MBUF 0x2
#define DMAMAP_UIO 0x4
@@ -90,6 +137,9 @@ struct bus_dma_tag {
#define DMAMAP_TYPE_MASK (DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_COHERENT 0x8
struct bus_dmamap {
        struct bp_list bpages;
        int pagesneeded;
        int pagesreserved;
        bus_dma_tag_t dmat;
        int flags;
        void *buffer;
@@ -97,8 +147,15 @@ struct bus_dmamap {
        void *allocbuffer;
        TAILQ_ENTRY(bus_dmamap) freelist;
        int len;
        STAILQ_ENTRY(bus_dmamap) links;
        bus_dmamap_callback_t *callback;
        void *callback_arg;

};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
        TAILQ_HEAD_INITIALIZER(dmamap_freelist);

@@ -109,6 +166,45 @@ static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);

/* Default tag, as most drivers provide no parent tag. */
bus_dma_tag_t arm_root_dma_tag;

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
        int retval;

        retval = 0;

        do {
                if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
                    || ((paddr & (dmat->alignment - 1)) != 0))
                    && (dmat->filter == NULL
                    || (*dmat->filter)(dmat->filterarg, paddr) != 0))
                        retval = 1;

                dmat = dmat->parent;
        } while (retval == 0 && dmat != NULL);
        return (retval);
}

static void
arm_dmamap_freelist_init(void *dummy)
{
@@ -129,6 +225,19 @@ bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);

static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
        int i;
        for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
                if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
                    || (lowaddr < phys_avail[i] &&
                    highaddr > phys_avail[i]))
                        return (1);
        }
        return (0);
}

static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
@@ -195,11 +304,12 @@ _busdma_alloc_dmamap(void)
                TAILQ_REMOVE(&dmamap_freelist, map, freelist);
        mtx_unlock(&busdma_mtx);
        if (!map) {
-               map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT);
+               map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO);
                if (map)
                        map->flags = DMAMAP_ALLOCATED;
        } else
                map->flags = 0;
        STAILQ_INIT(&map->bpages);
        return (map);
}
@@ -232,6 +342,8 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
        int error = 0;
        /* Return a NULL tag on failure */
        *dmat = NULL;
+       if (!parent)
+               parent = arm_root_dma_tag;

        newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
        if (newtag == NULL) {
@@ -273,6 +385,9 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                else if (parent->boundary != 0)
                        newtag->boundary = min(parent->boundary,
                            newtag->boundary);
+               if ((newtag->filter != NULL) ||
+                   ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
+                       newtag->flags |= BUS_DMA_COULD_BOUNCE;
        if (newtag->filter == NULL) {
                /*
                 * Short circuit looking at our parent directly
@@ -285,8 +400,38 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                if (newtag->parent != NULL)
                        atomic_add_int(&parent->ref_count, 1);
        }
        if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
            || newtag->alignment > 1)
                newtag->flags |= BUS_DMA_COULD_BOUNCE;

-       *dmat = newtag;
        if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
            (flags & BUS_DMA_ALLOCNOW) != 0) {
                struct bounce_zone *bz;

                /* Must bounce */

                if ((error = alloc_bounce_zone(newtag)) != 0) {
                        free(newtag, M_DEVBUF);
                        return (error);
                }
                bz = newtag->bounce_zone;

                if (ptoa(bz->total_bpages) < maxsize) {
                        int pages;

                        pages = atop(maxsize) - bz->total_bpages;

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages) < pages)
                                error = ENOMEM;
                }
                /* Performed initial allocation */
                newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
        }
        if (error != 0)
                free(newtag, M_DEVBUF);
        else
                *dmat = newtag;
        CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
            __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
@@ -327,6 +472,7 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat)
        return (0);
}

+#include <sys/kdb.h>
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
@@ -335,9 +481,7 @@ int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        bus_dmamap_t newmap;
-#ifdef KTR
        int error = 0;
-#endif

        newmap = _busdma_alloc_dmamap();
        if (newmap == NULL) {
@@ -349,6 +493,52 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
        newmap->allocbuffer = NULL;
        dmat->map_count++;

        /*
         * Bouncing might be required if the driver asks for an active
         * exclusion region, a data alignment that is stricter than 1, and/or
         * an active address boundary.
         */
        if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

                /* Must bounce */
                struct bounce_zone *bz;
                int maxpages;

                if (dmat->bounce_zone == NULL) {
                        if ((error = alloc_bounce_zone(dmat)) != 0) {
                                _busdma_free_dmamap(newmap);
                                *mapp = NULL;
                                return (error);
                        }
                }
                bz = dmat->bounce_zone;

                /* Initialize the new map */
                STAILQ_INIT(&((*mapp)->bpages));

                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                maxpages = MAX_BPAGES;
                if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
                    || (dmat->map_count > 0 && bz->total_bpages < maxpages)) {
                        int pages;

                        pages = MAX(atop(dmat->maxsize), 1);
                        pages = MIN(maxpages - bz->total_bpages, pages);
                        pages = MAX(pages, 1);
                        if (alloc_bounce_pages(dmat, pages) < pages)
                                error = ENOMEM;

                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
                                if (error == 0)
                                        dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
                        } else {
                                error = 0;
                        }
                }
        }
        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
            __func__, dmat, dmat->flags, error);
@@ -364,6 +554,11 @@ bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

        _busdma_free_dmamap(map);
+       if (STAILQ_FIRST(&map->bpages) != NULL) {
+               CTR3(KTR_BUSDMA, "%s: tag %p error %d",
+                   __func__, dmat, EBUSY);
+               return (EBUSY);
+       }
        dmat->map_count--;
        CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
        return (0);
@@ -399,7 +594,9 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
        *mapp = newmap;
        newmap->dmat = dmat;

-       if (dmat->maxsize <= PAGE_SIZE) {
+       if (dmat->maxsize <= PAGE_SIZE &&
+           (dmat->alignment < dmat->maxsize) &&
+           !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
                *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
        } else {
                /*
@@ -452,7 +649,9 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
                vaddr = map->origbuffer;
                arm_unmap_nocache(map->allocbuffer, dmat->maxsize);
        }
-       if (dmat->maxsize <= PAGE_SIZE)
+       if (dmat->maxsize <= PAGE_SIZE &&
+           dmat->alignment < dmat->maxsize &&
+           !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
                free(vaddr, M_DEVBUF);
        else {
                contigfree(vaddr, dmat->maxsize, M_DEVBUF);
@@ -462,6 +661,64 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
        CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, int flags, int *nb)
{
        vm_offset_t vaddr;
        vm_offset_t vendaddr;
        bus_addr_t paddr;
        int needbounce = *nb;

        if ((map->pagesneeded == 0)) {
                CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
                    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
                    dmat->boundary, dmat->alignment);
                CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
                    map, &nobounce_dmamap, map->pagesneeded);
                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = trunc_page((vm_offset_t)buf);
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        paddr = pmap_kextract(vaddr);
                        if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
                            run_filter(dmat, paddr) != 0) {
                                needbounce = 1;
                                map->pagesneeded++;
                        }
                        vaddr += PAGE_SIZE;
                }
                CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
        }

        /* Reserve Necessary Bounce Pages */
        if (map->pagesneeded != 0) {
                mtx_lock(&bounce_lock);
                if (flags & BUS_DMA_NOWAIT) {
                        if (reserve_bounce_pages(dmat, map, 0) != 0) {
                                mtx_unlock(&bounce_lock);
                                return (ENOMEM);
                        }
                } else {
                        if (reserve_bounce_pages(dmat, map, 1) != 0) {
                                /* Queue us for resources */
                                STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
                                    map, links);
                                mtx_unlock(&bounce_lock);
                                return (EINPROGRESS);
                        }
                }
                mtx_unlock(&bounce_lock);
        }

        *nb = needbounce;
        return (0);
}

/*
 * Utility function to load a linear buffer. lastaddrp holds state
 * between invocations (for multiple-buffer loads). segp contains
@@ -481,10 +738,17 @@ bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
        pd_entry_t *pde;
        pt_entry_t pte;
        pt_entry_t *ptep;
        int needbounce = 0;

        lastaddr = *lastaddrp;
        bmask = ~(dmat->boundary - 1);

        if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
                error = _bus_dmamap_count_pages(dmat, map, buf, buflen, flags,
                    &needbounce);
                if (error)
                        return (error);
        }
        CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
            "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);
@@ -531,20 +795,6 @@ bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
                        map->flags &= ~DMAMAP_COHERENT;
                }

-               if (dmat->ranges) {
-                       struct arm32_dma_range *dr;
-
-                       dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
-                           curaddr);
-                       if (dr == NULL)
-                               return (EINVAL);
-                       /*
-                        * In a valid DMA range. Translate the physical
-                        * memory address to an address in the DMA window.
-                        */
-                       curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
-
-               }
                /*
                 * Compute the segment size, and adjust counts.
                 */
@@ -560,12 +810,30 @@ bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }
+               if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+                   map->pagesneeded != 0 && run_filter(dmat, curaddr))
+                       curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
+
+               if (dmat->ranges) {
+                       struct arm32_dma_range *dr;
+
+                       dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
+                           curaddr);
+                       if (dr == NULL)
+                               return (EINVAL);
+                       /*
+                        * In a valid DMA range. Translate the physical
+                        * memory address to an address in the DMA window.
+                        */
+                       curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
+
+               }

                /*
                 * Insert chunk into a segment, coalescing with
                 * the previous segment if possible.
                 */
-               if (seg >= 0 && curaddr == lastaddr &&
+               if (needbounce == 0 && seg >= 0 && curaddr == lastaddr &&
                    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
                    (dmat->boundary == 0 ||
                    (segs[seg].ds_addr & bmask) ==
@@ -615,6 +883,8 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,

        KASSERT(dmat != NULL, ("dmatag is NULL"));
        KASSERT(map != NULL, ("dmamap is NULL"));
+       map->callback = callback;
+       map->callback_arg = callback_arg;
        map->flags &= ~DMAMAP_TYPE_MASK;
        map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
        map->buffer = buf;

@@ -622,6 +892,8 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
        error = bus_dmamap_load_buffer(dmat,
            dm_segments, map, buf, buflen, kernel_pmap,
            flags, &lastaddr, &nsegs);
+       if (error == EINPROGRESS)
+               return (error);
        if (error)
                (*callback)(callback_arg, NULL, 0, error);
        else
@@ -797,26 +1069,93 @@ bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_page *bpage;

        map->flags &= ~DMAMAP_TYPE_MASK;
        while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                STAILQ_REMOVE_HEAD(&map->bpages, links);
                free_bounce_page(dmat, bpage);
        }
        return;
}

static __inline void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{
        char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align];

        if (op & BUS_DMASYNC_PREWRITE)
                cpu_dcache_wb_range((vm_offset_t)buf, len);
        if (op & BUS_DMASYNC_PREREAD) {
        if (op & BUS_DMASYNC_POSTREAD) {
                if ((vm_offset_t)buf & arm_dcache_align_mask)
                        cpu_dcache_wbinv_range((vm_offset_t)buf &
                            ~arm_dcache_align_mask, arm_dcache_align);
                        memcpy(_tmp_cl, (void *)((vm_offset_t)buf & ~
                            arm_dcache_align_mask),
                            (vm_offset_t)buf - ((vm_offset_t)buf &~
                            arm_dcache_align_mask));
                if (((vm_offset_t)buf + len) & arm_dcache_align_mask)
                        cpu_dcache_wbinv_range(((vm_offset_t)buf + len) &
                            ~arm_dcache_align_mask, arm_dcache_align);
        }
        if (op & BUS_DMASYNC_POSTREAD)
                memcpy(_tmp_cl, (void *)((vm_offset_t)buf & ~
                    arm_dcache_align_mask),
                    (vm_offset_t)buf - ((vm_offset_t)buf &~
                    arm_dcache_align_mask));
                if (((vm_offset_t)buf + len) & arm_dcache_align_mask)
                        memcpy(_tmp_clend, (void *)(((vm_offset_t)buf + len) & ~
                            arm_dcache_align_mask),
                            (vm_offset_t)buf + len - (((vm_offset_t)buf + len) &~
                            arm_dcache_align_mask));
                cpu_dcache_inv_range((vm_offset_t)buf, len);
                if ((vm_offset_t)buf & arm_dcache_align_mask)
                        memcpy((void *)((vm_offset_t)buf &
                            ~arm_dcache_align_mask),
                            _tmp_cl,
                            (vm_offset_t)buf - ((vm_offset_t)buf &~
                            arm_dcache_align_mask));
                if (((vm_offset_t)buf + len) & arm_dcache_align_mask)
                        memcpy((void *)(((vm_offset_t)buf + len) & ~
                            arm_dcache_align_mask), _tmp_clend,
                            (vm_offset_t)buf + len - (((vm_offset_t)buf + len) &~
                            arm_dcache_align_mask));
        }
}

static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
        struct bounce_page *bpage;

        STAILQ_FOREACH(bpage, &map->bpages, links) {
                if (op & BUS_DMASYNC_PREWRITE) {
                        bcopy((void *)bpage->datavaddr,
                            (void *)(bpage->vaddr_nocache != 0 ?
                            bpage->vaddr_nocache : bpage->vaddr),
                            bpage->datacount);
                        if (bpage->vaddr_nocache == 0)
                                cpu_dcache_wb_range(bpage->vaddr,
                                    bpage->datacount);
                }
                if (op & BUS_DMASYNC_POSTREAD) {
                        if (bpage->vaddr_nocache == 0)
                                cpu_dcache_inv_range(bpage->vaddr,
                                    bpage->datacount);
                        bcopy((void *)(bpage->vaddr_nocache != 0 ?
                            bpage->vaddr_nocache : bpage->vaddr),
                            (void *)bpage->datavaddr, bpage->datacount);
                }
        }
}

static __inline int
_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len)
{
        struct bounce_page *bpage;

        STAILQ_FOREACH(bpage, &map->bpages, links) {
                if ((vm_offset_t)buf >= bpage->datavaddr &&
                    (vm_offset_t)buf + len < bpage->datavaddr +
                    bpage->datacount)
                        return (1);
        }
        return (0);

}

void
@@ -829,6 +1168,8 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)

        if (op == BUS_DMASYNC_POSTWRITE)
                return;
+       if (STAILQ_FIRST(&map->bpages))
+               _bus_dmamap_sync_bp(dmat, map, op);
        if (map->flags & DMAMAP_COHERENT)
                return;
        if ((op && BUS_DMASYNC_POSTREAD) && (map->len >= 2 * PAGE_SIZE)) {
@@ -838,12 +1179,14 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
        CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
        switch(map->flags & DMAMAP_TYPE_MASK) {
        case DMAMAP_LINEAR:
-               bus_dmamap_sync_buf(map->buffer, map->len, op);
+               if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len)))
+                       bus_dmamap_sync_buf(map->buffer, map->len, op);
                break;
        case DMAMAP_MBUF:
                m = map->buffer;
                while (m) {
-                       if (m->m_len > 0)
+                       if (m->m_len > 0 &&
+                           !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len)))
                                bus_dmamap_sync_buf(m->m_data, m->m_len, op);
                        m = m->m_next;
                }
@@ -856,8 +1199,10 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
                        bus_size_t minlen = resid < iov[i].iov_len ? resid :
                            iov[i].iov_len;
                        if (minlen > 0) {
-                               bus_dmamap_sync_buf(iov[i].iov_base, minlen,
-                                   op);
+                               if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base,
+                                   minlen))
+                                       bus_dmamap_sync_buf(iov[i].iov_base,
+                                           minlen, op);
                                resid -= minlen;
                        }
                }
@@ -867,3 +1212,247 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
        }
        cpu_drain_writebuf();
}

static void
init_bounce_pages(void *dummy __unused)
{

        total_bpages = 0;
        STAILQ_INIT(&bounce_zone_list);
        STAILQ_INIT(&bounce_map_waitinglist);
        STAILQ_INIT(&bounce_map_callbacklist);
        mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
        return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
        return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
        struct bounce_zone *bz;

        /* Check to see if we already have a suitable zone */
        STAILQ_FOREACH(bz, &bounce_zone_list, links) {
                if ((dmat->alignment <= bz->alignment)
                    && (dmat->boundary <= bz->boundary)
                    && (dmat->lowaddr >= bz->lowaddr)) {
                        dmat->bounce_zone = bz;
                        return (0);
                }
        }

        if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
            M_NOWAIT | M_ZERO)) == NULL)
                return (ENOMEM);

        STAILQ_INIT(&bz->bounce_page_list);
        bz->free_bpages = 0;
        bz->reserved_bpages = 0;
        bz->active_bpages = 0;
        bz->lowaddr = dmat->lowaddr;
        bz->alignment = dmat->alignment;
        bz->boundary = dmat->boundary;
        snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
        busdma_zonecount++;
        snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
        STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
        dmat->bounce_zone = bz;

        sysctl_ctx_init(&bz->sysctl_tree);
        bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
            SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
            CTLFLAG_RD, 0, "");
        if (bz->sysctl_tree_top == NULL) {
                sysctl_ctx_free(&bz->sysctl_tree);
                return (0);     /* XXX error code? */
        }

        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
            "Total bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
            "Free bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
            "Reserved bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
            "Active bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
            "Total bounce requests");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
            "Total bounce requests that were deferred");
        SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "boundary", CTLFLAG_RD, &bz->boundary, 0, "");

        return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
        struct bounce_zone *bz;
        int count;

        bz = dmat->bounce_zone;
        count = 0;
        while (numpages > 0) {
                struct bounce_page *bpage;

                bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
                    M_NOWAIT | M_ZERO);

                if (bpage == NULL)
                        break;
                bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
                    M_NOWAIT, 0ul,
                    bz->lowaddr,
                    PAGE_SIZE,
                    bz->boundary);
                if (bpage->vaddr == 0) {
                        free(bpage, M_DEVBUF);
                        break;
                }
                bpage->busaddr = pmap_kextract(bpage->vaddr);
                bpage->vaddr_nocache = (vm_offset_t)arm_remap_nocache(
                    (void *)bpage->vaddr, PAGE_SIZE);
                mtx_lock(&bounce_lock);
                STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
                total_bpages++;
                bz->total_bpages++;
                bz->free_bpages++;
                mtx_unlock(&bounce_lock);
                count++;
                numpages--;
        }
        return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
        struct bounce_zone *bz;
        int pages;

        mtx_assert(&bounce_lock, MA_OWNED);
        bz = dmat->bounce_zone;
        pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
        if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
                return (map->pagesneeded - (map->pagesreserved + pages));
        bz->free_bpages -= pages;
        bz->reserved_bpages += pages;
        map->pagesreserved += pages;
        pages = map->pagesneeded - map->pagesreserved;

        return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_size_t size)
{
        struct bounce_zone *bz;
        struct bounce_page *bpage;

        KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
        KASSERT(map != NULL && map != &nobounce_dmamap,
            ("add_bounce_page: bad map %p", map));

        bz = dmat->bounce_zone;
        if (map->pagesneeded == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesneeded--;

        if (map->pagesreserved == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesreserved--;

        mtx_lock(&bounce_lock);
        bpage = STAILQ_FIRST(&bz->bounce_page_list);
        if (bpage == NULL)
                panic("add_bounce_page: free page list is empty");

        STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
        bz->reserved_bpages--;
        bz->active_bpages++;
        mtx_unlock(&bounce_lock);

        bpage->datavaddr = vaddr;
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
        return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
        struct bus_dmamap *map;
        struct bounce_zone *bz;

        bz = dmat->bounce_zone;
        bpage->datavaddr = 0;
        bpage->datacount = 0;

        mtx_lock(&bounce_lock);
        STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
        bz->free_bpages++;
        bz->active_bpages--;
        if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
                if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
                        STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
                        STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
                            map, links);
                        busdma_swi_pending = 1;
                        bz->total_deferred++;
                        swi_sched(vm_ih, 0);
                }
        }
        mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
        bus_dma_tag_t dmat;
        struct bus_dmamap *map;

        mtx_lock(&bounce_lock);
        while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
                STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
                mtx_unlock(&bounce_lock);
                dmat = map->dmat;
                (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
                bus_dmamap_load(map->dmat, map, map->buffer, map->len,
                    map->callback, map->callback_arg, /*flags*/0);
                (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
                mtx_lock(&bounce_lock);
        }
        mtx_unlock(&bounce_lock);
}
sys/arm/arm/vm_machdep.c

@@ -70,6 +70,8 @@ __FBSDID("$FreeBSD$");
#include <vm/uma.h>
#include <vm/uma_int.h>
+
+#include <machine/md_var.h>

#ifndef NSFBUFS
#define NSFBUFS (512 + maxusers * 16)
#endif

@@ -371,6 +373,9 @@ cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
void
swi_vm(void *dummy)
{
+
+       if (busdma_swi_pending)
+               busdma_swi();
}

void
sys/arm/include/bus_dma.h

@@ -101,6 +101,8 @@ struct arm32_dma_range {
struct arm32_dma_range *bus_dma_get_range(void);
int bus_dma_get_range_nb(void);

+extern bus_dma_tag_t arm_root_dma_tag;
+
#endif /* _ARM32_BUS_DMA_PRIVATE */

#endif /* _ARM_BUS_DMA_H */
sys/arm/include/md_var.h

@@ -46,4 +46,7 @@ extern int _min_bzero_size;
#define SRC_IS_USER 0x2
#define IS_PHYSICAL 0x4

+extern int busdma_swi_pending;
+void busdma_swi(void);
+
#endif /* !_MACHINE_MD_VAR_H_ */
sys/arm/include/pmap.h

@@ -537,7 +537,7 @@ struct arm_small_page {

#endif

-#define ARM_NOCACHE_KVA_SIZE 0x600000
+#define ARM_NOCACHE_KVA_SIZE 0x1000000
extern vm_offset_t arm_nocache_startaddr;
void *arm_remap_nocache(void *, vm_size_t);
void arm_unmap_nocache(void *, vm_size_t);
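For driver authors, none of this changes the bus_dma API itself; it is all reached through bus_dma_tag_create() and the map calls. A hedged usage sketch follows (the transfer sizes and flags are invented for illustration and are not taken from this commit): passing a NULL parent now inherits the constraints of arm_root_dma_tag, and BUS_DMA_ALLOCNOW pre-populates a bounce zone when the tag's address window could force bouncing.

/*
 * Sketch only: a typical arm driver creating a DMA tag after this
 * commit.  The 64 KB single-segment limits are made-up example values.
 */
static int
example_alloc_dma_tag(bus_dma_tag_t *tagp)
{

        return (bus_dma_tag_create(
            NULL,                    /* NULL parent now means arm_root_dma_tag */
            1,                       /* alignment */
            0,                       /* boundary */
            BUS_SPACE_MAXADDR_32BIT, /* lowaddr: may require bounce pages */
            BUS_SPACE_MAXADDR,       /* highaddr */
            NULL, NULL,              /* filter, filterarg */
            65536,                   /* maxsize */
            1,                       /* nsegments */
            65536,                   /* maxsegsz */
            BUS_DMA_ALLOCNOW,        /* flags: pre-allocate bounce pages */
            NULL, NULL,              /* lockfunc, lockfuncarg */
            tagp));
}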