
o Rewrite softdma_process_tx() of the Altera SoftDMA engine driver so
  that it no longer requires a bounce buffer. The only reason for the
  bounce buffer was to align the buffer address; implement unaligned
  access instead, so the data does not have to be copied twice (a sketch
  of the approach follows the change summary below).
o Remove the contigmalloc-based bounce buffer from the xDMA code, since
  it is not suitable for arbitrary memory provided by the platform,
  which is sometimes a dedicated piece of memory not managed by the OS
  at all.

Sponsored by:	DARPA, AFRL
Ruslan Bukin 2019-04-29 16:27:15 +00:00
parent 34f210d861
commit 5a51e5e49d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=346896
5 changed files with 131 additions and 114 deletions
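
The core of the change is the accumulator-based copy loop in the new
softdma_process_tx(). Below is a minimal standalone sketch of that
technique only, not the driver code itself: fifo_write_word() and
tx_unaligned() are illustrative stand-ins for the bus_write_4() and
softdma_mem_write() calls, and SOP/EOP metadata and FIFO fill-level
polling are omitted. The idea is to shift source bytes into a 64-bit
staging variable so that the FIFO only ever receives aligned 32-bit
words, regardless of the alignment of the source address.

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-in for the FIFO data write; the second argument models the
 * EMPTY field of the metadata word (trailing unused bytes in the
 * final 32-bit word).
 */
static void
fifo_write_word(uint32_t word, int empty_bytes)
{

        printf("word 0x%08x, empty bytes: %d\n",
            (unsigned int)word, empty_bytes);
}

/*
 * Copy an arbitrarily aligned byte buffer into a FIFO that accepts
 * only aligned 32-bit writes, without an intermediate bounce buffer.
 */
static void
tx_unaligned(const uint8_t *src, size_t len)
{
        uintptr_t addr;
        uint64_t buf;
        int got_bits;

        addr = (uintptr_t)src;
        buf = 0;
        got_bits = 0;

        /* Lead-in: consume 1 and/or 2 bytes until addr is 4-byte aligned. */
        if (len >= 1 && (addr & 1)) {
                buf = (buf << 8) | *(const uint8_t *)addr;
                got_bits += 8;
                addr += 1;
                len -= 1;
        }
        if (len >= 2 && (addr & 2)) {
                buf = (buf << 16) | *(const uint16_t *)addr;
                got_bits += 16;
                addr += 2;
                len -= 2;
        }

        /* Main loop: aligned 32-bit loads, whole 32-bit words out. */
        while (len >= 4) {
                buf = (buf << 32) | *(const uint32_t *)addr;
                addr += 4;
                len -= 4;
                fifo_write_word((uint32_t)(buf >> got_bits), 0);
        }

        /* Tail: up to 3 bytes left in memory. */
        if (len & 2) {
                buf = (buf << 16) | *(const uint16_t *)addr;
                got_bits += 16;
                addr += 2;
        }
        if (len & 1) {
                buf = (buf << 8) | *(const uint8_t *)addr;
                got_bits += 8;
        }

        /* Drain the staging buffer: one full word, then a padded last word. */
        if (got_bits >= 32) {
                got_bits -= 32;
                fifo_write_word((uint32_t)(buf >> got_bits), 0);
        }
        if (got_bits > 0)
                fifo_write_word((uint32_t)(buf << (32 - got_bits)),
                    4 - got_bits / 8);
}

int
main(void)
{
        static const char msg[] = "unaligned payload example";

        /* Start one byte into the array to force a misaligned source. */
        tx_unaligned((const uint8_t *)msg + 1, sizeof(msg) - 2);

        return (0);
}

Once the lead-in bytes bring the source pointer to 4-byte alignment,
every load in the main loop is naturally aligned, which is what lets
the driver drop the contigmalloc bounce buffer in the diff below.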

View File

@ -1290,7 +1290,7 @@ atse_attach(device_t dev)
* Chapter 15. On-Chip FIFO Memory Core.
* Embedded Peripherals IP User Guide.
*/
caps = XCHAN_CAP_BUSDMA_NOSEG;
caps = XCHAN_CAP_NOSEG;
/* Alloc xDMA virtual channel. */
sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, caps);
@ -1457,6 +1457,11 @@ atse_detach(device_t dev)
mtx_destroy(&sc->atse_mtx);
xdma_channel_free(sc->xchan_tx);
xdma_channel_free(sc->xchan_rx);
xdma_put(sc->xdma_tx);
xdma_put(sc->xdma_rx);
return (0);
}

View File

@ -190,6 +190,18 @@ softdma_fill_level(struct softdma_softc *sc)
return (val);
}
static uint32_t
fifo_fill_level_wait(struct softdma_softc *sc)
{
uint32_t val;
do
val = softdma_fill_level(sc);
while (val == AVALON_FIFO_TX_BASIC_OPTS_DEPTH);
return (val);
}
static void
softdma_intr(void *arg)
{
@ -287,86 +299,96 @@ static int
softdma_process_tx(struct softdma_channel *chan, struct softdma_desc *desc)
{
struct softdma_softc *sc;
uint32_t src_offs, dst_offs;
uint64_t addr;
uint64_t buf;
uint32_t word;
uint32_t missing;
uint32_t reg;
uint32_t fill_level;
uint32_t leftm;
uint32_t tmp;
uint32_t val;
uint32_t c;
int got_bits;
int len;
sc = chan->sc;
fill_level = softdma_fill_level(sc);
while (fill_level == AVALON_FIFO_TX_BASIC_OPTS_DEPTH)
fill_level = softdma_fill_level(sc);
fifo_fill_level_wait(sc);
/* Set start of packet. */
if (desc->control & CONTROL_GEN_SOP) {
reg = 0;
reg |= A_ONCHIP_FIFO_MEM_CORE_SOP;
if (desc->control & CONTROL_GEN_SOP)
softdma_mem_write(sc, A_ONCHIP_FIFO_MEM_CORE_METADATA,
A_ONCHIP_FIFO_MEM_CORE_SOP);
got_bits = 0;
buf = 0;
addr = desc->src_addr;
len = desc->len;
if (addr & 1) {
buf = (buf << 8) | *(uint8_t *)addr;
got_bits += 8;
addr += 1;
len -= 1;
}
if (len >= 2 && addr & 2) {
buf = (buf << 16) | *(uint16_t *)addr;
got_bits += 16;
addr += 2;
len -= 2;
}
while (len >= 4) {
buf = (buf << 32) | (uint64_t)*(uint32_t *)addr;
addr += 4;
len -= 4;
word = (uint32_t)((buf >> got_bits) & 0xffffffff);
fifo_fill_level_wait(sc);
if (len == 0 && got_bits == 0 &&
(desc->control & CONTROL_GEN_EOP) != 0)
softdma_mem_write(sc, A_ONCHIP_FIFO_MEM_CORE_METADATA,
A_ONCHIP_FIFO_MEM_CORE_EOP);
bus_write_4(sc->res[0], A_ONCHIP_FIFO_MEM_CORE_DATA, word);
}
if (len & 2) {
buf = (buf << 16) | *(uint16_t *)addr;
got_bits += 16;
addr += 2;
len -= 2;
}
if (len & 1) {
buf = (buf << 8) | *(uint8_t *)addr;
got_bits += 8;
addr += 1;
len -= 1;
}
if (got_bits >= 32) {
got_bits -= 32;
word = (uint32_t)((buf >> got_bits) & 0xffffffff);
fifo_fill_level_wait(sc);
if (len == 0 && got_bits == 0 &&
(desc->control & CONTROL_GEN_EOP) != 0)
softdma_mem_write(sc, A_ONCHIP_FIFO_MEM_CORE_METADATA,
A_ONCHIP_FIFO_MEM_CORE_EOP);
bus_write_4(sc->res[0], A_ONCHIP_FIFO_MEM_CORE_DATA, word);
}
if (got_bits) {
missing = 32 - got_bits;
got_bits /= 8;
fifo_fill_level_wait(sc);
reg = A_ONCHIP_FIFO_MEM_CORE_EOP |
((4 - got_bits) << A_ONCHIP_FIFO_MEM_CORE_EMPTY_SHIFT);
softdma_mem_write(sc, A_ONCHIP_FIFO_MEM_CORE_METADATA, reg);
word = (uint32_t)((buf << missing) & 0xffffffff);
bus_write_4(sc->res[0], A_ONCHIP_FIFO_MEM_CORE_DATA, word);
}
src_offs = dst_offs = 0;
c = 0;
while ((desc->len - c) >= 4) {
val = *(uint32_t *)(desc->src_addr + src_offs);
bus_write_4(sc->res[0], A_ONCHIP_FIFO_MEM_CORE_DATA, val);
if (desc->src_incr)
src_offs += 4;
if (desc->dst_incr)
dst_offs += 4;
fill_level += 1;
while (fill_level == AVALON_FIFO_TX_BASIC_OPTS_DEPTH) {
fill_level = softdma_fill_level(sc);
}
c += 4;
}
val = 0;
leftm = (desc->len - c);
switch (leftm) {
case 1:
val = *(uint8_t *)(desc->src_addr + src_offs);
val <<= 24;
src_offs += 1;
break;
case 2:
case 3:
val = *(uint16_t *)(desc->src_addr + src_offs);
val <<= 16;
src_offs += 2;
if (leftm == 3) {
tmp = *(uint8_t *)(desc->src_addr + src_offs);
val |= (tmp << 8);
src_offs += 1;
}
break;
case 0:
default:
break;
}
/* Set end of packet. */
reg = 0;
if (desc->control & CONTROL_GEN_EOP)
reg |= A_ONCHIP_FIFO_MEM_CORE_EOP;
reg |= ((4 - leftm) << A_ONCHIP_FIFO_MEM_CORE_EMPTY_SHIFT);
softdma_mem_write(sc, A_ONCHIP_FIFO_MEM_CORE_METADATA, reg);
/* Ensure there is a FIFO entry available. */
fill_level = softdma_fill_level(sc);
while (fill_level == AVALON_FIFO_TX_BASIC_OPTS_DEPTH)
fill_level = softdma_fill_level(sc);
/* Final write */
bus_write_4(sc->res[0], A_ONCHIP_FIFO_MEM_CORE_DATA, val);
return (dst_offs);
return (desc->len);
}
static int
@ -594,6 +616,8 @@ softdma_channel_alloc(device_t dev, struct xdma_channel *xchan)
if (chan->used == 0) {
chan->xchan = xchan;
xchan->chan = (void *)chan;
xchan->caps |= XCHAN_CAP_NOBUFS;
xchan->caps |= XCHAN_CAP_NOSEG;
chan->index = i;
chan->idx_head = 0;
chan->idx_tail = 0;

View File

@ -84,7 +84,6 @@ struct xchan_buf {
bus_dmamap_t map;
uint32_t nsegs;
uint32_t nsegs_left;
void *cbuf;
};
struct xdma_request {
@ -130,7 +129,8 @@ struct xdma_channel {
uint32_t caps;
#define XCHAN_CAP_BUSDMA (1 << 0)
#define XCHAN_CAP_BUSDMA_NOSEG (1 << 1)
#define XCHAN_CAP_NOSEG (1 << 1)
#define XCHAN_CAP_NOBUFS (1 << 2)
/* A real hardware driver channel. */
void *chan;

View File

@ -136,19 +136,15 @@ xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr)
if (c == 1)
return (c); /* Nothing to do. */
if (xchan->caps & XCHAN_CAP_BUSDMA) {
if ((xchan->caps & XCHAN_CAP_BUSDMA_NOSEG) || \
(c > xchan->maxnsegs)) {
if ((m = m_defrag(xr->m, M_NOWAIT)) == NULL) {
device_printf(xdma->dma_dev,
"%s: Can't defrag mbuf\n",
__func__);
return (c);
}
xr->m = m;
c = 1;
}
if ((m = m_defrag(xr->m, M_NOWAIT)) == NULL) {
device_printf(xdma->dma_dev,
"%s: Can't defrag mbuf\n",
__func__);
return (c);
}
xr->m = m;
c = 1;
return (c);
}

View File

@ -69,14 +69,7 @@ _xchan_bufs_alloc(xdma_channel_t *xchan)
for (i = 0; i < xchan->xr_num; i++) {
xr = &xchan->xr_mem[i];
xr->buf.cbuf = contigmalloc(xchan->maxsegsize,
M_XDMA, 0, 0, ~0, PAGE_SIZE, 0);
if (xr->buf.cbuf == NULL) {
device_printf(xdma->dev,
"%s: Can't allocate contiguous kernel"
" physical memory\n", __func__);
return (-1);
}
/* TODO: bounce buffer */
}
return (0);
@ -179,7 +172,7 @@ xchan_bufs_free(xdma_channel_t *xchan)
} else {
for (i = 0; i < xchan->xr_num; i++) {
xr = &xchan->xr_mem[i];
contigfree(xr->buf.cbuf, xchan->maxsegsize, M_XDMA);
/* TODO: bounce buffer */
}
}
@ -245,17 +238,19 @@ xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
return (-1);
}
/* Allocate bufs. */
ret = xchan_bufs_alloc(xchan);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't allocate bufs.\n", __func__);
/* Allocate buffers if required. */
if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0) {
ret = xchan_bufs_alloc(xchan);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't allocate bufs.\n", __func__);
/* Cleanup */
xchan_sglist_free(xchan);
xchan_bank_free(xchan);
/* Cleanup */
xchan_sglist_free(xchan);
xchan_bank_free(xchan);
return (-1);
return (-1);
}
}
xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);
@ -442,14 +437,8 @@ _xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
switch (xr->req_type) {
case XR_TYPE_MBUF:
if (xr->direction == XDMA_MEM_TO_DEV) {
m_copydata(m, 0, m->m_pkthdr.len, xr->buf.cbuf);
seg[0].ds_addr = (bus_addr_t)xr->buf.cbuf;
seg[0].ds_len = m->m_pkthdr.len;
} else {
seg[0].ds_addr = mtod(m, bus_addr_t);
seg[0].ds_len = m->m_pkthdr.len;
}
seg[0].ds_addr = mtod(m, bus_addr_t);
seg[0].ds_len = m->m_pkthdr.len;
break;
case XR_TYPE_BIO:
case XR_TYPE_VIRT:
@ -516,7 +505,9 @@ xdma_process(xdma_channel_t *xchan,
TAILQ_FOREACH_SAFE(xr, &xchan->queue_in, xr_next, xr_tmp) {
switch (xr->req_type) {
case XR_TYPE_MBUF:
c = xdma_mbuf_defrag(xchan, xr);
if ((xchan->caps & XCHAN_CAP_NOSEG) ||
(c > xchan->maxnsegs))
c = xdma_mbuf_defrag(xchan, xr);
break;
case XR_TYPE_BIO:
case XR_TYPE_VIRT:
@ -571,7 +562,8 @@ xdma_queue_submit_sg(xdma_channel_t *xchan)
sg = xchan->sg;
if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0 &&
(xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
device_printf(xdma->dev,
"%s: Can't submit a transfer: no bufs\n",
__func__);