Change the RX EDMA path to first complete the FIFO, then re-populate it
with fresh descriptors, before handling the frames.

Wrap it all in the RX locks.

Since the FIFO is very shallow (16 for HP, 128 for LP), it needs to be
drained and replenished very quickly.  Ideally, I'll eventually move this
RX FIFO drain/fill into the interrupt handler, only deferring the actual
frame completion.
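
For reference, the change boils down to a three-phase pattern: drain the completed FIFO entries onto a private list and re-populate the FIFO while holding the RX lock, complete the frames with the lock dropped, then free the buffers in one pass back under the lock. Below is a rough user-space sketch of that ordering only, using a pthread mutex and sys/queue.h lists in place of ATH_RX_LOCK and the driver's ath_buf handling; the names and the FIFO model are illustrative, not the driver's code.

/*
 * Minimal sketch of the drain / refill / complete / free ordering,
 * assuming a pthread mutex as the RX lock and a plain array as the
 * hardware FIFO.  Illustrative only.
 */
#include <sys/queue.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define	FIFO_LEN	16		/* HP queue depth from the commit message */

struct rxbuf {
	TAILQ_ENTRY(rxbuf) link;
	int	done;			/* stand-in for the cached completion status */
	int	len;
};

TAILQ_HEAD(rxlist, rxbuf);

static pthread_mutex_t rx_lock = PTHREAD_MUTEX_INITIALIZER;
static struct rxbuf *fifo[FIFO_LEN];
static int fifo_head, fifo_depth;

static void
rx_proc(void)
{
	struct rxlist done_list;
	struct rxbuf *bf;

	TAILQ_INIT(&done_list);

	/* Phase 1: pop completed entries and refill, under the RX lock. */
	pthread_mutex_lock(&rx_lock);
	while (fifo_depth > 0) {
		bf = fifo[fifo_head];
		if (bf == NULL || !bf->done)
			break;
		fifo[fifo_head] = NULL;
		TAILQ_INSERT_TAIL(&done_list, bf, link);
		fifo_head = (fifo_head + 1) % FIFO_LEN;
		fifo_depth--;
	}
	/* A real driver would re-populate the FIFO with fresh buffers here. */
	pthread_mutex_unlock(&rx_lock);

	/* Phase 2: complete the frames without holding the lock. */
	TAILQ_FOREACH(bf, &done_list, link)
		printf("completed frame, len=%d\n", bf->len);

	/* Phase 3: free everything in one pass, back under the lock. */
	pthread_mutex_lock(&rx_lock);
	while ((bf = TAILQ_FIRST(&done_list)) != NULL) {
		TAILQ_REMOVE(&done_list, bf, link);
		free(bf);
	}
	pthread_mutex_unlock(&rx_lock);
}

int
main(void)
{
	/* Fake two completed entries sitting in the FIFO. */
	for (int i = 0; i < 2; i++) {
		struct rxbuf *bf = calloc(1, sizeof(*bf));
		if (bf == NULL)
			return (1);
		bf->done = 1;
		bf->len = 64 + i;
		fifo[i] = bf;
		fifo_depth++;
	}
	rx_proc();
	return (0);
}

The point of the split is that the drain-and-refill phase stays short, so the shallow FIFO gets fresh descriptors back quickly; the potentially expensive per-frame work runs only after the hardware has been replenished.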
Adrian Chadd 2012-07-14 02:52:48 +00:00
parent 8d467c41b0
commit 0b59717b4b
2 changed files with 70 additions and 21 deletions


@@ -156,6 +156,7 @@ ath_edma_stoprecv(struct ath_softc *sc, int dodelay)
{
struct ath_hal *ah = sc->sc_ah;
ATH_RX_LOCK(sc);
ath_hal_stoppcurecv(ah);
ath_hal_setrxfilter(ah, 0);
ath_hal_stopdmarecv(ah);
@@ -173,6 +174,7 @@ ath_edma_stoprecv(struct ath_softc *sc, int dodelay)
m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
}
ATH_RX_UNLOCK(sc);
}
/*
@@ -187,6 +189,8 @@ ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
struct ath_buf *bf;
int i, j;
ATH_RX_LOCK_ASSERT(sc);
i = re->m_fifo_head;
for (j = 0; j < re->m_fifo_depth; j++) {
bf = re->m_fifo[i];
@@ -221,6 +225,8 @@ ath_edma_startrecv(struct ath_softc *sc)
{
struct ath_hal *ah = sc->sc_ah;
ATH_RX_LOCK(sc);
/* Enable RX FIFO */
ath_hal_rxena(ah);
@@ -266,6 +272,9 @@ ath_edma_startrecv(struct ath_softc *sc)
ath_mode_init(sc);
ath_hal_startpcurecv(ah);
ATH_RX_UNLOCK(sc);
return (0);
}
@@ -300,16 +309,21 @@ ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
struct ath_desc *ds;
struct ath_buf *bf;
struct mbuf *m;
HAL_STATUS status;
struct ath_hal *ah = sc->sc_ah;
uint64_t tsf;
int16_t nf;
int ngood = 0;
ath_bufhead rxlist;
struct ath_buf *next;
TAILQ_INIT(&rxlist);
tsf = ath_hal_gettsf64(ah);
nf = ath_hal_getchannoise(ah, sc->sc_curchan);
sc->sc_stats.ast_rx_noise = nf;
ATH_RX_LOCK(sc);
do {
bf = re->m_fifo[re->m_fifo_head];
/* This shouldn't occur! */
@@ -330,12 +344,13 @@ ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
BUS_DMASYNC_POSTREAD);
rs = &bf->bf_status.ds_rxstat;
status = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr, NULL, rs);
bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
NULL, rs);
#ifdef ATH_DEBUG
if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
ath_printrxbuf(sc, bf, 0, status == HAL_OK);
ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
#endif
if (status == HAL_EINPROGRESS)
if (bf->bf_rxstatus == HAL_EINPROGRESS)
break;
/*
@@ -349,21 +364,11 @@ ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
"%s: Q%d: completed!\n", __func__, qtype);
/*
* Remove the FIFO entry!
* Remove the FIFO entry and place it on the completion
* queue.
*/
re->m_fifo[re->m_fifo_head] = NULL;
/*
* Skip the RX descriptor status - start at the data offset
*/
m_adj(m, sc->sc_rx_statuslen);
/* Handle the frame */
if (ath_rx_pkt(sc, rs, status, tsf, nf, qtype, bf))
ngood++;
/* Free the buffer/mbuf */
ath_edma_rxbuf_free(sc, bf);
TAILQ_INSERT_TAIL(&rxlist, bf, bf_list);
/* Bump the descriptor FIFO stats */
INCR(re->m_fifo_head, re->m_fifolen);
@@ -371,6 +376,36 @@ ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
/* XXX check it doesn't fall below 0 */
} while (re->m_fifo_depth > 0);
/* Append some more fresh frames to the FIFO */
if (dosched)
ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);
ATH_RX_UNLOCK(sc);
/* Handle the completed descriptors */
TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
/*
* Skip the RX descriptor status - start at the data offset
*/
m_adj(bf->bf_m, sc->sc_rx_statuslen);
/* Handle the frame */
/*
* Note: this may or may not free bf->bf_m and sync/unmap
* the frame.
*/
if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf))
ngood++;
}
/* Free in one set, inside the lock */
ATH_RX_LOCK(sc);
TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
/* Free the buffer/mbuf */
ath_edma_rxbuf_free(sc, bf);
}
ATH_RX_UNLOCK(sc);
/* Handle resched and kickpcu appropriately */
ATH_PCU_LOCK(sc);
if (dosched && sc->sc_kickpcu) {
@@ -386,10 +421,6 @@ ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
}
ATH_PCU_UNLOCK(sc);
/* Append some more fresh frames to the FIFO */
if (dosched)
ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);
return (ngood);
}
@@ -435,6 +466,8 @@ ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
int error;
int len;
ATH_RX_LOCK_ASSERT(sc);
// device_printf(sc->sc_dev, "%s: called; bf=%p\n", __func__, bf);
m = m_getm(NULL, sc->sc_edma_bufsize, M_DONTWAIT, MT_DATA);
@@ -498,6 +531,8 @@ ath_edma_rxbuf_alloc(struct ath_softc *sc)
struct ath_buf *bf;
int error;
ATH_RX_LOCK_ASSERT(sc);
/* Allocate buffer */
bf = TAILQ_FIRST(&sc->sc_rxbuf);
/* XXX shouldn't happen upon startup? */
@@ -526,6 +561,9 @@ static void
ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf)
{
ATH_RX_LOCK_ASSERT(sc);
/* We're doing this multiple times? */
bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
if (bf->bf_m) {
@@ -550,6 +588,8 @@ ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
struct ath_buf *bf;
int i;
ATH_RX_LOCK_ASSERT(sc);
/*
* Allocate buffers until the FIFO is full or nbufs is reached.
*/
@@ -616,6 +656,8 @@ ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
int i;
ATH_RX_LOCK_ASSERT(sc);
for (i = 0; i < re->m_fifolen; i++) {
if (re->m_fifo[i] != NULL) {
#ifdef ATH_DEBUG
@@ -647,6 +689,8 @@ ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
ATH_RX_LOCK_ASSERT(sc);
if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
__func__,
@@ -704,8 +748,10 @@ ath_edma_dma_rxsetup(struct ath_softc *sc)
if (error != 0)
return error;
ATH_RX_LOCK(sc);
(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
ATH_RX_UNLOCK(sc);
return (0);
}
@@ -716,11 +762,13 @@ ath_edma_dma_rxteardown(struct ath_softc *sc)
device_printf(sc->sc_dev, "%s: called\n", __func__);
ATH_RX_LOCK(sc);
ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);
ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
ATH_RX_UNLOCK(sc);
/* Free RX ath_buf */
/* Free RX DMA tag */


@@ -189,6 +189,7 @@ struct ath_buf {
TAILQ_ENTRY(ath_buf) bf_list;
struct ath_buf * bf_next; /* next buffer in the aggregate */
int bf_nseg;
HAL_STATUS bf_rxstatus;
uint16_t bf_flags; /* status flags (below) */
struct ath_desc *bf_desc; /* virtual addr of desc */
struct ath_desc_status bf_status; /* tx/rx status */