
- Use bus_dmamap_unload(); it is not optional.

- The new allocator won't return coherent memory for any size > PAGE_SIZE,
  so don't assume we have coherent memory, and explicitly use
  bus_dmamap_sync() (see the sketch below).
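
Below is a minimal sketch of the busdma pattern both points describe: the map is explicitly unloaded before it is reused for a new mbuf, and the shared descriptor memory is bracketed with bus_dmamap_sync() calls instead of relying on BUS_DMA_COHERENT. This is illustrative only, not code from this commit; the function and parameter names (example_load_rxbuf, mtag, desc_tag, ...) are placeholders.

/*
 * Sketch only: generic tag/map names, not the driver's own identifiers.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/mbuf.h>
#include <machine/bus.h>

static int
example_load_rxbuf(bus_dma_tag_t mtag, bus_dmamap_t map,
    bus_dma_tag_t desc_tag, bus_dmamap_t desc_map, struct mbuf *m)
{
	bus_dma_segment_t segs[1];
	int error, nseg;

	/* The map may still reference a previous buffer: unload it first. */
	bus_dmamap_unload(mtag, map);

	error = bus_dmamap_load_mbuf_sg(mtag, map, m, segs, &nseg, 0);
	if (error != 0)
		return (error);

	/* ... write segs[0].ds_addr / ds_len into the hardware descriptor ... */

	/*
	 * Push CPU writes to the shared descriptor out to memory before the
	 * device looks at it, and prepare it for the device's own writeback.
	 */
	bus_dmamap_sync(desc_tag, desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Prepare the receive buffer itself for writes by the device. */
	bus_dmamap_sync(mtag, map, BUS_DMASYNC_PREREAD);
	return (0);
}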
Olivier Houchard 2013-10-22 21:51:07 +00:00
parent 6958ef4ef5
commit 63e950fe80
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=256943

@@ -507,7 +507,6 @@ npe_dma_setup(struct npe_softc *sc, struct npedma *dma,
dma->name, error);
return error;
}
- /* XXX COHERENT for now */
if (bus_dmamem_alloc(dma->buf_tag, (void **)&dma->hwbuf,
BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
&dma->buf_map) != 0) {
@@ -1073,6 +1072,7 @@ npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m)
m->m_pkthdr.len = m->m_len = 1536;
/* backload payload and align ip hdr */
m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size - (1536+ETHER_ALIGN));
+ bus_dmamap_unload(dma->mtag, npe->ix_map);
error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, m,
segs, &nseg, 0);
if (error != 0) {
@@ -1085,6 +1085,8 @@ npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m)
/* NB: buffer length is shifted in word */
hw->ix_ne[0].len = htobe32(segs[0].ds_len << 16);
hw->ix_ne[0].next = 0;
+ bus_dmamap_sync(dma->buf_tag, dma->buf_map,
+     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
npe->ix_m = m;
/* Flush the memory in the mbuf */
bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREREAD);
@@ -1110,6 +1112,8 @@ npe_rxdone(int qid, void *arg)
struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma);
struct mbuf *m;
+ bus_dmamap_sync(dma->buf_tag, dma->buf_map,
+     BUS_DMASYNC_POSTREAD);
DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n",
__func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len);
/*
@@ -1130,7 +1134,6 @@ npe_rxdone(int qid, void *arg)
bus_dmamap_sync(dma->mtag, npe->ix_map,
BUS_DMASYNC_POSTREAD);
- /* XXX flush hw buffer; works now 'cuz coherent */
/* set m_len etc. per rx frame size */
mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff;
mrx->m_pkthdr.len = mrx->m_len;
@@ -1313,6 +1316,7 @@ npestart_locked(struct ifnet *ifp)
return;
}
npe = sc->tx_free;
+ bus_dmamap_unload(dma->mtag, npe->ix_map);
error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
m, segs, &nseg, 0);
if (error == EFBIG) {
@@ -1355,7 +1359,8 @@ npestart_locked(struct ifnet *ifp)
next += sizeof(hw->ix_ne[0]);
}
hw->ix_ne[i-1].next = 0; /* zero last in chain */
- /* XXX flush descriptor instead of using uncached memory */
+ bus_dmamap_sync(dma->buf_tag, dma->buf_map,
+     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n",
__func__, sc->tx_qid, npe->ix_neaddr,