- Use bus_describe_intr() to describe interrupt usage.
- Use bus_bind_intr() to bind an interrupt to a CPU when RSS/TSS is used.
- Use M_DONTWAIT for RSS/TSS buffer allocation.
- Add a statistic to track the maximum DRBR queue depth.
- Fix a problem in bxe_change_mtu() which referenced the old MTU size in a
  debug print statement.

MFC after: Two weeks
commit 8fedd5eb65
parent df7c03cf8b

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=221826
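For context, the interrupt-related changes in the diff below follow the usual FreeBSD per-vector pattern: bus_setup_intr(9) registers the handler, bus_describe_intr(9) (available since __FreeBSD_version 800504) attaches a short label that shows up in interrupt listings such as vmstat -i, and bus_bind_intr(9) pins the vector to a CPU. The sketch below is illustrative only and is not part of this commit; the helper name example_setup_fp_vector and its argument list are assumptions, not driver code.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>

/* Illustrative sketch only -- not part of the commit or the bxe driver. */
static int
example_setup_fp_vector(device_t dev, struct resource *irq,
    driver_intr_t *handler, void *arg, void **cookiep, int cpu)
{
        int rc;

        /* Register an ithread-style handler for this MSI-X vector. */
        rc = bus_setup_intr(dev, irq, INTR_TYPE_NET | INTR_MPSAFE,
            NULL, handler, arg, cookiep);
        if (rc != 0)
                return (rc);

#if __FreeBSD_version >= 800504
        /* Attach a short description so the vector is identifiable. */
        bus_describe_intr(dev, irq, *cookiep, "fp[%02d]", cpu);
#endif

        /* Spread fastpath vectors across CPUs. */
        bus_bind_intr(dev, irq, cpu);

        return (0);
}

In the driver itself the description is compiled in only when __FreeBSD_version >= 800504, and binding is done only when more than one queue (RSS/TSS) is in use, as the hunks below show.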
@@ -1330,9 +1330,9 @@ bxe_interrupt_attach(struct bxe_softc *sc)
         /* Setup the slowpath deferred task queue. */
         TASK_INIT(&sc->task, 0, bxe_task_sp, sc);
         sc->tq = taskqueue_create_fast("bxe_spq", M_NOWAIT,
-            taskqueue_thread_enqueue, &sc->tq);
+            taskqueue_thread_enqueue, &sc->tq);
         taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s spq",
-            device_get_nameunit(sc->dev));
+            device_get_nameunit(sc->dev));
 #endif

         /* Setup interrupt handlers. */
@@ -1359,13 +1359,19 @@ bxe_interrupt_attach(struct bxe_softc *sc)
                 goto bxe_interrupt_attach_exit;
         }

+#if __FreeBSD_version >= 800504
+        bus_describe_intr(sc->dev,
+            sc->bxe_msix_res[0],
+            sc->bxe_msix_tag[0],
+            "sp");
+#endif
+
         /* Now initialize the fastpath vectors. */
         for (i = 0; i < (sc->num_queues); i++) {
                 fp = &sc->fp[i];
-                DBPRINT(sc,
-                    (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
-                    "%s(): Enabling MSI-X[%d] vector.\n",
-                    __FUNCTION__, i + 1);
+                DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
+                    "%s(): Enabling MSI-X[%d] vector.\n",
+                    __FUNCTION__, i + 1);
                 /*
                  * Setup the interrupt handler. Note that we pass the
                  * fastpath context to the interrupt handler in this
@@ -1377,8 +1383,7 @@ bxe_interrupt_attach(struct bxe_softc *sc)
                     NULL,
                     bxe_intr_fp,
                     fp,
-                    &sc->bxe_msix_tag[i + 1]
-                    );
+                    &sc->bxe_msix_tag[i + 1]);

                 if (rc) {
                         BXE_PRINTF(
@@ -1386,6 +1391,21 @@ bxe_interrupt_attach(struct bxe_softc *sc)
                             __FILE__, __LINE__, (i + 1));
                         goto bxe_interrupt_attach_exit;
                 }
+
+#if __FreeBSD_version >= 800504
+                bus_describe_intr(sc->dev,
+                    sc->bxe_msix_res[i + 1],
+                    sc->bxe_msix_tag[i + 1],
+                    "fp[%02d]",
+                    i);
+#endif
+
+                /* Bind the fastpath instance to a CPU. */
+                if (sc->num_queues > 1) {
+                        bus_bind_intr(sc->dev,
+                            sc->bxe_msix_res[i + 1], i);
+                }
+
 #ifdef BXE_TASK
                 TASK_INIT(&fp->task, 0, bxe_task_fp, fp);
                 fp->tq = taskqueue_create_fast("bxe_fpq", M_NOWAIT,
@@ -1418,6 +1438,13 @@ bxe_interrupt_attach(struct bxe_softc *sc)
                 goto bxe_interrupt_attach_exit;
         }

+#if __FreeBSD_version >= 800504
+        bus_describe_intr(sc->dev,
+            sc->bxe_msi_res[0],
+            sc->bxe_msi_tag[0],
+            "sp");
+#endif
+
         /* Now initialize the fastpath vectors. */
         for (i = 0; i < (sc->num_queues); i++) {
                 fp = &sc->fp[i];
@@ -1445,6 +1472,15 @@ bxe_interrupt_attach(struct bxe_softc *sc)
                             __FILE__, __LINE__, (i + 1));
                         goto bxe_interrupt_attach_exit;
                 }
+
+#if __FreeBSD_version >= 800504
+                bus_describe_intr(sc->dev,
+                    sc->bxe_msi_res[i + 1],
+                    sc->bxe_msi_tag[i + 1],
+                    "fp[%02d]",
+                    i);
+#endif
+
 #ifdef BXE_TASK
                 TASK_INIT(&fp->task, 0, bxe_task_fp, fp);
                 fp->tq = taskqueue_create_fast("bxe_fpq", M_NOWAIT,
@@ -3646,7 +3682,7 @@ bxe_alloc_buf_rings(struct bxe_softc *sc)

                 if (fp != NULL) {
                         fp->br = buf_ring_alloc(BXE_BR_SIZE,
-                            M_DEVBUF, M_WAITOK, &fp->mtx);
+                            M_DEVBUF, M_DONTWAIT, &fp->mtx);
                         if (fp->br == NULL) {
                                 rc = ENOMEM;
                                 return(rc);
@@ -9404,7 +9440,7 @@ bxe_tx_mq_start(struct ifnet *ifp, struct mbuf *m)


 /*
- * Multiqueue (RSS) transmit routine.
+ * Multiqueue (TSS) transmit routine.
  *
  * Returns:
  *   0 if transmit succeeds, !0 otherwise.
@@ -9415,14 +9451,18 @@ bxe_tx_mq_start_locked(struct ifnet *ifp,
 {
         struct bxe_softc *sc;
         struct mbuf *next;
-        int rc = 0, tx_count = 0;
+        int depth, rc = 0, tx_count = 0;

         sc = fp->sc;

         DBENTER(BXE_EXTREME_SEND);
+        depth = drbr_inuse(ifp, fp->br);
+        if (depth > fp->max_drbr_queue_depth) {
+                fp->max_drbr_queue_depth = depth;
+        }
         DBPRINT(sc, BXE_EXTREME_SEND,
             "%s(): fp[%02d], drbr queue depth=%d\n",
-            __FUNCTION__, fp->index, drbr_inuse(ifp, fp->br));
+            __FUNCTION__, fp->index, depth);

         BXE_FP_LOCK_ASSERT(fp);

@@ -10509,11 +10549,11 @@ bxe_alloc_mbuf(struct bxe_fastpath *fp, int size)

         /* Check whether the allocation succeeded and handle a failure. */
         if (__predict_false(m_new == NULL)) {
-                DBPRINT(sc, BXE_WARN, "%s(): mbuf allocation failure!\n",
-                    __FUNCTION__);
+                DBPRINT(sc, BXE_WARN, "%s(): Failed to allocate %d byte "
+                    "mbuf on fp[%02d]!\n", __FUNCTION__, size, fp->index);
                 fp->mbuf_alloc_failed++;
-                goto bxe_alloc_mbuf_exit;
-        }
+                goto bxe_alloc_mbuf_exit;
+        }

         /* Do a little extra error checking when debugging. */
         DBRUN(M_ASSERTPKTHDR(m_new));
@@ -10556,7 +10596,7 @@ bxe_map_mbuf(struct bxe_fastpath *fp, struct mbuf *m, bus_dma_tag_t tag,
                     __FUNCTION__);
                 sc->debug_mbuf_sim_map_failed++;
                 fp->mbuf_alloc_failed++;
-                DBRUN(sc->debug_memory_allocated -= m->m_len);
+                sc->debug_memory_allocated -= m->m_len;
                 m_freem(m);
                 rc = EINVAL;
                 goto bxe_map_mbuf_exit;
@@ -10568,10 +10608,11 @@ bxe_map_mbuf(struct bxe_fastpath *fp, struct mbuf *m, bus_dma_tag_t tag,

         /* Handle any mapping errors. */
         if (__predict_false(rc)) {
-                DBPRINT(sc, BXE_WARN, "%s(): mbuf mapping failure (%d)!\n",
-                    __FUNCTION__, rc);
-                m_freem(m);
+                DBPRINT(sc, BXE_WARN, "%s(): mbuf mapping failure (%d) on "
+                    "fp[%02d]!\n", __FUNCTION__, rc, fp->index);
+                fp->mbuf_alloc_failed++;
+                DBRUN(sc->debug_memory_allocated -= m->m_len);
+                m_freem(m);
                 goto bxe_map_mbuf_exit;
         }

@@ -10583,6 +10624,7 @@ bxe_map_mbuf(struct bxe_fastpath *fp, struct mbuf *m, bus_dma_tag_t tag,
         *seg = segs[0];

 bxe_map_mbuf_exit:
+        DBEXIT(BXE_INSANE);
         return (rc);
 }

@@ -10961,6 +11003,19 @@ bxe_init_rx_chains(struct bxe_softc *sc)
                 }
         }

+        /*
+         * ToDo: Need a cleanup path if memory allocation
+         * fails during initialization. This is especially
+         * easy if multiqueue is used on a system with
+         * jumbo frames and many CPUs. On my 16GB system
+         * with 8 CPUs I get the following defaults:
+         *
+         * kern.ipc.nmbjumbo16: 3200
+         * kern.ipc.nmbjumbo9: 6400
+         * kern.ipc.nmbjumbop: 12800
+         * kern.ipc.nmbclusters: 25600
+         */
+
         DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
 }

@@ -15554,7 +15609,7 @@ bxe_txeof(struct bxe_fastpath *fp)
 #endif

                 txbd =
-                    &fp->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)].start_bd;
+                    &fp->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)].start_bd;

 #ifdef BXE_DEBUG
                 if (txbd == NULL) {
@@ -15764,7 +15819,7 @@ bxe_change_mtu(struct bxe_softc *sc, int if_drv_running)
         sc->bxe_ifp->if_mtu = ifp->if_mtu;
         if (if_drv_running) {
                 DBPRINT(sc, BXE_INFO_IOCTL, "%s(): Changing the MTU to %d.\n",
-                    __FUNCTION__, sc->port.ether_mtu);
+                    __FUNCTION__, sc->bxe_ifp->if_mtu);

                 bxe_stop_locked(sc, UNLOAD_NORMAL);
                 bxe_init_locked(sc, LOAD_NORMAL);
@@ -16315,6 +16370,12 @@ bxe_add_sysctls(struct bxe_softc *sc)
                     "tx_chain_lost_mbuf",
                     CTLFLAG_RD, &fp->tx_chain_lost_mbuf,
                     "Mbufs lost on TX chain count");
+
+                SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO,
+                    "max_drbr_queue_depth",
+                    CTLFLAG_RD, &fp->max_drbr_queue_depth,
+                    0, "Driver queue maximum depth");
+
 #ifdef BXE_DEBUG
                 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
                     "null_cqe_flags",
@@ -1110,6 +1110,8 @@ struct bxe_fastpath {
         int                     sge_mbuf_alloc;
         int                     tpa_mbuf_alloc;

+        int                     max_drbr_queue_depth;
+
         uint64_t                tpa_queue_used;

         unsigned long           null_cqe_flags;
@@ -1148,7 +1150,6 @@ struct bxe_fastpath {
 #define BXE_TX_CHAIN_PAGE_SZ    BCM_PAGE_SIZE
 #define BXE_RX_CHAIN_PAGE_SZ    BCM_PAGE_SIZE

 /* ToDo: Audit this structure for unused variables. */
 struct bxe_softc {
         struct ifnet            *bxe_ifp;
         int                     media;