- enable multiple transmit queues
- invert sense of hw.cxgb.singleq tunable to hw.cxgb.multiq
- don't wake up transmitting thread by default
- add per tx queue ifaltq to handle ALTQ
- remove several unused functions in cxgb_multiq.c
- add several sysctls: multiq_tx_enable, coalesce_tx_enable, and wakeup_tx_thread
- this obsoletes hw.cxgb.snd_queue_len, as the ifq is replaced by a buf_ring
This commit is contained in:
parent aea78d2094
commit a02573bc06
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=185165
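
A usage sketch of the inverted tunable (illustrative, assuming only the tunables as declared in this diff):

    # Before this commit, one queue-set per port was forced with:
    #   hw.cxgb.singleq="1"
    # The sense is now inverted; multiqueue is the default and is disabled with:
    hw.cxgb.multiq="0"

    # hw.cxgb.snd_queue_len is gone: the ifq it sized is replaced by a buf_ring,
    # whose size is set by the existing hw.cxgb.txq_mr_size tunable.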
@@ -260,7 +260,9 @@ struct sge_txq {
 	 */
 	struct mbuf_head cleanq;
+	struct buf_ring *txq_mr;
+	struct ifaltq *txq_ifq;
 	struct mbuf *immpkt;
 
 	uint32_t txq_drops;
 	uint32_t txq_skipped;
 	uint32_t txq_coalesced;
@@ -604,7 +606,7 @@ static inline int offload_running(adapter_t *adapter)
 }
 
 int cxgb_pcpu_enqueue_packet(struct ifnet *ifp, struct mbuf *m);
-int cxgb_pcpu_start(struct ifnet *ifp, struct mbuf *m);
+int cxgb_pcpu_transmit(struct ifnet *ifp, struct mbuf *m);
 void cxgb_pcpu_shutdown_threads(struct adapter *sc);
 void cxgb_pcpu_startup_threads(struct adapter *sc);
 
@@ -32,6 +32,7 @@ POSSIBILITY OF SUCH DAMAGE.
 #ifndef _CXGB_CONFIG_H_
 #define _CXGB_CONFIG_H_
 
 #define CONFIG_CHELSIO_T3_CORE
+#define IFNET_MULTIQUEUE
 
 #endif
@@ -206,17 +206,17 @@ SYSCTL_UINT(_hw_cxgb, OID_AUTO, ofld_disable, CTLFLAG_RDTUN, &ofld_disable, 0,
 
 /*
  * The driver uses an auto-queue algorithm by default.
- * To disable it and force a single queue-set per port, use singleq = 1.
+ * To disable it and force a single queue-set per port, use multiq = 0
  */
-static int singleq = 0;
-TUNABLE_INT("hw.cxgb.singleq", &singleq);
-SYSCTL_UINT(_hw_cxgb, OID_AUTO, singleq, CTLFLAG_RDTUN, &singleq, 0,
-    "use a single queue-set per port");
+static int multiq = 1;
+TUNABLE_INT("hw.cxgb.multiq", &multiq);
+SYSCTL_UINT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0,
+    "use min(ncpus/ports, 8) queue-sets per port");
 
 /*
- * The driver uses an auto-queue algorithm by default.
- * To disable it and force a single queue-set per port, use singleq = 1.
  * By default the driver will not update the firmware unless
  * it was compiled against a newer version
  *
  */
 static int force_fw_update = 0;
 TUNABLE_INT("hw.cxgb.force_fw_update", &force_fw_update);
@@ -228,15 +228,6 @@ TUNABLE_INT("hw.cxgb.use_16k_clusters", &cxgb_use_16k_clusters);
 SYSCTL_UINT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
     &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue ");
 
-/*
- * Tune the size of the output queue.
- */
-int cxgb_snd_queue_len = IFQ_MAXLEN;
-TUNABLE_INT("hw.cxgb.snd_queue_len", &cxgb_snd_queue_len);
-SYSCTL_UINT(_hw_cxgb, OID_AUTO, snd_queue_len, CTLFLAG_RDTUN,
-    &cxgb_snd_queue_len, 0, "send queue size ");
-
-
 enum {
 	MAX_TXQ_ENTRIES = 16384,
 	MAX_CTRL_TXQ_ENTRIES = 1024,
@@ -368,8 +359,8 @@ cxgb_controller_probe(device_t dev)
 		ports = "ports";
 
 	snprintf(buf, sizeof(buf), "%s %sNIC, rev: %d nports: %d %s",
-		 ai->desc, is_offload(sc) ? "R" : "",
-		 sc->params.rev, nports, ports);
+	    ai->desc, is_offload(sc) ? "R" : "",
+	    sc->params.rev, nports, ports);
 	device_set_desc_copy(dev, buf);
 	return (BUS_PROBE_DEFAULT);
 }
@@ -415,8 +406,6 @@ cxgb_controller_attach(device_t dev)
 	int msi_needed, reg;
 #endif
-	int must_load = 0;
-	char buf[80];
 
 	sc = device_get_softc(dev);
 	sc->dev = dev;
 	sc->msi_count = 0;
@@ -537,7 +526,7 @@ cxgb_controller_attach(device_t dev)
 		sc->cxgb_intr = t3b_intr;
 	}
 
-	if ((sc->flags & USING_MSIX) && !singleq)
+	if ((sc->flags & USING_MSIX) && multiq)
 		port_qsets = min((SGE_QSETS/(sc)->params.nports), mp_ncpus);
 
 	/* Create a private taskqueue thread for handling driver events */
@@ -629,11 +618,6 @@ cxgb_controller_attach(device_t dev)
 	    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
 	    G_FW_VERSION_MICRO(vers));
 
-	snprintf(buf, sizeof(buf), "%s\t E/C: %s S/N: %s",
-	    ai->desc,
-	    sc->params.vpd.ec, sc->params.vpd.sn);
-	device_set_desc_copy(dev, buf);
-
 	device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
 	callout_reset(&sc->cxgb_tick_ch, CXGB_TICKS(sc), cxgb_tick, sc);
 	t3_add_attach_sysctls(sc);
@@ -843,15 +827,18 @@ cxgb_setup_msix(adapter_t *sc, int msix_count)
 				device_printf(sc->dev, "Cannot set up "
 				    "interrupt for message %d\n", rid);
 				return (EINVAL);
 
 			}
+#if 0
 #ifdef IFNET_MULTIQUEUE
-			if (singleq == 0) {
+			if (multiq) {
 				int vector = rman_get_start(sc->msix_irq_res[k]);
 				if (bootverbose)
 					device_printf(sc->dev, "binding vector=%d to cpu=%d\n", vector, k % mp_ncpus);
 				intr_bind(vector, k % mp_ncpus);
 			}
 #endif
+#endif
 		}
 	}
 
@@ -941,16 +928,11 @@ cxgb_port_attach(device_t dev)
 	ifp->if_ioctl = cxgb_ioctl;
 	ifp->if_start = cxgb_start;
 
-#if 0
-#ifdef IFNET_MULTIQUEUE
-	ifp->if_flags |= IFF_MULTIQ;
-	ifp->if_mq_start = cxgb_pcpu_start;
-#endif
-#endif
 	ifp->if_timer = 0;	/* Disable ifnet watchdog */
 	ifp->if_watchdog = NULL;
 
-	ifp->if_snd.ifq_drv_maxlen = cxgb_snd_queue_len;
+	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
 	IFQ_SET_READY(&ifp->if_snd);
 
@@ -968,6 +950,8 @@ cxgb_port_attach(device_t dev)
 	}
 
 	ether_ifattach(ifp, p->hw_addr);
+
+	ifp->if_transmit = cxgb_pcpu_transmit;
 	/*
 	 * Only default to jumbo frames on 10GigE
 	 */
@@ -27,8 +27,6 @@ POSSIBILITY OF SUCH DAMAGE.
 
 ***************************************************************************/
 
-#define DEBUG_BUFRING
-
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
@@ -89,12 +87,11 @@ __FBSDID("$FreeBSD$");
 #include <sys/mvec.h>
 
 extern int txq_fills;
-extern struct sysctl_oid_list sysctl__hw_cxgb_children;
-static int cxgb_pcpu_tx_coalesce = 0;
-TUNABLE_INT("hw.cxgb.tx_coalesce", &cxgb_pcpu_tx_coalesce);
-SYSCTL_UINT(_hw_cxgb, OID_AUTO, tx_coalesce, CTLFLAG_RDTUN, &cxgb_pcpu_tx_coalesce, 0,
-    "coalesce small packets into a single work request");
+int multiq_tx_enable = 1;
+int coalesce_tx_enable = 0;
+int wakeup_tx_thread = 0;
 
+extern struct sysctl_oid_list sysctl__hw_cxgb_children;
 static int sleep_ticks = 1;
 TUNABLE_INT("hw.cxgb.sleep_ticks", &sleep_ticks);
 SYSCTL_UINT(_hw_cxgb, OID_AUTO, sleep_ticks, CTLFLAG_RDTUN, &sleep_ticks, 0,
@@ -106,22 +103,19 @@ SYSCTL_UINT(_hw_cxgb, OID_AUTO, txq_mr_size, CTLFLAG_RDTUN, &cxgb_txq_buf_ring_s
     "size of per-queue mbuf ring");
 
 
-static inline int32_t cxgb_pcpu_calc_cookie(struct ifnet *ifp, struct mbuf *immpkt);
 static void cxgb_pcpu_start_proc(void *arg);
-#ifdef IFNET_MULTIQUEUE
-static int cxgb_pcpu_cookie_to_qidx(struct port_info *, uint32_t cookie);
-#endif
 static int cxgb_tx(struct sge_qset *qs, uint32_t txmax);
 
+#ifdef IFNET_MULTIQUEUE
+static int cxgb_pcpu_cookie_to_qidx(struct port_info *pi, uint32_t cookie);
+#endif
+
 static inline int
 cxgb_pcpu_enqueue_packet_(struct sge_qset *qs, struct mbuf *m)
 {
 	struct sge_txq *txq;
 	int err = 0;
 
-#ifndef IFNET_MULTIQUEUE
-	panic("not expecting enqueue without multiqueue");
-#endif
 	KASSERT(m != NULL, ("null mbuf"));
 	KASSERT(m->m_type == MT_DATA, ("bad mbuf type %d", m->m_type));
 	if (qs->qs_flags & QS_EXITING) {
@@ -134,9 +128,9 @@ cxgb_pcpu_enqueue_packet_(struct sge_qset *qs, struct mbuf *m)
 		txq->txq_drops++;
 		m_freem(m);
 	}
-	if ((qs->txq[TXQ_ETH].flags & TXQ_TRANSMITTING) == 0)
+	if (wakeup_tx_thread && ((txq->flags & TXQ_TRANSMITTING) == 0))
 		wakeup(qs);
 
 	return (err);
 }
 
@@ -149,29 +143,34 @@ cxgb_pcpu_enqueue_packet(struct ifnet *ifp, struct mbuf *m)
 #ifdef IFNET_MULTIQUEUE
 	int32_t calc_cookie;
 
-	calc_cookie = m->m_pkthdr.rss_hash;
+	calc_cookie = m->m_pkthdr.flowid;
 	qidx = cxgb_pcpu_cookie_to_qidx(pi, calc_cookie);
 #else
 	qidx = 0;
 #endif
 	qs = &pi->adapter->sge.qs[qidx];
-	err = cxgb_pcpu_enqueue_packet_(qs, m);
+	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
+		IFQ_ENQUEUE(qs->txq[0].txq_ifq, m, err);
+	} else {
+		err = cxgb_pcpu_enqueue_packet_(qs, m);
+	}
 	return (err);
 }
 
 static int
 cxgb_dequeue_packet(struct sge_txq *txq, struct mbuf **m_vec)
 {
-	struct mbuf *m;
+	struct mbuf *m, *m0;
 	struct sge_qset *qs;
 	int count, size, coalesced;
 	struct adapter *sc;
 
 #ifndef IFNET_MULTIQUEUE
 	struct port_info *pi = txq->port;
 
-	mtx_assert(&txq->lock, MA_OWNED);
 	if (txq->immpkt != NULL)
 		panic("immediate packet set");
+	mtx_assert(&txq->lock, MA_OWNED);
 
 	IFQ_DRV_DEQUEUE(&pi->ifp->if_snd, m);
 	if (m == NULL)
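The split above is the heart of the ALTQ support: when a scheduler is enabled on the interface, packets must go through the per-queue ifaltq (txq_ifq) via IFQ_ENQUEUE so the discipline controls dequeue order, and only the plain case feeds the lock-free buf_ring. cxgb_dequeue_packet mirrors the same test on the consumer side in the next hunk.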
@@ -180,7 +179,16 @@ cxgb_dequeue_packet(struct sge_txq *txq, struct mbuf **m_vec)
 	m_vec[0] = m;
 	return (1);
 #endif
 
+	if (ALTQ_IS_ENABLED(txq->txq_ifq)) {
+		IFQ_DRV_DEQUEUE(txq->txq_ifq, m);
+		if (m == NULL)
+			return (0);
+
+		m_vec[0] = m;
+		return (1);
+	}
+
 	mtx_assert(&txq->lock, MA_OWNED);
 	coalesced = count = size = 0;
 	qs = txq_to_qset(txq, TXQ_ETH);
 	if (qs->qs_flags & QS_EXITING)
@@ -199,9 +207,10 @@ cxgb_dequeue_packet(struct sge_txq *txq, struct mbuf **m_vec)
 		return (0);
 
 	count = 1;
+
 	m_vec[0] = m;
 	if (m->m_pkthdr.tso_segsz > 0 || m->m_pkthdr.len > TX_WR_SIZE_MAX ||
-	    m->m_next != NULL || (cxgb_pcpu_tx_coalesce == 0)) {
+	    m->m_next != NULL || (coalesce_tx_enable == 0)) {
 		return (count);
 	}
 
@@ -213,7 +222,11 @@ cxgb_dequeue_packet(struct sge_txq *txq, struct mbuf **m_vec)
 		    size + m->m_pkthdr.len > TX_WR_SIZE_MAX || m->m_next != NULL)
 			break;
 
-		buf_ring_dequeue_sc(txq->txq_mr);
+		m0 = buf_ring_dequeue_sc(txq->txq_mr);
+#ifdef DEBUG_BUFRING
+		if (m0 != m)
+			panic("peek and dequeue don't match");
+#endif
 		size += m->m_pkthdr.len;
 		m_vec[count++] = m;
 
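The loop above peeks at the ring head, checks the size budget against TX_WR_SIZE_MAX, and only then commits the dequeue; the DEBUG_BUFRING panic asserts that peek and dequeue saw the same mbuf. A standalone sketch of that single-consumer pattern in plain userspace C, with a toy ring standing in for buf_ring(9) (all names and sizes here are illustrative, not taken from the driver):

    #include <assert.h>
    #include <stdio.h>

    #define RING_SIZE    64     /* power of two, like the txq_mr ring */
    #define WR_SIZE_MAX  256    /* stands in for TX_WR_SIZE_MAX */
    #define WR_COUNT_MAX 8      /* max packets folded into one work request */

    struct pkt { int len; };

    struct ring {
    	struct pkt *slot[RING_SIZE];
    	unsigned prod, cons;            /* single producer, single consumer */
    };

    static int ring_enqueue(struct ring *r, struct pkt *p) {
    	if (r->prod - r->cons == RING_SIZE)
    		return (-1);            /* full */
    	r->slot[r->prod++ & (RING_SIZE - 1)] = p;
    	return (0);
    }

    static struct pkt *ring_peek(struct ring *r) {
    	return (r->prod == r->cons) ? NULL : r->slot[r->cons & (RING_SIZE - 1)];
    }

    static struct pkt *ring_dequeue(struct ring *r) {
    	struct pkt *p = ring_peek(r);
    	if (p != NULL)
    		r->cons++;
    	return (p);
    }

    /* Gather up to WR_COUNT_MAX packets whose total size fits one work request. */
    static int dequeue_coalesced(struct ring *r, struct pkt **vec) {
    	struct pkt *p, *p0;
    	int count = 0, size = 0;

    	while (count < WR_COUNT_MAX && (p = ring_peek(r)) != NULL) {
    		if (size + p->len > WR_SIZE_MAX)
    			break;
    		p0 = ring_dequeue(r);
    		assert(p0 == p);        /* "peek and dequeue don't match" */
    		size += p->len;
    		vec[count++] = p;
    	}
    	return (count);
    }

    int main(void) {
    	struct ring r = { .prod = 0, .cons = 0 };
    	struct pkt pkts[4] = { {100}, {100}, {100}, {100} };
    	struct pkt *vec[WR_COUNT_MAX];
    	int i;

    	for (i = 0; i < 4; i++)
    		ring_enqueue(&r, &pkts[i]);
    	/* 100+100 fits the 256-byte budget; the third packet would push it to 300 */
    	printf("coalesced %d packets\n", dequeue_coalesced(&r, vec));
    	return (0);
    }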
@@ -227,134 +240,6 @@ cxgb_dequeue_packet(struct sge_txq *txq, struct mbuf **m_vec)
 	return (count);
 }
 
-static int32_t
-cxgb_pcpu_get_cookie(struct ifnet *ifp, struct in6_addr *lip, uint16_t lport, struct in6_addr *rip, uint16_t rport, int ipv6)
-{
-	uint32_t base;
-	uint8_t buf[36];
-	int count;
-	int32_t cookie;
-
-	critical_enter();
-	/*
-	 * Can definitely bypass bcopy XXX
-	 */
-	if (ipv6 == 0) {
-		count = 12;
-		bcopy(rip, &buf[0], 4);
-		bcopy(lip, &buf[4], 4);
-		bcopy(&rport, &buf[8], 2);
-		bcopy(&lport, &buf[10], 2);
-	} else {
-		count = 36;
-		bcopy(rip, &buf[0], 16);
-		bcopy(lip, &buf[16], 16);
-		bcopy(&rport, &buf[32], 2);
-		bcopy(&lport, &buf[34], 2);
-	}
-
-	base = 0xffffffff;
-	base = update_crc32(base, buf, count);
-	base = sctp_csum_finalize(base);
-
-	/*
-	 * Indirection table is 128 bits
-	 *  -> cookie indexes into indirection table which maps connection to queue
-	 *  -> RSS map maps queue to CPU
-	 */
-	cookie = (base & (RSS_TABLE_SIZE-1));
-	critical_exit();
-
-	return (cookie);
-}
-
-static int32_t
-cxgb_pcpu_calc_cookie(struct ifnet *ifp, struct mbuf *immpkt)
-{
-	struct in6_addr lip, rip;
-	uint16_t lport, rport;
-	struct ether_header *eh;
-	int32_t cookie;
-	struct ip *ip;
-	struct ip6_hdr *ip6;
-	struct tcphdr *th;
-	struct udphdr *uh;
-	struct sctphdr *sh;
-	uint8_t *next, proto;
-	int etype;
-
-	if (immpkt == NULL)
-		return -1;
-
-#if 1
-	/*
-	 * XXX perf test
-	 */
-	return (0);
-#endif
-	rport = lport = 0;
-	cookie = -1;
-	next = NULL;
-	eh = mtod(immpkt, struct ether_header *);
-	etype = ntohs(eh->ether_type);
-
-	switch (etype) {
-	case ETHERTYPE_IP:
-		ip = (struct ip *)(eh + 1);
-		next = (uint8_t *)(ip + 1);
-		bcopy(&ip->ip_src, &lip, 4);
-		bcopy(&ip->ip_dst, &rip, 4);
-		proto = ip->ip_p;
-		break;
-	case ETHERTYPE_IPV6:
-		ip6 = (struct ip6_hdr *)(eh + 1);
-		next = (uint8_t *)(ip6 + 1);
-		bcopy(&ip6->ip6_src, &lip, sizeof(struct in6_addr));
-		bcopy(&ip6->ip6_dst, &rip, sizeof(struct in6_addr));
-		if (ip6->ip6_nxt == IPPROTO_HOPOPTS) {
-			struct ip6_hbh *hbh;
-
-			hbh = (struct ip6_hbh *)(ip6 + 1);
-			proto = hbh->ip6h_nxt;
-		} else
-			proto = ip6->ip6_nxt;
-		break;
-	case ETHERTYPE_ARP:
-	default:
-		/*
-		 * Default to queue zero
-		 */
-		proto = cookie = 0;
-	}
-	if (proto) {
-		switch (proto) {
-		case IPPROTO_TCP:
-			th = (struct tcphdr *)next;
-			lport = th->th_sport;
-			rport = th->th_dport;
-			break;
-		case IPPROTO_UDP:
-			uh = (struct udphdr *)next;
-			lport = uh->uh_sport;
-			rport = uh->uh_dport;
-			break;
-		case IPPROTO_SCTP:
-			sh = (struct sctphdr *)next;
-			lport = sh->src_port;
-			rport = sh->dest_port;
-			break;
-		default:
-			/* nothing to do */
-			break;
-		}
-	}
-
-	if (cookie)
-		cookie = cxgb_pcpu_get_cookie(ifp, &lip, lport, &rip, rport, (etype == ETHERTYPE_IPV6));
-
-	return (cookie);
-}
-
 static void
 cxgb_pcpu_free(struct sge_qset *qs)
 {
@@ -377,10 +262,6 @@ cxgb_pcpu_reclaim_tx(struct sge_txq *txq)
 	int reclaimable;
 	struct sge_qset *qs = txq_to_qset(txq, TXQ_ETH);
 
-#ifdef notyet
-	KASSERT(qs->qs_cpuid == curcpu, ("cpu qset mismatch cpuid=%d curcpu=%d",
-	    qs->qs_cpuid, curcpu));
-#endif
 	mtx_assert(&txq->lock, MA_OWNED);
 
 	reclaimable = desc_reclaimable(txq);
@@ -426,7 +307,8 @@ cxgb_pcpu_start_(struct sge_qset *qs, struct mbuf *immpkt, int tx_flush)
 		initerr = ENXIO;
 	else if (immpkt) {
 
-		if (!buf_ring_empty(txq->txq_mr))
+		if (!buf_ring_empty(txq->txq_mr)
+		    || ALTQ_IS_ENABLED(&pi->ifp->if_snd))
 			initerr = cxgb_pcpu_enqueue_packet_(qs, immpkt);
 		else
 			txq->immpkt = immpkt;
@@ -491,7 +373,7 @@ cxgb_pcpu_start_(struct sge_qset *qs, struct mbuf *immpkt, int tx_flush)
 }
 
 int
-cxgb_pcpu_start(struct ifnet *ifp, struct mbuf *immpkt)
+cxgb_pcpu_transmit(struct ifnet *ifp, struct mbuf *immpkt)
 {
 	uint32_t cookie;
 	int err, qidx, locked, resid;
@@ -506,10 +388,10 @@ cxgb_pcpu_transmit(struct ifnet *ifp, struct mbuf *immpkt)
 	qidx = resid = err = cookie = locked = 0;
 
 #ifdef IFNET_MULTIQUEUE
-	if (immpkt && (immpkt->m_pkthdr.rss_hash != 0)) {
-		cookie = immpkt->m_pkthdr.rss_hash;
+	if (immpkt && (immpkt->m_pkthdr.flowid != 0)) {
+		cookie = immpkt->m_pkthdr.flowid;
 		qidx = cxgb_pcpu_cookie_to_qidx(pi, cookie);
-		DPRINTF("hash=0x%x qidx=%d cpu=%d\n", immpkt->m_pkthdr.rss_hash, qidx, curcpu);
+		DPRINTF("hash=0x%x qidx=%d cpu=%d\n", immpkt->m_pkthdr.flowid, qidx, curcpu);
 		qs = &pi->adapter->sge.qs[qidx];
 	} else
 #endif
@@ -519,7 +401,7 @@ cxgb_pcpu_transmit(struct ifnet *ifp, struct mbuf *immpkt)
 
 	if (((sc->tunq_coalesce == 0) ||
 	    (buf_ring_count(txq->txq_mr) >= TX_WR_COUNT_MAX) ||
-	    (cxgb_pcpu_tx_coalesce == 0)) && mtx_trylock(&txq->lock)) {
+	    (coalesce_tx_enable == 0)) && mtx_trylock(&txq->lock)) {
 		if (cxgb_debug)
 			printf("doing immediate transmit\n");
 
@@ -534,10 +416,6 @@ cxgb_pcpu_transmit(struct ifnet *ifp, struct mbuf *immpkt)
 		    sc->tunq_coalesce, buf_ring_count(txq->txq_mr), mtx_owned(&txq->lock));
 		err = cxgb_pcpu_enqueue_packet_(qs, immpkt);
 	}
 
-	if (resid && (txq->flags & TXQ_TRANSMITTING) == 0)
-		wakeup(qs);
-
 	return ((err == ENOSPC) ? 0 : err);
 }
 
@@ -552,7 +430,7 @@ cxgb_start(struct ifnet *ifp)
 	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 		return;
 
-	cxgb_pcpu_start(ifp, NULL);
+	cxgb_pcpu_transmit(ifp, NULL);
 }
 
 static void
@@ -562,11 +440,8 @@ cxgb_pcpu_start_proc(void *arg)
 	struct thread *td;
 	struct sge_txq *txq = &qs->txq[TXQ_ETH];
 	int idleticks, err = 0;
-#ifdef notyet
-	struct adapter *sc = qs->port->adapter;
-#endif
-	td = curthread;
 
+	td = curthread;
 	sleep_ticks = max(hz/1000, 1);
 	qs->qs_flags |= QS_RUNNING;
 	thread_lock(td);
@@ -642,6 +517,9 @@ cxgb_pcpu_cookie_to_qidx(struct port_info *pi, uint32_t cookie)
 {
 	int qidx;
 	uint32_t tmp;
 
+	if (multiq_tx_enable == 0)
+		return (pi->first_qset);
+
 	/*
 	 * Will probably need to be changed for 4-port XXX
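Only the early-out is shown here; the body of cxgb_pcpu_cookie_to_qidx falls outside the hunk. The usual shape of such a mapping is a reduction of the flow cookie onto the port's qset range; a hypothetical sketch, not taken from the driver:

    #include <stdint.h>

    /* Hypothetical: reduce a 32-bit flow cookie to a queue-set index in
     * [first_qset, first_qset + nqsets).  Mirrors the early return added
     * in the hunk above when multiqueue transmit is disabled. */
    static int
    cookie_to_qidx_sketch(int multiq_tx_enable, uint32_t cookie,
        int first_qset, int nqsets)
    {
    	if (multiq_tx_enable == 0 || nqsets <= 1)
    		return (first_qset);
    	return (first_qset + (int)(cookie % (uint32_t)nqsets));
    }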
@@ -757,7 +635,7 @@ cxgb_tx(struct sge_qset *qs, uint32_t txmax)
 		check_pkt_coalesce(qs);
 		count = cxgb_dequeue_packet(txq, m_vec);
 		if (count == 0) {
-			err = ENOBUFS;
+			err = ENOSPC;
 			break;
 		}
 		ETHER_BPF_MTAP(ifp, m_vec[0]);
@@ -767,28 +645,10 @@ cxgb_tx(struct sge_qset *qs, uint32_t txmax)
 		txq->txq_enqueued += count;
 		m_vec[0] = NULL;
 	}
-#if 0 /* !MULTIQ */
-	if (__predict_false(err)) {
-		if (err == ENOMEM) {
-			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
-			IFQ_LOCK(&ifp->if_snd);
-			IFQ_DRV_PREPEND(&ifp->if_snd, m_vec[0]);
-			IFQ_UNLOCK(&ifp->if_snd);
-		}
-	}
-	else if ((err == 0) && (txq->size <= txq->in_use + TX_MAX_DESC) &&
-	    (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
-		setbit(&qs->txq_stopped, TXQ_ETH);
-		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
-		txq_fills++;
-		err = ENOSPC;
-	}
-#else
 	if ((err == 0) && (txq->size <= txq->in_use + TX_MAX_DESC)) {
-		err = ENOSPC;
+		err = ENOBUFS;
 		txq_fills++;
 		setbit(&qs->txq_stopped, TXQ_ETH);
 		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 	}
 	if (err == ENOMEM) {
 		int i;
@@ -799,7 +659,6 @@ cxgb_tx(struct sge_qset *qs, uint32_t txmax)
 		for (i = 0; i < count; i++)
 			m_freem(m_vec[i]);
 	}
-#endif
 	return (err);
 }
 
@@ -26,8 +26,6 @@ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 POSSIBILITY OF SUCH DAMAGE.
 
 ***************************************************************************/
-#define DEBUG_BUFRING
-
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
@@ -86,9 +84,9 @@ extern int cxgb_pcpu_cache_enable;
 extern int nmbjumbo4;
 extern int nmbjumbo9;
 extern int nmbjumbo16;
 
+extern int multiq_tx_enable;
+extern int coalesce_tx_enable;
+extern int wakeup_tx_thread;
 
 #define USE_GTS 0
 
@@ -1275,7 +1273,6 @@ t3_encap(struct sge_qset *qs, struct mbuf **m, int count)
 	KASSERT(txsd->mi.mi_base == NULL,
 	    ("overwriting valid entry mi_base==%p", txsd->mi.mi_base));
 	if (count > 1) {
-		panic("count > 1 not support in CVS\n");
 		if ((err = busdma_map_sg_vec(m, &m0, segs, count)))
 			return (err);
 		nsegs = count;
@@ -1286,7 +1283,7 @@ t3_encap(struct sge_qset *qs, struct mbuf **m, int count)
 	}
 	KASSERT(m0->m_pkthdr.len, ("empty packet nsegs=%d count=%d", nsegs, count));
 
-	if (!(m0->m_pkthdr.len <= PIO_LEN)) {
+	if ((m0->m_pkthdr.len > PIO_LEN) || (count > 1)) {
 		mi_collapse_mbuf(&txsd->mi, m0);
 		mi = &txsd->mi;
 	}
@@ -1718,9 +1715,15 @@ t3_free_qset(adapter_t *sc, struct sge_qset *q)
 
 	t3_free_tx_desc_all(&q->txq[TXQ_ETH]);
 
-	for (i = 0; i < SGE_TXQ_PER_SET; i++)
+	for (i = 0; i < SGE_TXQ_PER_SET; i++) {
 		if (q->txq[i].txq_mr != NULL)
 			buf_ring_free(q->txq[i].txq_mr, M_DEVBUF);
+		if (q->txq[i].txq_ifq != NULL) {
+			ifq_detach(q->txq[i].txq_ifq);
+			free(q->txq[i].txq_ifq, M_DEVBUF);
+		}
+	}
 
 	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
 		if (q->fl[i].desc) {
 			mtx_lock_spin(&sc->sge.reg_lock);
@@ -1882,19 +1885,16 @@ t3_free_tx_desc(struct sge_txq *q, int reclaimable)
 			bus_dmamap_unload(q->entry_tag, txsd->map);
 			txsd->flags &= ~TX_SW_DESC_MAPPED;
 		}
-		m_freem_iovec(&txsd->mi);
-#if 0
-		buf_ring_scan(&q->txq_mr, txsd->mi.mi_base, __FILE__, __LINE__);
-#endif
+		m_freem_iovec(&txsd->mi);
 		txsd->mi.mi_base = NULL;
 		/*
 		 * XXX check for cache hit rate here
 		 *
 		 */
 		q->port->ifp->if_opackets++;
 #if defined(DIAGNOSTIC) && 0
 		if (m_get_priority(txsd->m[0]) != cidx)
 			printf("pri=%d cidx=%d\n",
 			    (int)m_get_priority(txsd->m[0]), cidx);
 #endif
 
 	} else
 		q->txq_skipped++;
 
@@ -2288,11 +2288,16 @@ t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
 			device_printf(sc->dev, "failed to allocate mbuf ring\n");
 			goto err;
 		}
+		if ((q->txq[i].txq_ifq =
+		    malloc(sizeof(struct ifaltq), M_DEVBUF, M_NOWAIT|M_ZERO))
+		    == NULL) {
+			device_printf(sc->dev, "failed to allocate ifq\n");
+			goto err;
+		}
+		ifq_attach(q->txq[i].txq_ifq, pi->ifp);
 	}
 
 	init_qset_cntxt(q, id);
 	q->idx = id;
 
 	if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc),
 		    sizeof(struct rx_sw_desc), &q->fl[0].phys_addr,
 		    &q->fl[0].desc, &q->fl[0].sdesc,
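Note the pairing with the teardown hunk in t3_free_qset earlier: malloc plus ifq_attach here, ifq_detach plus free there, with the NULL check in t3_free_qset covering the case where this allocation fails and the error path runs before txq_ifq is set.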
@@ -2880,7 +2885,7 @@ process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
 		eop = get_packet(adap, drop_thresh, qs, &rspq->rspq_mbuf, r);
 #endif
 #ifdef IFNET_MULTIQUEUE
-			rspq->rspq_mh.mh_head->m_pkthdr.rss_hash = rss_hash;
+			rspq->rspq_mh.mh_head->m_pkthdr.flowid = rss_hash;
 #endif
 			ethpad = 2;
 		} else {
@@ -3364,6 +3369,18 @@ t3_add_attach_sysctls(adapter_t *sc)
 	    "pcpu_cache_enable",
 	    CTLFLAG_RW, &cxgb_pcpu_cache_enable,
 	    0, "#enable driver local pcpu caches");
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+	    "multiq_tx_enable",
+	    CTLFLAG_RW, &multiq_tx_enable,
+	    0, "enable transmit by multiple tx queues");
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+	    "coalesce_tx_enable",
+	    CTLFLAG_RW, &coalesce_tx_enable,
+	    0, "coalesce small packets in work requests - WARNING ALPHA");
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+	    "wakeup_tx_thread",
+	    CTLFLAG_RW, &wakeup_tx_thread,
+	    0, "wakeup tx thread if no transmitter running");
 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
 	    "cache_alloc",
 	    CTLFLAG_RD, &cxgb_cached_allocations,
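
The three nodes added above are CTLFLAG_RW, so unlike the CTLFLAG_RDTUN tunables earlier in the diff they can be changed at runtime. A hedged example, assuming the controller attaches as cxgbc0 and that t3_add_attach_sysctls hangs these off the device sysctl tree:

    sysctl dev.cxgbc.0.coalesce_tx_enable=1   # opt in to the work-request coalescing path (marked ALPHA)
    sysctl dev.cxgbc.0.wakeup_tx_thread=1     # re-enable waking the tx thread on enqueue
    sysctl dev.cxgbc.0.multiq_tx_enable=0     # fall back to the first qset for transmit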