Provide a set of inline functions to manage simple mbuf(9) queues, based
on queue(3)'s STAILQ.  Utilize them in cxgb(4) and Xen, deleting home grown
implementations.

Sponsored by:	Netflix
Sponsored by:	Nginx, Inc.
commit c578b6aca0
parent 8935302fe1
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=278977
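
For orientation before the diff: a minimal usage sketch of the new interface, using only the structure and functions added to sys/mbuf.h in the last hunk of this change. The surrounding driver context (the xx_softc structure, the queue limit of 128, and the function names) is illustrative, not part of the commit.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

struct xx_softc {
	struct mbufq	xx_sendq;	/* hypothetical per-driver packet queue */
};

static void
xx_queue_setup(struct xx_softc *sc)
{
	/* Bounded queue: enqueue fails with ENOBUFS once 128 packets sit here. */
	mbufq_init(&sc->xx_sendq, 128);
}

static int
xx_defer_packet(struct xx_softc *sc, struct mbuf *m)
{
	int error;

	error = mbufq_enqueue(&sc->xx_sendq, m);
	if (error != 0)
		m_freem(m);		/* queue full; drop the packet */
	return (error);
}

static void
xx_queue_teardown(struct xx_softc *sc)
{
	/* Free everything still queued in one call. */
	mbufq_drain(&sc->xx_sendq);
}

The converted call sites below pass INT_MAX and cast the mbufq_enqueue() return value to void, preserving the unbounded behaviour of the queues they replace; the XXX comments in the diff mark that as a temporary choice.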
@@ -59,7 +59,6 @@ POSSIBILITY OF SUCH DAMAGE.
 #include <dev/pci/pcivar.h>
 
 #include <cxgb_osdep.h>
-#include <sys/mbufq.h>
 
 struct adapter;
 struct sge_qset;
@@ -251,7 +250,7 @@ struct sge_txq {
 	bus_dma_tag_t	desc_tag;
 	bus_dmamap_t	desc_map;
 	bus_dma_tag_t	entry_tag;
-	struct mbuf_head sendq;
+	struct mbufq	sendq;
 
 	struct buf_ring	*txq_mr;
 	struct ifaltq	*txq_ifq;
@@ -1117,9 +1117,10 @@ init_qset_cntxt(struct sge_qset *qs, u_int id)
 	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
 	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
 
-	mbufq_init(&qs->txq[TXQ_ETH].sendq);
-	mbufq_init(&qs->txq[TXQ_OFLD].sendq);
-	mbufq_init(&qs->txq[TXQ_CTRL].sendq);
+	/* XXX: a sane limit is needed instead of INT_MAX */
+	mbufq_init(&qs->txq[TXQ_ETH].sendq, INT_MAX);
+	mbufq_init(&qs->txq[TXQ_OFLD].sendq, INT_MAX);
+	mbufq_init(&qs->txq[TXQ_CTRL].sendq, INT_MAX);
 }
@@ -1820,8 +1821,8 @@ check_desc_avail(adapter_t *adap, struct sge_txq *q,
 	 * the control queue is only used for binding qsets which happens
 	 * at init time so we are guaranteed enough descriptors
 	 */
-	if (__predict_false(!mbufq_empty(&q->sendq))) {
-addq_exit:	mbufq_tail(&q->sendq, m);
+	if (__predict_false(mbufq_len(&q->sendq))) {
+addq_exit:	(void )mbufq_enqueue(&q->sendq, m);
 		return 1;
 	}
 	if (__predict_false(q->size - q->in_use < ndesc)) {
@@ -1936,7 +1937,7 @@ again:	reclaim_completed_tx_imm(q);
 		}
 		q->in_use++;
 	}
-	if (!mbufq_empty(&q->sendq)) {
+	if (mbufq_len(&q->sendq)) {
 		setbit(&qs->txq_stopped, TXQ_CTRL);
 
 		if (should_restart_tx(q) &&
@@ -2319,7 +2320,7 @@ restart_offloadq(void *data, int npending)
 	TXQ_LOCK(qs);
 again:	cleaned = reclaim_completed_tx(qs, 16, TXQ_OFLD);
 
-	while ((m = mbufq_peek(&q->sendq)) != NULL) {
+	while ((m = mbufq_first(&q->sendq)) != NULL) {
 		unsigned int gen, pidx;
 		struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
 		unsigned int ndesc = G_HDR_NDESC(oh->flags);
@@ -2485,7 +2486,7 @@ t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
 			printf("error %d from alloc ring tx %i\n", ret, i);
 			goto err;
 		}
-		mbufq_init(&q->txq[i].sendq);
+		mbufq_init(&q->txq[i].sendq, INT_MAX);
 		q->txq[i].gen = 1;
 		q->txq[i].size = p->txq_size[i];
 	}
@@ -3521,7 +3522,7 @@ t3_add_configured_sysctls(adapter_t *sc)
 			    CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_mr->br_drops,
 			    "#tunneled packets dropped");
 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "sendqlen",
-			    CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.qlen,
+			    CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.mq_len,
 			    0, "#tunneled packets waiting to be sent");
 #if 0
 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_pidx",
@@ -1,123 +0,0 @@
-/**************************************************************************
-
-Copyright (c) 2007-2008, Chelsio Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Neither the name of the Chelsio Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-$FreeBSD$
-
-***************************************************************************/
-
-#ifndef CXGB_MBUFQ_H_
-#define CXGB_MBUFQ_H_
-
-struct mbuf_head {
-	struct mbuf *head;
-	struct mbuf *tail;
-	uint32_t     qlen;
-	uint32_t     qsize;
-	struct mtx   lock;
-};
-
-static __inline void
-mbufq_init(struct mbuf_head *l)
-{
-	l->head = l->tail = NULL;
-	l->qlen = l->qsize = 0;
-}
-
-static __inline int
-mbufq_empty(struct mbuf_head *l)
-{
-	return (l->head == NULL);
-}
-
-static __inline int
-mbufq_len(struct mbuf_head *l)
-{
-	return (l->qlen);
-}
-
-static __inline int
-mbufq_size(struct mbuf_head *l)
-{
-	return (l->qsize);
-}
-
-static __inline int
-mbufq_head_size(struct mbuf_head *l)
-{
-	return (l->head ? l->head->m_pkthdr.len : 0);
-}
-
-static __inline void
-mbufq_tail(struct mbuf_head *l, struct mbuf *m)
-{
-	l->qlen++;
-	if (l->head == NULL)
-		l->head = m;
-	else
-		l->tail->m_nextpkt = m;
-	l->tail = m;
-	l->qsize += m->m_pkthdr.len;
-}
-
-static __inline struct mbuf *
-mbufq_dequeue(struct mbuf_head *l)
-{
-	struct mbuf *m;
-
-	m = l->head;
-	if (m) {
-		if (m == l->tail)
-			l->head = l->tail = NULL;
-		else
-			l->head = m->m_nextpkt;
-		m->m_nextpkt = NULL;
-		l->qlen--;
-		l->qsize -= m->m_pkthdr.len;
-	}
-
-	return (m);
-}
-
-static __inline struct mbuf *
-mbufq_peek(const struct mbuf_head *l)
-{
-	return (l->head);
-}
-
-static __inline void
-mbufq_append(struct mbuf_head *a, struct mbuf_head *b)
-{
-	if (a->tail)
-		a->tail->m_nextpkt = b->head;
-	if (b->tail)
-		a->tail = b->tail;
-	a->qlen += b->qlen;
-	a->qsize += b->qsize;
-
-
-}
-#endif /* CXGB_MBUFQ_H_ */
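The file removed above is the cxgb(4) home-grown queue; the Xen netfront copy removed later in this diff is nearly identical. The remaining hunks apply one consistent substitution, summarized here for reference (every mapping below is taken directly from the hunks in this change):

/*
 * Old driver-private API (struct mbuf_head)       New sys/mbuf.h API (struct mbufq)
 *
 * mbufq_init(&q)                              ->  mbufq_init(&q, maxlen)        (INT_MAX for now)
 * mbufq_tail(&q, m)                           ->  (void )mbufq_enqueue(&q, m)   (may return ENOBUFS)
 * mbufq_peek(&q)                              ->  mbufq_first(&q)
 * !mbufq_empty(&q)                            ->  mbufq_len(&q)
 * mbufq_dequeue(&q)                           ->  mbufq_dequeue(&q)             (name unchanged)
 * while ((m = mbufq_dequeue(&q))) m_freem(m)  ->  mbufq_drain(&q)
 * q.qlen                                      ->  q.mq_len
 */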
@@ -1088,7 +1088,7 @@ send_reset(struct toepcb *toep)
 	req->cmd = CPL_ABORT_SEND_RST;
 
 	if (tp->t_state == TCPS_SYN_SENT)
-		mbufq_tail(&toep->out_of_order_queue, m);	/* defer */
+		(void )mbufq_enqueue(&toep->out_of_order_queue, m);	/* defer */
 	else
 		l2t_send(sc, m, toep->tp_l2t);
 }
@@ -30,7 +30,7 @@
 #define CXGB_TOEPCB_H_
 #include <sys/bus.h>
 #include <sys/condvar.h>
-#include <sys/mbufq.h>
+#include <sys/limits.h>
 
 #define TP_DATASENT (1 << 0)
 #define TP_TX_WAIT_IDLE (1 << 1)
@@ -64,26 +64,26 @@ struct toepcb {
 	struct inpcb *tp_inp;
 	struct mbuf *tp_m_last;
 
-	struct mbuf_head wr_list;
-	struct mbuf_head out_of_order_queue;
+	struct mbufq wr_list;
+	struct mbufq out_of_order_queue;
 };
 
 static inline void
 reset_wr_list(struct toepcb *toep)
 {
-	mbufq_init(&toep->wr_list);
+	mbufq_init(&toep->wr_list, INT_MAX);	/* XXX: sane limit needed */
 }
 
 static inline void
 enqueue_wr(struct toepcb *toep, struct mbuf *m)
 {
-	mbufq_tail(&toep->wr_list, m);
+	(void )mbufq_enqueue(&toep->wr_list, m);
 }
 
 static inline struct mbuf *
 peek_wr(const struct toepcb *toep)
 {
-	return (mbufq_peek(&toep->wr_list));
+	return (mbufq_first(&toep->wr_list));
 }
 
 static inline struct mbuf *
@@ -1,123 +0,0 @@
-/**************************************************************************
-
-Copyright (c) 2007, Chelsio Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Neither the name of the Chelsio Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-$FreeBSD$
-
-***************************************************************************/
-
-#ifndef CXGB_MBUFQ_H_
-#define CXGB_MBUFQ_H_
-
-struct mbuf_head {
-	struct mbuf *head;
-	struct mbuf *tail;
-	uint32_t     qlen;
-	uint32_t     qsize;
-	struct mtx   lock;
-};
-
-static __inline void
-mbufq_init(struct mbuf_head *l)
-{
-	l->head = l->tail = NULL;
-	l->qlen = l->qsize = 0;
-}
-
-static __inline int
-mbufq_empty(struct mbuf_head *l)
-{
-	return (l->head == NULL);
-}
-
-static __inline int
-mbufq_len(struct mbuf_head *l)
-{
-	return (l->qlen);
-}
-
-static __inline int
-mbufq_size(struct mbuf_head *l)
-{
-	return (l->qsize);
-}
-
-static __inline int
-mbufq_head_size(struct mbuf_head *l)
-{
-	return (l->head ? l->head->m_pkthdr.len : 0);
-}
-
-static __inline void
-mbufq_tail(struct mbuf_head *l, struct mbuf *m)
-{
-	l->qlen++;
-	if (l->head == NULL)
-		l->head = m;
-	else
-		l->tail->m_nextpkt = m;
-	l->tail = m;
-	l->qsize += m->m_pkthdr.len;
-}
-
-static __inline struct mbuf *
-mbufq_dequeue(struct mbuf_head *l)
-{
-	struct mbuf *m;
-
-	m = l->head;
-	if (m) {
-		if (m == l->tail)
-			l->head = l->tail = NULL;
-		else
-			l->head = m->m_nextpkt;
-		m->m_nextpkt = NULL;
-		l->qlen--;
-		l->qsize -= m->m_pkthdr.len;
-	}
-
-	return (m);
-}
-
-static __inline struct mbuf *
-mbufq_peek(struct mbuf_head *l)
-{
-	return (l->head);
-}
-
-static __inline void
-mbufq_append(struct mbuf_head *a, struct mbuf_head *b)
-{
-	if (a->tail)
-		a->tail->m_nextpkt = b->head;
-	if (b->tail)
-		a->tail = b->tail;
-	a->qlen += b->qlen;
-	a->qsize += b->qsize;
-
-
-}
-#endif /* CXGB_MBUFQ_H_ */
@@ -33,6 +33,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/sockio.h>
+#include <sys/limits.h>
 #include <sys/mbuf.h>
 #include <sys/malloc.h>
 #include <sys/module.h>
@@ -87,8 +88,6 @@ __FBSDID("$FreeBSD$");
 
 #include <machine/xen/xenvar.h>
 
-#include <dev/xen/netfront/mbufq.h>
-
 #include "xenbus_if.h"
 
 /* Features supported by all backends. TSO and LRO can be negotiated */
@@ -277,7 +276,7 @@ struct netfront_info {
 	int rx_ring_ref;
 	uint8_t mac[ETHER_ADDR_LEN];
 	struct xn_chain_data xn_cdata;	/* mbufs */
-	struct mbuf_head xn_rx_batch;	/* head of the batch queue */
+	struct mbufq xn_rx_batch;	/* batch queue */
 
 	int xn_if_flags;
 	struct callout xn_stat_ch;
@@ -837,7 +836,7 @@ network_alloc_rx_buffers(struct netfront_info *sc)
 		m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;
 
 		/* queue the mbufs allocated */
-		mbufq_tail(&sc->xn_rx_batch, m_new);
+		(void )mbufq_enqueue(&sc->xn_rx_batch, m_new);
 	}
 
 	/*
@@ -973,7 +972,7 @@ xn_rxeof(struct netfront_info *np)
 	RING_IDX i, rp;
 	multicall_entry_t *mcl;
 	struct mbuf *m;
-	struct mbuf_head rxq, errq;
+	struct mbufq rxq, errq;
 	int err, pages_flipped = 0, work_to_do;
 
 	do {
@@ -981,8 +980,9 @@ xn_rxeof(struct netfront_info *np)
 		if (!netfront_carrier_ok(np))
 			return;
 
-		mbufq_init(&errq);
-		mbufq_init(&rxq);
+		/* XXX: there should be some sane limit. */
+		mbufq_init(&errq, INT_MAX);
+		mbufq_init(&rxq, INT_MAX);
 
 		ifp = np->xn_ifp;
 
@@ -1000,7 +1000,7 @@ xn_rxeof(struct netfront_info *np)
 
 			if (__predict_false(err)) {
 				if (m)
-					mbufq_tail(&errq, m);
+					(void )mbufq_enqueue(&errq, m);
 				np->stats.rx_errors++;
 				continue;
 			}
@@ -1022,7 +1022,7 @@ xn_rxeof(struct netfront_info *np)
 			np->stats.rx_packets++;
 			np->stats.rx_bytes += m->m_pkthdr.len;
 
-			mbufq_tail(&rxq, m);
+			(void )mbufq_enqueue(&rxq, m);
 			np->rx.rsp_cons = i;
 		}
 
@@ -1046,8 +1046,7 @@ xn_rxeof(struct netfront_info *np)
 			}
 		}
 
-		while ((m = mbufq_dequeue(&errq)))
-			m_freem(m);
+		mbufq_drain(&errq);
 
 		/*
 		 * Process all the mbufs after the remapping is complete.
@@ -1199,5 +1199,101 @@ rt_m_getfib(struct mbuf *m)
 #define M_PROFILE(m)
 #endif
 
+struct mbufq {
+	STAILQ_HEAD(, mbuf)	mq_head;
+	int			mq_len;
+	int			mq_maxlen;
+};
+
+static inline void
+mbufq_init(struct mbufq *mq, int maxlen)
+{
+
+	STAILQ_INIT(&mq->mq_head);
+	mq->mq_maxlen = maxlen;
+	mq->mq_len = 0;
+}
+
+static inline struct mbuf *
+mbufq_flush(struct mbufq *mq)
+{
+	struct mbuf *m;
+
+	m = STAILQ_FIRST(&mq->mq_head);
+	STAILQ_INIT(&mq->mq_head);
+	mq->mq_len = 0;
+	return (m);
+}
+
+static inline void
+mbufq_drain(struct mbufq *mq)
+{
+	struct mbuf *m, *n;
+
+	n = mbufq_flush(mq);
+	while ((m = n) != NULL) {
+		n = STAILQ_NEXT(m, m_stailqpkt);
+		m_freem(m);
+	}
+}
+
+static inline struct mbuf *
+mbufq_first(const struct mbufq *mq)
+{
+
+	return (STAILQ_FIRST(&mq->mq_head));
+}
+
+static inline struct mbuf *
+mbufq_last(const struct mbufq *mq)
+{
+
+	return (STAILQ_LAST(&mq->mq_head, mbuf, m_stailqpkt));
+}
+
+static inline int
+mbufq_full(const struct mbufq *mq)
+{
+
+	return (mq->mq_len >= mq->mq_maxlen);
+}
+
+static inline int
+mbufq_len(const struct mbufq *mq)
+{
+
+	return (mq->mq_len);
+}
+
+static inline int
+mbufq_enqueue(struct mbufq *mq, struct mbuf *m)
+{
+
+	if (mbufq_full(mq))
+		return (ENOBUFS);
+	STAILQ_INSERT_TAIL(&mq->mq_head, m, m_stailqpkt);
+	mq->mq_len++;
+	return (0);
+}
+
+static inline struct mbuf *
+mbufq_dequeue(struct mbufq *mq)
+{
+	struct mbuf *m;
+
+	m = STAILQ_FIRST(&mq->mq_head);
+	if (m) {
+		STAILQ_REMOVE_HEAD(&mq->mq_head, m_stailqpkt);
+		mq->mq_len--;
+	}
+	return (m);
+}
+
+static inline void
+mbufq_prepend(struct mbufq *mq, struct mbuf *m)
+{
+
+	STAILQ_INSERT_HEAD(&mq->mq_head, m, m_stailqpkt);
+	mq->mq_len++;
+}
 #endif /* !_SYS_MBUF_H_ */
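
On the consumer side, the new API keeps the peek-then-dequeue idiom that restart_offloadq() uses above: inspect the head with mbufq_first() and only dequeue once the packet can actually be consumed. A hypothetical sketch (the xx_restart name and the hw_avail descriptor count are illustrative, not from this commit):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

static void
xx_restart(struct mbufq *sendq, int hw_avail)
{
	struct mbuf *m;

	while ((m = mbufq_first(sendq)) != NULL) {
		if (hw_avail == 0)
			break;			/* leave the packet at the head */
		(void)mbufq_dequeue(sendq);
		hw_avail--;
		m_freem(m);			/* stand-in for handing m to the hardware */
	}
}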