
For reasons that I have not delved into, Xen 3.2 netback now does header splitting,
so packets larger than 128 bytes are split into multiple buffers. This change fixes
netfront to handle multiple buffers per rx packet.

MFC after:	1 month
Kip Macy 2008-08-21 02:40:26 +00:00
parent e676955476
commit 83b92f6e47
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=181945
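
Before the diff: a rough, self-contained sketch of what the change does. The netfront fix below stops treating each RX ring response as a complete packet; instead it links the buffers that make up one packet into an mbuf chain headed by the first buffer (m0), accumulating the packet length in m0->m_pkthdr.len as it walks the responses. The toy program below is not driver code; struct frag, chain_fragments() and the 128/172-byte sizes are invented purely for illustration of that chaining loop.

#include <stdio.h>

struct frag {			/* stand-in for one RX ring buffer/mbuf */
	int len;		/* rx->status: bytes placed in this buffer */
	int pkthdr_len;		/* total packet length; meaningful on the head only */
	struct frag *next;	/* m_next link to the next buffer of the packet */
};

/*
 * Link nfrags per-buffer fragments into a single packet chain and
 * accumulate the packet length on the head, mirroring the structure
 * of the reworked xennet_get_responses() loop.
 */
static struct frag *
chain_fragments(struct frag **frags, int nfrags)
{
	struct frag *m0, *m_prev, *m;
	int i;

	m0 = m_prev = frags[0];
	m0->pkthdr_len = 0;
	m0->next = NULL;

	for (i = 0; i < nfrags; i++) {
		m = frags[i];
		if (i > 0) {
			m_prev->next = m;	/* hang this buffer off the previous one */
			m->next = NULL;
		}
		m0->pkthdr_len += m->len;	/* head accumulates the total length */
		m_prev = m;
	}
	return (m0);
}

int
main(void)
{
	/* A packet split by netback into a header buffer and a payload
	 * buffer; the 128/172 byte sizes are purely illustrative. */
	struct frag hdr = { .len = 128 }, payload = { .len = 172 };
	struct frag *parts[] = { &hdr, &payload };
	struct frag *pkt = chain_fragments(parts, 2);

	printf("packet of %d bytes spread over %d buffers\n",
	    pkt->pkthdr_len, pkt->next != NULL ? 2 : 1);
	return (0);
}

The real code does the same bookkeeping with struct mbuf, m_next, m_len and m_pkthdr.len, while also handling grant references and the error path.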


@@ -135,7 +135,7 @@ static void xn_free_rx_ring(struct netfront_info *);
 static void xn_free_tx_ring(struct netfront_info *);
 static int xennet_get_responses(struct netfront_info *np,
-	struct netfront_rx_info *rinfo, RING_IDX rp, struct mbuf_head *list,
+	struct netfront_rx_info *rinfo, RING_IDX rp, struct mbuf **list,
	int *pages_flipped_p);
 
 #define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT)
@@ -149,7 +149,7 @@ static int xennet_get_responses(struct netfront_info *np,
  */
 struct xn_chain_data {
	struct mbuf	*xn_tx_chain[NET_TX_RING_SIZE+1];
	struct mbuf	*xn_rx_chain[NET_RX_RING_SIZE+1];
 };
@@ -405,7 +405,6 @@ netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id)
	info = ifp->if_softc;
	dev->dev_driver_data = info;
	return 0;
 }
@@ -749,7 +748,7 @@ network_alloc_rx_buffers(struct netfront_info *sc)
			break;
		m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)(
		    vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT);
		id = xennet_rxidx(req_prod + i);
@@ -853,14 +852,13 @@ xn_rxeof(struct netfront_info *np)
	RING_IDX i, rp;
	multicall_entry_t *mcl;
	struct mbuf *m;
-	struct mbuf_head rxq, errq, tmpq;
+	struct mbuf_head rxq, errq;
	int err, pages_flipped = 0;
 
	XN_RX_LOCK_ASSERT(np);
	if (!netfront_carrier_ok(np))
		return;
 
-	mbufq_init(&tmpq);
	mbufq_init(&errq);
	mbufq_init(&rxq);
@@ -874,23 +872,19 @@ xn_rxeof(struct netfront_info *np)
		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));
 
-		err = xennet_get_responses(np, &rinfo, rp, &tmpq,
+		m = NULL;
+		err = xennet_get_responses(np, &rinfo, rp, &m,
		    &pages_flipped);
 
		if (unlikely(err)) {
-			while ((m = mbufq_dequeue(&tmpq)))
+			if (m)
				mbufq_tail(&errq, m);
			np->stats.rx_errors++;
			i = np->rx.rsp_cons;
			continue;
		}
 
-		m = mbufq_dequeue(&tmpq);
-		m->m_data += rx->offset;/* (rx->addr & PAGE_MASK); */
-		m->m_pkthdr.len = m->m_len = rx->status;
		m->m_pkthdr.rcvif = ifp;
		if ( rx->flags & NETRXF_data_validated ) {
			/* Tell the stack the checksums are okay */
			/*
@@ -905,7 +899,7 @@ xn_rxeof(struct netfront_info *np)
		}
 
		np->stats.rx_packets++;
-		np->stats.rx_bytes += rx->status;
+		np->stats.rx_bytes += m->m_pkthdr.len;
 
		mbufq_tail(&rxq, m);
		np->rx.rsp_cons = ++i;
@@ -1130,7 +1124,7 @@ xennet_get_extras(struct netfront_info *np,
 static int
 xennet_get_responses(struct netfront_info *np,
	struct netfront_rx_info *rinfo, RING_IDX rp,
-	struct mbuf_head *list,
+	struct mbuf **list,
	int *pages_flipped_p)
 {
	int pages_flipped = *pages_flipped_p;
@@ -1139,21 +1133,34 @@ xennet_get_responses(struct netfront_info *np,
	struct netif_rx_response *rx = &rinfo->rx;
	struct netif_extra_info *extras = rinfo->extras;
	RING_IDX cons = np->rx.rsp_cons;
-	struct mbuf *m = xennet_get_rx_mbuf(np, cons);
+	struct mbuf *m, *m0, *m_prev;
	grant_ref_t ref = xennet_get_rx_ref(np, cons);
-	int max = 24 /* MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD) */;
+	int max = 5 /* MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD) */;
	int frags = 1;
	int err = 0;
	u_long ret;
 
+	m0 = m = m_prev = xennet_get_rx_mbuf(np, cons);
+
	if (rx->flags & NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp);
		cons = np->rx.rsp_cons;
	}
 
+	if (m0 != NULL) {
+		m0->m_pkthdr.len = 0;
+		m0->m_next = NULL;
+	}
+
	for (;;) {
		u_long mfn;
 
+#if 0
+		printf("rx->status=%hd rx->offset=%hu frags=%u\n",
+			rx->status, rx->offset, frags);
+#endif
		if (unlikely(rx->status < 0 ||
		    rx->offset + rx->status > PAGE_SIZE)) {
 #if 0
@@ -1165,7 +1172,7 @@ xennet_get_responses(struct netfront_info *np,
			err = -EINVAL;
			goto next;
		}
 
		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
@@ -1219,9 +1226,14 @@ xennet_get_responses(struct netfront_info *np,
		}
 
		gnttab_release_grant_reference(&np->gref_rx_head, ref);
-		mbufq_tail(list, m);
 
 next:
+		if (m != NULL) {
+			m->m_len = rx->status;
+			m->m_data += rx->offset;
+			m0->m_pkthdr.len += rx->status;
+		}
+
		if (!(rx->flags & NETRXF_more_data))
			break;
@@ -1231,12 +1243,17 @@ xennet_get_responses(struct netfront_info *np,
			err = -ENOENT;
			break;
		}
 
+		m_prev = m;
		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
		m = xennet_get_rx_mbuf(np, cons + frags);
+		m_prev->m_next = m;
+		m->m_next = NULL;
		ref = xennet_get_rx_ref(np, cons + frags);
		frags++;
	}
+	*list = m0;
 
	if (unlikely(frags > max)) {
		if (net_ratelimit())