o add a flags parameter to netisr_register that is used to specify
  whether or not the isr needs to hold Giant when running; Giant-less
  operation is also controlled by the setting of debug_mpsafenet
o mark all netisr's except NETISR_IP as needing Giant
o add a GIANT_REQUIRED assertion to the top of netisr's that need Giant
o pickup Giant (when debug_mpsafenet is 1) inside ip_input before
  calling up with a packet
o change netisr handling so swi_net runs w/o Giant; instead we grab
  Giant before invoking handlers based on whether the handler needs Giant
o change netisr handling so that netisr's that are marked MPSAFE may
  have multiple instances active at a time
o add netisr statistics for packets dropped because the isr is inactive

Supported by:	FreeBSD Foundation
commit 7902224c6b
parent 2d633fc879

Notes:
    svn2git    2020-12-20 02:59:44 +00:00
    svn path=/head/; revision=122320
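For reference, a minimal consumer-side sketch of the new registration interface follows. NETISR_FOO, foointr(), fooq, and foo_init() are hypothetical names used only for illustration; they are not part of this commit or of the tree.

/*
 * Hypothetical example (not from this commit): registering a handler
 * under the new four-argument netisr_register().  A handler that has
 * been verified to run without Giant would pass NETISR_MPSAFE; any
 * other handler passes 0 and swi_net wraps its invocation in Giant.
 * NETISR_FOO stands in for a real NETISR_* queue number.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>

static struct ifqueue fooq;		/* hypothetical input queue */

static void
foointr(struct mbuf *m)
{
	/* Process one packet handed up by netisr dispatch, then free it. */
	m_freem(m);
}

static void
foo_init(void)
{
	fooq.ifq_maxlen = IFQ_MAXLEN;
	mtx_init(&fooq.ifq_mtx, "foo_inq", NULL, MTX_DEF);
	/* Still needs Giant, so flags is 0; use NETISR_MPSAFE once audited. */
	netisr_register(NETISR_FOO, foointr, &fooq, 0);
}
SYSINIT(foo, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY, foo_init, NULL);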
@@ -117,7 +117,7 @@ void usb_register_netisr()
 {
 	if (mtx_inited)
 		return;
-	netisr_register(NETISR_USB, (netisr_t *)usbintr, NULL);
+	netisr_register(NETISR_USB, (netisr_t *)usbintr, NULL, 0);
 	mtx_init(&usbq_tx.ifq_mtx, "usbq_tx_mtx", NULL, MTX_DEF);
 	mtx_init(&usbq_rx.ifq_mtx, "usbq_rx_mtx", NULL, MTX_DEF);
 	mtx_inited++;
@@ -187,8 +187,8 @@ static void
 init_device_poll(void)
 {

-	netisr_register(NETISR_POLL, (netisr_t *)netisr_poll, NULL);
-	netisr_register(NETISR_POLLMORE, (netisr_t *)netisr_pollmore, NULL);
+	netisr_register(NETISR_POLL, (netisr_t *)netisr_poll, NULL, 0);
+	netisr_register(NETISR_POLLMORE, (netisr_t *)netisr_pollmore, NULL, 0);
 }
 SYSINIT(device_poll, SI_SUB_CLOCKS, SI_ORDER_MIDDLE, init_device_poll, NULL)

@@ -244,7 +244,7 @@ ppp_modevent(module_t mod, int type, void *data)
 	case MOD_LOAD:
 		if_clone_attach(&ppp_cloner);

-		netisr_register(NETISR_PPP, (netisr_t *)pppintr, NULL);
+		netisr_register(NETISR_PPP, (netisr_t *)pppintr, NULL, 0);
 		/*
 		 * XXX layering violation - if_ppp can work over any lower
 		 * level transport that cares to attach to it.
@@ -1130,6 +1130,8 @@ pppintr()
 	int s;
 	struct mbuf *m;

+	GIANT_REQUIRED;
+
 	LIST_FOREACH(sc, &ppp_softc_list, sc_list) {
 		s = splimp();
 		if (!(sc->sc_flags & SC_TBUSY)
@@ -68,9 +68,9 @@ volatile unsigned int netisr;	/* scheduling bits for network */
 struct netisr {
 	netisr_t	*ni_handler;
 	struct ifqueue	*ni_queue;
+	int		ni_flags;
 } netisrs[32];

-static struct mtx netisr_mtx;
 static void *net_ih;

 void
@@ -80,37 +80,37 @@ legacy_setsoftnet(void)
 }

 void
-netisr_register(int num, netisr_t *handler, struct ifqueue *inq)
+netisr_register(int num, netisr_t *handler, struct ifqueue *inq, int flags)
 {

 	KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
 	    ("bad isr %d", num));
 	netisrs[num].ni_handler = handler;
 	netisrs[num].ni_queue = inq;
+	if ((flags & NETISR_MPSAFE) && !debug_mpsafenet)
+		flags &= ~NETISR_MPSAFE;
+	netisrs[num].ni_flags = flags;
 }

 void
 netisr_unregister(int num)
 {
 	struct netisr *ni;
 	int s;

 	KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
 	    ("bad isr %d", num));
 	ni = &netisrs[num];
 	ni->ni_handler = NULL;
 	if (ni->ni_queue != NULL) {
 		s = splimp();
 		if (ni->ni_queue != NULL)
 			IF_DRAIN(ni->ni_queue);
 		splx(s);
 	}
 }

 struct isrstat {
 	int	isrs_count;		/* dispatch count */
-	int	isrs_directed;		/* ...successfully dispatched */
+	int	isrs_directed;		/* ...directly dispatched */
 	int	isrs_deferred;		/* ...queued instead */
 	int	isrs_queued;		/* intentionally queueued */
+	int	isrs_drop;		/* dropped 'cuz no handler */
 	int	isrs_swi_count;		/* swi_net handlers called */
 };
 static struct isrstat isrstat;
@@ -130,6 +130,8 @@ SYSCTL_INT(_net_isr, OID_AUTO, deferred, CTLFLAG_RD,
     &isrstat.isrs_deferred, 0, "");
 SYSCTL_INT(_net_isr, OID_AUTO, queued, CTLFLAG_RD,
     &isrstat.isrs_queued, 0, "");
+SYSCTL_INT(_net_isr, OID_AUTO, drop, CTLFLAG_RD,
+    &isrstat.isrs_drop, 0, "");
 SYSCTL_INT(_net_isr, OID_AUTO, swi_count, CTLFLAG_RD,
     &isrstat.isrs_swi_count, 0, "");

@@ -153,46 +155,43 @@ netisr_processqueue(struct netisr *ni)

 /*
  * Call the netisr directly instead of queueing the packet, if possible.
- *
- * Ideally, the permissibility of calling the routine would be determined
- * by checking if splnet() was asserted at the time the device interrupt
- * occurred; if so, this indicates that someone is in the network stack.
- *
- * However, bus_setup_intr uses INTR_TYPE_NET, which sets splnet before
- * calling the interrupt handler, so the previous mask is unavailable.
- * Approximate this by checking intr_nesting_level instead; if any SWI
- * handlers are running, the packet is queued instead.
  */
 void
 netisr_dispatch(int num, struct mbuf *m)
 {
 	struct netisr *ni;

-	isrstat.isrs_count++;
+	isrstat.isrs_count++;		/* XXX redundant */
 	KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
 	    ("bad isr %d", num));
 	ni = &netisrs[num];
 	if (ni->ni_queue == NULL) {
+		isrstat.isrs_drop++;
 		m_freem(m);
 		return;
 	}
-	if (netisr_enable && mtx_trylock(&netisr_mtx)) {
+	/*
+	 * Do direct dispatch only for MPSAFE netisrs (and
+	 * only when enabled).  Note that when a netisr is
+	 * marked MPSAFE we permit multiple concurrent instances
+	 * to run.  We guarantee only the order in which
+	 * packets are processed for each "dispatch point" in
+	 * the system (i.e. call to netisr_dispatch or
+	 * netisr_queue).  This insures ordering of packets
+	 * from an interface but does not guarantee ordering
+	 * between multiple places in the system (e.g. IP
+	 * dispatched from interfaces vs. IP queued from IPSec).
+	 */
+	if (netisr_enable && (ni->ni_flags & NETISR_MPSAFE)) {
 		isrstat.isrs_directed++;
 		/*
-		 * One slight problem here is that packets might bypass
-		 * each other in the stack, if an earlier one happened
-		 * to get stuck in the queue.
-		 *
-		 * we can either:
-		 *	a. drain the queue before handling this packet,
-		 *	b. fallback to queueing the packet,
-		 *	c. sweep the issue under the rug and ignore it.
-		 *
-		 * Currently, we do a).  Previously, we did c).
+		 * NB: We used to drain the queue before handling
+		 * the packet but now do not.  Doing so here will
+		 * not preserve ordering so instead we fallback to
+		 * guaranteeing order only from dispatch points
+		 * in the system (see above).
 		 */
-		netisr_processqueue(ni);
 		ni->ni_handler(m);
-		mtx_unlock(&netisr_mtx);
 	} else {
 		isrstat.isrs_deferred++;
 		if (IF_HANDOFF(ni->ni_queue, m, NULL))
@@ -214,6 +213,7 @@ netisr_queue(int num, struct mbuf *m)
 	    ("bad isr %d", num));
 	ni = &netisrs[num];
 	if (ni->ni_queue == NULL) {
+		isrstat.isrs_drop++;
 		m_freem(m);
 		return (1);
 	}
@@ -236,7 +236,6 @@ swi_net(void *dummy)
 	const int polling = 0;
 #endif

-	mtx_lock(&netisr_mtx);
 	do {
 		bits = atomic_readandclear_int(&netisr);
 		if (bits == 0)
@@ -250,21 +249,28 @@
 				printf("swi_net: unregistered isr %d.\n", i);
 				continue;
 			}
-			if (ni->ni_queue == NULL)
-				ni->ni_handler(NULL);
-			else
-				netisr_processqueue(ni);
+			if ((ni->ni_flags & NETISR_MPSAFE) == 0) {
+				mtx_lock(&Giant);
+				if (ni->ni_queue == NULL)
+					ni->ni_handler(NULL);
+				else
+					netisr_processqueue(ni);
+				mtx_unlock(&Giant);
+			} else {
+				if (ni->ni_queue == NULL)
+					ni->ni_handler(NULL);
+				else
+					netisr_processqueue(ni);
+			}
 		}
 	} while (polling);
-	mtx_unlock(&netisr_mtx);
 }

 static void
 start_netisr(void *dummy)
 {

-	mtx_init(&netisr_mtx, "netisr lock", NULL, MTX_DEF);
-	if (swi_add(NULL, "net", swi_net, NULL, SWI_NET, 0, &net_ih))
+	if (swi_add(NULL, "net", swi_net, NULL, SWI_NET, INTR_MPSAFE, &net_ih))
 		panic("start_netisr");
 }
 SYSINIT(start_netisr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_netisr, NULL)
@@ -91,7 +91,8 @@ typedef void netisr_t (struct mbuf *);

 void	netisr_dispatch(int, struct mbuf *);
 int	netisr_queue(int, struct mbuf *);
-void	netisr_register(int, netisr_t *, struct ifqueue *);
+#define	NETISR_MPSAFE	0x0001		/* ISR does not need Giant */
+void	netisr_register(int, netisr_t *, struct ifqueue *, int);
 void	netisr_unregister(int);

 #endif
@@ -312,6 +312,8 @@ at_aarpinput( struct arpcom *ac, struct mbuf *m)
 	int op;
 	u_short net;

+	GIANT_REQUIRED;
+
 	ea = mtod( m, struct ether_aarp *);

 	/* Check to see if from my hardware address */
@@ -39,6 +39,7 @@ static void ddp_input(struct mbuf *, struct ifnet *, struct elaphdr *, int);
 void
 at2intr(struct mbuf *m)
 {
+	GIANT_REQUIRED;

 	/*
 	 * Phase 2 packet handling
@@ -66,6 +67,8 @@ at1intr(struct mbuf *m)
 	elhp = mtod(m, struct elaphdr *);
 	m_adj(m, SZ_ELAPHDR);

+	GIANT_REQUIRED;
+
 	if (elhp->el_type == ELAP_DDPEXTEND) {
 		ddp_input(m, m->m_pkthdr.rcvif, NULL, 1);
 	} else {
@@ -553,9 +553,9 @@ ddp_init(void)
 	mtx_init(&atintrq1.ifq_mtx, "at1_inq", NULL, MTX_DEF);
 	mtx_init(&atintrq2.ifq_mtx, "at2_inq", NULL, MTX_DEF);
 	mtx_init(&aarpintrq.ifq_mtx, "aarp_inq", NULL, MTX_DEF);
-	netisr_register(NETISR_ATALK1, at1intr, &atintrq1);
-	netisr_register(NETISR_ATALK2, at2intr, &atintrq2);
-	netisr_register(NETISR_AARP, aarpintr, &aarpintrq);
+	netisr_register(NETISR_ATALK1, at1intr, &atintrq1, 0);
+	netisr_register(NETISR_ATALK2, at2intr, &atintrq2, 0);
+	netisr_register(NETISR_AARP, aarpintr, &aarpintrq, 0);
 }

 #if 0
@@ -141,7 +141,7 @@ atm_initialize()

 	atm_intrq.ifq_maxlen = ATM_INTRQ_MAX;
 	mtx_init(&atm_intrq.ifq_mtx, "atm_inq", NULL, MTX_DEF);
-	netisr_register(NETISR_ATM, atm_intr, &atm_intrq);
+	netisr_register(NETISR_ATM, atm_intr, &atm_intrq, 0);

 	/*
 	 * Initialize subsystems
@@ -557,6 +557,8 @@ atm_intr(struct mbuf *m)
 	atm_intr_func_t func;
 	void *token;

+	GIANT_REQUIRED;
+
 	/*
 	 * Get function to call and token value
 	 */
@@ -2986,7 +2986,8 @@ ngb_mod_event(module_t mod, int event, void *data)
 		mtx_init(&ng_idhash_mtx, "netgraph idhash mutex", NULL, 0);
 		mtx_init(&ngq_mtx, "netgraph free item list mutex", NULL, 0);
 		s = splimp();
-		netisr_register(NETISR_NETGRAPH, (netisr_t *)ngintr, NULL);
+		/* XXX could use NETISR_MPSAFE but need to verify code */
+		netisr_register(NETISR_NETGRAPH, (netisr_t *)ngintr, NULL, 0);
 		splx(s);
 		break;
 	case MOD_UNLOAD:
@@ -980,6 +980,6 @@ arp_init(void)
 	mtx_init(&arpintrq.ifq_mtx, "arp_inq", NULL, MTX_DEF);
 	LIST_INIT(&llinfo_arp);
 	callout_init(&arp_callout, CALLOUT_MPSAFE);
-	netisr_register(NETISR_ARP, arpintr, &arpintrq);
+	netisr_register(NETISR_ARP, arpintr, &arpintrq, NETISR_MPSAFE);
 }
 SYSINIT(arp, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY, arp_init, 0);
@@ -334,7 +334,7 @@ ip_init()
 #endif
 	ipintrq.ifq_maxlen = ipqmaxlen;
 	mtx_init(&ipintrq.ifq_mtx, "ip_inq", NULL, MTX_DEF);
-	netisr_register(NETISR_IP, ip_input, &ipintrq);
+	netisr_register(NETISR_IP, ip_input, &ipintrq, NETISR_MPSAFE);
 }

 /*
@@ -1009,6 +1009,7 @@ DPRINTF(("ip_input: no SP, packet discarded\n"));/*XXX*/
 	 * Switch out to protocol's input routine.
 	 */
 	ipstat.ips_delivered++;
+	NET_PICKUP_GIANT();
 	if (args.next_hop && ip->ip_p == IPPROTO_TCP) {
 		/* TCP needs IPFORWARD info if available */
 		struct m_hdr tag;
@@ -1022,6 +1023,7 @@ DPRINTF(("ip_input: no SP, packet discarded\n"));/*XXX*/
 		    (struct mbuf *)&tag, hlen);
 	} else
 		(*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen);
+	NET_DROP_GIANT();
 	return;
 bad:
 	m_freem(m);
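The two ip_input() hunks above implement the "pickup Giant (when debug_mpsafenet is 1) inside ip_input before calling up with a packet" item from the commit message. Condensed, the resulting pattern looks roughly like the sketch below; proto_input() is a hypothetical stand-in for the (*inetsw[ip_protox[ip->ip_p]].pr_input)() upcall.

	/*
	 * Sketch only: per the commit message, NET_PICKUP_GIANT() and
	 * NET_DROP_GIANT() acquire and release Giant when debug_mpsafenet
	 * is 1, so the protocol input routines still run under Giant even
	 * though the IP netisr itself is now registered NETISR_MPSAFE.
	 */
	ipstat.ips_delivered++;
	NET_PICKUP_GIANT();
	proto_input(m, hlen);	/* hypothetical stand-in for the pr_input upcall */
	NET_DROP_GIANT();
	return;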
@@ -196,7 +196,7 @@ ip6_init()
 #endif /* PFIL_HOOKS */
 	ip6intrq.ifq_maxlen = ip6qmaxlen;
 	mtx_init(&ip6intrq.ifq_mtx, "ip6_inq", NULL, MTX_DEF);
-	netisr_register(NETISR_IPV6, ip6_input, &ip6intrq);
+	netisr_register(NETISR_IPV6, ip6_input, &ip6intrq, 0);
 	scope6_init();
 	addrsel_policy_init();
 	nd6_init();
@@ -249,6 +249,7 @@ ip6_input(m)
 #endif
 	int srcrt = 0;

+	GIANT_REQUIRED;		/* XXX for now */
 #ifdef IPSEC
 	/*
 	 * should the inner packet be considered authentic?
@@ -119,7 +119,7 @@ ipx_init()

 	ipxintrq.ifq_maxlen = ipxqmaxlen;
 	mtx_init(&ipxintrq.ifq_mtx, "ipx_inq", NULL, MTX_DEF);
-	netisr_register(NETISR_IPX, ipxintr, &ipxintrq);
+	netisr_register(NETISR_IPX, ipxintr, &ipxintrq, 0);
 }

 /*
@@ -133,6 +133,8 @@ ipxintr(struct mbuf *m)
 	struct ipx_ifaddr *ia;
 	int len;

+	GIANT_REQUIRED;
+
 	/*
 	 * If no IPX addresses have been set yet but the interfaces
 	 * are receiving, can't do anything with incoming packets yet.
@@ -685,6 +685,8 @@ natmintr(struct mbuf *m)
 	struct socket *so;
 	struct natmpcb *npcb;

+	GIANT_REQUIRED;
+
 #ifdef DIAGNOSTIC
 	M_ASSERTPKTHDR(m);
 #endif
@@ -122,7 +122,7 @@ natm_init(void)
 	bzero(&natmintrq, sizeof(natmintrq));
 	natmintrq.ifq_maxlen = natmqmaxlen;
 	mtx_init(&natmintrq.ifq_mtx, "natm_inq", NULL, MTX_DEF);
-	netisr_register(NETISR_NATM, natmintr, &natmintrq);
+	netisr_register(NETISR_NATM, natmintr, &natmintrq, 0);
 }

 #if defined(__FreeBSD__)