
Big polling(4) cleanup.

o Axe poll in trap.

o Axe IFF_POLLING flag from if_flags.

o Rework revision 1.21 (Giant removal), in such a way that
  poll_mtx is not dropped during calls to the polling handlers.
  This fixes a problem with idle polling.

o Make registration with and deregistration from polling take
  effect immediately (functionally), instead of at the next
  tick/interrupt.

o Obsolete kern.polling.enable. Polling is turned on/off
  with ifconfig.
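
  For example (the interface name is illustrative):

	ifconfig em0 polling	# register em0 with the polling system
	ifconfig em0 -polling	# deregister em0, interrupts are re-enabled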

Detailed kern_poll.c changes:
  - Remove the polling handler flags introduced in 1.21. They are
    not needed now.
  - Forget about if_flags, if_capenable and if_drv_flags and do not
    check them.
  - Call all registered polling handlers unconditionally (see the
    sketch after this list).
  - Do not drop poll_mtx when entering the polling handlers.
  - In ether_poll(), take NET_LOCK_GIANT() prior to locking poll_mtx.
  - In netisr_poll(), axe the block where the polling code asks
    drivers to unregister.
  - In netisr_poll() and ether_poll(), always do polling if any
    handlers are present.
  - In ether_poll_[de]register(), remove a lot of error-hiding code;
    assert that the arguments are correct instead.
  - In ether_poll_[de]register(), use standard return values to
    report error or success.
  - Introduce poll_switch(), a sysctl handler for kern.polling.enable.
    poll_switch() goes through the interface list and enables/disables
    polling on each interface. A message that kern.polling.enable is
    deprecated is printed.
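
  A minimal sketch of the resulting netisr_poll() handler loop (the
  identifiers here are illustrative of the items above, not the exact
  kern_poll.c source):

	mtx_lock(&poll_mtx);
	for (i = 0; i < poll_handlers; i++)
		pr[i].handler(pr[i].ifp, POLL_ONLY, cycles);
	/* poll_mtx is held across the handler calls, never dropped. */
	mtx_unlock(&poll_mtx);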

Detailed driver changes:
  - On attach, the driver announces IFCAP_POLLING in if_capabilities,
    but does not set it in if_capenable.
  - On detach, the driver calls ether_poll_deregister() if polling is
    enabled.
  - In the polling handler, the driver obtains its lock and checks the
    IFF_DRV_RUNNING flag. If it is not set, the handler unlocks and
    returns.
  - In the ioctl handler, the driver checks whether the IFCAP_POLLING
    flag is requested to be set or cleared. The driver first calls
    ether_poll_[de]register(), then obtains its lock and [dis/en]ables
    interrupts (see the sketch after this list).
  - In the interrupt handler, the driver checks the IFCAP_POLLING flag
    in if_capenable. If it is set, the handler returns. This is
    important to protect from spurious interrupts.
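
  A sketch of the resulting SIOCSIFCAP handling, for a hypothetical
  foo(4) driver (the drivers below differ only in lock names and in
  how they mask their interrupts):

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(foo_poll, ifp);
				if (error)
					return (error);
				FOO_LOCK(sc);
				foo_disable_intr(sc);	/* mask hw interrupts */
				ifp->if_capenable |= IFCAP_POLLING;
				FOO_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Re-enable interrupts even on error. */
				FOO_LOCK(sc);
				foo_enable_intr(sc);
				ifp->if_capenable &= ~IFCAP_POLLING;
				FOO_UNLOCK(sc);
			}
		}
#endif
		break;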

Reviewed by:	ru, sam, jhb
Gleb Smirnoff 2005-10-01 18:56:19 +00:00
parent 5997cae9a4
commit 4092996774
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=150789
25 changed files with 853 additions and 751 deletions


@ -146,11 +146,6 @@ SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
extern char *syscallnames[]; extern char *syscallnames[];
#endif #endif
#ifdef DEVICE_POLLING
extern u_int32_t poll_in_trap;
extern int ether_poll(int count);
#endif /* DEVICE_POLLING */
/* /*
* Exception, fault, and trap interface to the FreeBSD kernel. * Exception, fault, and trap interface to the FreeBSD kernel.
* This common code is called from assembly language IDT gate entry * This common code is called from assembly language IDT gate entry
@ -241,11 +236,6 @@ trap(frame)
trap_fatal(&frame, frame.tf_addr); trap_fatal(&frame, frame.tf_addr);
} }
#ifdef DEVICE_POLLING
if (poll_in_trap)
ether_poll(poll_in_trap);
#endif /* DEVICE_POLLING */
if (ISPL(frame.tf_cs) == SEL_UPL) { if (ISPL(frame.tf_cs) == SEL_UPL) {
/* user trap */ /* user trap */


@ -2265,10 +2265,10 @@ dc_attach(device_t dev)
*/ */
ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capabilities |= IFCAP_VLAN_MTU;
ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING; ifp->if_capabilities |= IFCAP_POLLING;
#endif #endif
ifp->if_capenable = ifp->if_capabilities;
callout_init_mtx(&sc->dc_stat_ch, &sc->dc_mtx, 0); callout_init_mtx(&sc->dc_stat_ch, &sc->dc_mtx, 0);
@ -2339,6 +2339,11 @@ dc_detach(device_t dev)
ifp = sc->dc_ifp; ifp = sc->dc_ifp;
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
/* These should only be active if attach succeeded */ /* These should only be active if attach succeeded */
if (device_is_attached(dev)) { if (device_is_attached(dev)) {
DC_LOCK(sc); DC_LOCK(sc);
@ -2704,7 +2709,7 @@ dc_rxeof(struct dc_softc *sc)
while (!(le32toh(sc->dc_ldata->dc_rx_list[i].dc_status) & while (!(le32toh(sc->dc_ldata->dc_rx_list[i].dc_status) &
DC_RXSTAT_OWN)) { DC_RXSTAT_OWN)) {
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) { if (ifp->if_capenable & IFCAP_POLLING) {
if (sc->rxcycles <= 0) if (sc->rxcycles <= 0)
break; break;
sc->rxcycles--; sc->rxcycles--;
@ -3038,16 +3043,13 @@ dc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{ {
struct dc_softc *sc = ifp->if_softc; struct dc_softc *sc = ifp->if_softc;
if (!(ifp->if_capenable & IFCAP_POLLING)) { DC_LOCK(sc);
ether_poll_deregister(ifp);
cmd = POLL_DEREGISTER; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
} DC_UNLOCK(sc);
if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
/* Re-enable interrupts. */
CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
return; return;
} }
DC_LOCK(sc);
sc->rxcycles = count; sc->rxcycles = count;
dc_rxeof(sc); dc_rxeof(sc);
dc_txeof(sc); dc_txeof(sc);
@ -3111,12 +3113,9 @@ dc_intr(void *arg)
DC_LOCK(sc); DC_LOCK(sc);
ifp = sc->dc_ifp; ifp = sc->dc_ifp;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING) {
goto done; DC_UNLOCK(sc);
if ((ifp->if_capenable & IFCAP_POLLING) && return;
ether_poll_register(dc_poll, ifp)) { /* ok, disable interrupts */
CSR_WRITE_4(sc, DC_IMR, 0x00000000);
goto done;
} }
#endif #endif
@ -3183,10 +3182,6 @@ dc_intr(void *arg)
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
dc_start_locked(ifp); dc_start_locked(ifp);
#ifdef DEVICE_POLLING
done:
#endif
DC_UNLOCK(sc); DC_UNLOCK(sc);
} }
@ -3534,7 +3529,7 @@ dc_init_locked(struct dc_softc *sc)
* the case of polling. Some cards (e.g. fxp) turn interrupts on * the case of polling. Some cards (e.g. fxp) turn interrupts on
* after a reset. * after a reset.
*/ */
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING)
CSR_WRITE_4(sc, DC_IMR, 0x00000000); CSR_WRITE_4(sc, DC_IMR, 0x00000000);
else else
#endif #endif
@ -3686,10 +3681,31 @@ dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
#endif #endif
break; break;
case SIOCSIFCAP: case SIOCSIFCAP:
DC_LOCK(sc); #ifdef DEVICE_POLLING
ifp->if_capenable &= ~IFCAP_POLLING; if (ifr->ifr_reqcap & IFCAP_POLLING &&
ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING; !(ifp->if_capenable & IFCAP_POLLING)) {
DC_UNLOCK(sc); error = ether_poll_register(dc_poll, ifp);
if (error)
return(error);
DC_LOCK(sc);
/* Disable interrupts */
CSR_WRITE_4(sc, DC_IMR, 0x00000000);
ifp->if_capenable |= IFCAP_POLLING;
DC_UNLOCK(sc);
return (error);
}
if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
ifp->if_capenable & IFCAP_POLLING) {
error = ether_poll_deregister(ifp);
/* Enable interrupts. */
DC_LOCK(sc);
CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
ifp->if_capenable &= ~IFCAP_POLLING;
DC_UNLOCK(sc);
return (error);
}
#endif /* DEVICE_POLLING */
break; break;
default: default:
error = ether_ioctl(ifp, command, data); error = ether_ioctl(ifp, command, data);
@ -3744,9 +3760,6 @@ dc_stop(struct dc_softc *sc)
callout_stop(&sc->dc_stat_ch); callout_stop(&sc->dc_stat_ch);
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
#ifdef DEVICE_POLLING
ether_poll_deregister(ifp);
#endif
DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON | DC_NETCFG_TX_ON)); DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON | DC_NETCFG_TX_ON));
CSR_WRITE_4(sc, DC_IMR, 0x00000000); CSR_WRITE_4(sc, DC_IMR, 0x00000000);


@ -197,6 +197,9 @@ static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void em_add_int_delay_sysctl(struct adapter *, const char *, static void em_add_int_delay_sysctl(struct adapter *, const char *,
const char *, struct em_int_delay_info *, const char *, struct em_int_delay_info *,
int, int); int, int);
#ifdef DEVICE_POLLING
static poll_handler_t em_poll;
#endif
/********************************************************************* /*********************************************************************
* FreeBSD Device Interface Entry Points * FreeBSD Device Interface Entry Points
@ -526,6 +529,11 @@ em_detach(device_t dev)
INIT_DEBUGOUT("em_detach: begin"); INIT_DEBUGOUT("em_detach: begin");
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
EM_LOCK(adapter); EM_LOCK(adapter);
adapter->in_detach = 1; adapter->in_detach = 1;
em_stop(adapter); em_stop(adapter);
@ -717,7 +725,7 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
em_initialize_receive_unit(adapter); em_initialize_receive_unit(adapter);
} }
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (!(ifp->if_flags & IFF_POLLING)) if (!(ifp->if_capenable & IFCAP_POLLING))
#endif #endif
em_enable_intr(adapter); em_enable_intr(adapter);
EM_UNLOCK(adapter); EM_UNLOCK(adapter);
@ -732,8 +740,26 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)"); IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
reinit = 0; reinit = 0;
mask = ifr->ifr_reqcap ^ ifp->if_capenable; mask = ifr->ifr_reqcap ^ ifp->if_capenable;
if (mask & IFCAP_POLLING) #ifdef DEVICE_POLLING
ifp->if_capenable ^= IFCAP_POLLING; if (mask & IFCAP_POLLING) {
if (ifr->ifr_reqcap & IFCAP_POLLING) {
error = ether_poll_register(em_poll, ifp);
if (error)
return(error);
EM_LOCK(adapter);
em_disable_intr(adapter);
ifp->if_capenable |= IFCAP_POLLING;
EM_UNLOCK(adapter);
} else {
error = ether_poll_deregister(ifp);
/* Enable interrupt even in error case */
EM_LOCK(adapter);
em_enable_intr(adapter);
ifp->if_capenable &= ~IFCAP_POLLING;
EM_UNLOCK(adapter);
}
}
#endif
if (mask & IFCAP_HWCSUM) { if (mask & IFCAP_HWCSUM) {
ifp->if_capenable ^= IFCAP_HWCSUM; ifp->if_capenable ^= IFCAP_HWCSUM;
reinit = 1; reinit = 1;
@ -895,7 +921,7 @@ em_init_locked(struct adapter * adapter)
* Only enable interrupts if we are not polling, make sure * Only enable interrupts if we are not polling, make sure
* they are off otherwise. * they are off otherwise.
*/ */
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING)
em_disable_intr(adapter); em_disable_intr(adapter);
else else
#endif /* DEVICE_POLLING */ #endif /* DEVICE_POLLING */
@ -920,8 +946,6 @@ em_init(void *arg)
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
static poll_handler_t em_poll;
static void static void
em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{ {
@ -930,14 +954,6 @@ em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
mtx_assert(&adapter->mtx, MA_OWNED); mtx_assert(&adapter->mtx, MA_OWNED);
if (!(ifp->if_capenable & IFCAP_POLLING)) {
ether_poll_deregister(ifp);
cmd = POLL_DEREGISTER;
}
if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
em_enable_intr(adapter);
return;
}
if (cmd == POLL_AND_CHECK_STATUS) { if (cmd == POLL_AND_CHECK_STATUS) {
reg_icr = E1000_READ_REG(&adapter->hw, ICR); reg_icr = E1000_READ_REG(&adapter->hw, ICR);
if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
@ -948,13 +964,10 @@ em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
callout_reset(&adapter->timer, hz, em_local_timer, adapter); callout_reset(&adapter->timer, hz, em_local_timer, adapter);
} }
} }
if (ifp->if_drv_flags & IFF_DRV_RUNNING) { em_process_receive_interrupts(adapter, count);
em_process_receive_interrupts(adapter, count); em_clean_transmit_interrupts(adapter);
em_clean_transmit_interrupts(adapter);
}
if (ifp->if_drv_flags & IFF_DRV_RUNNING && if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
em_start_locked(ifp); em_start_locked(ifp);
} }
@ -964,7 +977,8 @@ em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
struct adapter *adapter = ifp->if_softc; struct adapter *adapter = ifp->if_softc;
EM_LOCK(adapter); EM_LOCK(adapter);
em_poll_locked(ifp, cmd, count); if (ifp->if_drv_flags & IFF_DRV_RUNNING)
em_poll_locked(ifp, cmd, count);
EM_UNLOCK(adapter); EM_UNLOCK(adapter);
} }
#endif /* DEVICE_POLLING */ #endif /* DEVICE_POLLING */
@ -987,18 +1001,10 @@ em_intr(void *arg)
ifp = adapter->ifp; ifp = adapter->ifp;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) { if (ifp->if_capenable & IFCAP_POLLING) {
EM_UNLOCK(adapter); EM_UNLOCK(adapter);
return; return;
} }
if ((ifp->if_capenable & IFCAP_POLLING) &&
ether_poll_register(em_poll, ifp)) {
em_disable_intr(adapter);
em_poll_locked(ifp, 0, 1);
EM_UNLOCK(adapter);
return;
}
#endif /* DEVICE_POLLING */ #endif /* DEVICE_POLLING */
reg_icr = E1000_READ_REG(&adapter->hw, ICR); reg_icr = E1000_READ_REG(&adapter->hw, ICR);
@ -1718,9 +1724,7 @@ em_stop(void *arg)
mtx_assert(&adapter->mtx, MA_OWNED); mtx_assert(&adapter->mtx, MA_OWNED);
INIT_DEBUGOUT("em_stop: begin"); INIT_DEBUGOUT("em_stop: begin");
#ifdef DEVICE_POLLING
ether_poll_deregister(ifp);
#endif
em_disable_intr(adapter); em_disable_intr(adapter);
em_reset_hw(&adapter->hw); em_reset_hw(&adapter->hw);
callout_stop(&adapter->timer); callout_stop(&adapter->timer);
@ -1976,7 +1980,6 @@ em_setup_interface(device_t dev, struct adapter * adapter)
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING; ifp->if_capabilities |= IFCAP_POLLING;
ifp->if_capenable |= IFCAP_POLLING;
#endif #endif
/* /*


@ -70,9 +70,6 @@ IEEE 1394 support for FreeBSD-5.X and 4.X.
It also has DEVICE_POLLING[5] support. To enable it, edit your
kernel config file and Makefile.fwe then rebuild kernel and if_fwe.ko.
-Note this driver checks kern.polling.enable only when enabling the
-interface. When you enable polling after the interface is up,
-try 'ifconfig fwe0 down;ifconfig fwe0 up'.
5. FireWire for Kernel Hackers


@ -100,19 +100,6 @@ TUNABLE_INT("hw.firewire.fwe.tx_speed", &tx_speed);
TUNABLE_INT("hw.firewire.fwe.rx_queue_len", &rx_queue_len); TUNABLE_INT("hw.firewire.fwe.rx_queue_len", &rx_queue_len);
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
#define FWE_POLL_REGISTER(func, fwe, ifp) \
if (ether_poll_register(func, ifp)) { \
struct firewire_comm *fc = (fwe)->fd.fc; \
fc->set_intr(fc, 0); \
}
#define FWE_POLL_DEREGISTER(fwe, ifp) \
do { \
struct firewire_comm *fc = (fwe)->fd.fc; \
ether_poll_deregister(ifp); \
fc->set_intr(fc, 1); \
} while(0) \
static poll_handler_t fwe_poll; static poll_handler_t fwe_poll;
static void static void
@ -121,19 +108,15 @@ fwe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
struct fwe_softc *fwe; struct fwe_softc *fwe;
struct firewire_comm *fc; struct firewire_comm *fc;
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
return;
fwe = ((struct fwe_eth_softc *)ifp->if_softc)->fwe; fwe = ((struct fwe_eth_softc *)ifp->if_softc)->fwe;
fc = fwe->fd.fc; fc = fwe->fd.fc;
if (cmd == POLL_DEREGISTER) {
/* enable interrupts */
fc->set_intr(fc, 1);
return;
}
fc->poll(fc, (cmd == POLL_AND_CHECK_STATUS)?0:1, count); fc->poll(fc, (cmd == POLL_AND_CHECK_STATUS)?0:1, count);
} }
#else #endif /* DEVICE_POLLING */
#define FWE_POLL_REGISTER(func, fwe, ifp)
#define FWE_POLL_DEREGISTER(fwe, ifp)
#endif
static void static void
fwe_identify(driver_t *driver, device_t parent) fwe_identify(driver_t *driver, device_t parent)
{ {
@ -242,7 +225,7 @@ fwe_attach(device_t dev)
/* Tell the upper layer(s) we support long frames. */ /* Tell the upper layer(s) we support long frames. */
ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capabilities |= IFCAP_VLAN_MTU & IFCAP_POLLING;
ifp->if_capenable |= IFCAP_VLAN_MTU; ifp->if_capenable |= IFCAP_VLAN_MTU;
#endif #endif
@ -262,8 +245,6 @@ fwe_stop(struct fwe_softc *fwe)
fc = fwe->fd.fc; fc = fwe->fd.fc;
FWE_POLL_DEREGISTER(fwe, ifp);
if (fwe->dma_ch >= 0) { if (fwe->dma_ch >= 0) {
xferq = fc->ir[fwe->dma_ch]; xferq = fc->ir[fwe->dma_ch];
@ -305,6 +286,11 @@ fwe_detach(device_t dev)
fwe = device_get_softc(dev); fwe = device_get_softc(dev);
ifp = fwe->eth_softc.ifp; ifp = fwe->eth_softc.ifp;
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
s = splimp(); s = splimp();
fwe_stop(fwe); fwe_stop(fwe);
@ -416,7 +402,6 @@ fwe_init(void *arg)
ifp->if_flags &= ~IFF_OACTIVE; ifp->if_flags &= ~IFF_OACTIVE;
#endif #endif
FWE_POLL_REGISTER(fwe_poll, fwe, ifp);
#if 0 #if 0
/* attempt to start output */ /* attempt to start output */
fwe_start(ifp); fwe_start(ifp);
@ -468,6 +453,34 @@ fwe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
fwe->stream_ch, fwe->dma_ch); fwe->stream_ch, fwe->dma_ch);
splx(s); splx(s);
break; break;
case SIOCSIFCAP:
#ifdef DEVICE_POLLING
{
struct ifreq *ifr = (struct ifreq *) data;
struct firewire_comm *fc = fc = fwe->fd.fc;
if (ifr->ifr_reqcap & IFCAP_POLLING &&
!(ifp->if_capenable & IFCAP_POLLING)) {
error = ether_poll_register(fwe_poll, ifp);
if (error)
return(error);
/* Disable interrupts */
fc->set_intr(fc, 0);
ifp->if_capenable |= IFCAP_POLLING;
return (error);
}
if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
ifp->if_capenable & IFCAP_POLLING) {
error = ether_poll_deregister(ifp);
/* Enable interrupts. */
fc->set_intr(fc, 1);
ifp->if_capenable &= ~IFCAP_POLLING;
return (error);
}
}
#endif /* DEVICE_POLLING */
break;
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
default: default:
#else #else
@ -634,9 +647,7 @@ fwe_as_input(struct fw_xferq *xferq)
fwe = (struct fwe_softc *)xferq->sc; fwe = (struct fwe_softc *)xferq->sc;
ifp = fwe->eth_softc.ifp; ifp = fwe->eth_softc.ifp;
#if 0
FWE_POLL_REGISTER(fwe_poll, fwe, ifp);
#endif
while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) { while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
STAILQ_REMOVE_HEAD(&xferq->stvalid, link); STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
fp = mtod(sxfer->mbuf, struct fw_pkt *); fp = mtod(sxfer->mbuf, struct fw_pkt *);


@ -107,19 +107,6 @@ SYSCTL_INT(_hw_firewire_fwip, OID_AUTO, rx_queue_len, CTLFLAG_RW, &rx_queue_len,
TUNABLE_INT("hw.firewire.fwip.rx_queue_len", &rx_queue_len); TUNABLE_INT("hw.firewire.fwip.rx_queue_len", &rx_queue_len);
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
#define FWIP_POLL_REGISTER(func, fwip, ifp) \
if (ether_poll_register(func, ifp)) { \
struct firewire_comm *fc = (fwip)->fd.fc; \
fc->set_intr(fc, 0); \
}
#define FWIP_POLL_DEREGISTER(fwip, ifp) \
do { \
struct firewire_comm *fc = (fwip)->fd.fc; \
ether_poll_deregister(ifp); \
fc->set_intr(fc, 1); \
} while(0) \
static poll_handler_t fwip_poll; static poll_handler_t fwip_poll;
static void static void
@ -128,19 +115,15 @@ fwip_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
struct fwip_softc *fwip; struct fwip_softc *fwip;
struct firewire_comm *fc; struct firewire_comm *fc;
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
return;
fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip; fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
fc = fwip->fd.fc; fc = fwip->fd.fc;
if (cmd == POLL_DEREGISTER) {
/* enable interrupts */
fc->set_intr(fc, 1);
return;
}
fc->poll(fc, (cmd == POLL_AND_CHECK_STATUS)?0:1, count); fc->poll(fc, (cmd == POLL_AND_CHECK_STATUS)?0:1, count);
} }
#else #endif /* DEVICE_POLLING */
#define FWIP_POLL_REGISTER(func, fwip, ifp)
#define FWIP_POLL_DEREGISTER(fwip, ifp)
#endif
static void static void
fwip_identify(driver_t *driver, device_t parent) fwip_identify(driver_t *driver, device_t parent)
{ {
@ -214,6 +197,9 @@ fwip_attach(device_t dev)
ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST| ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST|
IFF_NEEDSGIANT); IFF_NEEDSGIANT);
ifp->if_snd.ifq_maxlen = TX_MAX_QUEUE; ifp->if_snd.ifq_maxlen = TX_MAX_QUEUE;
#ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING;
#endif
s = splimp(); s = splimp();
firewire_ifattach(ifp, hwaddr); firewire_ifattach(ifp, hwaddr);
@ -234,8 +220,6 @@ fwip_stop(struct fwip_softc *fwip)
fc = fwip->fd.fc; fc = fwip->fd.fc;
FWIP_POLL_DEREGISTER(fwip, ifp);
if (fwip->dma_ch >= 0) { if (fwip->dma_ch >= 0) {
xferq = fc->ir[fwip->dma_ch]; xferq = fc->ir[fwip->dma_ch];
@ -279,14 +263,22 @@ static int
fwip_detach(device_t dev) fwip_detach(device_t dev)
{ {
struct fwip_softc *fwip; struct fwip_softc *fwip;
struct ifnet *ifp;
int s; int s;
fwip = (struct fwip_softc *)device_get_softc(dev); fwip = (struct fwip_softc *)device_get_softc(dev);
ifp = fwip->fw_softc.fwip_ifp;
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
s = splimp(); s = splimp();
fwip_stop(fwip); fwip_stop(fwip);
firewire_ifdetach(fwip->fw_softc.fwip_ifp); firewire_ifdetach(ifp);
if_free(fwip->fw_softc.fwip_ifp); if_free(ifp);
splx(s); splx(s);
return 0; return 0;
@ -408,7 +400,6 @@ fwip_init(void *arg)
ifp->if_flags &= ~IFF_OACTIVE; ifp->if_flags &= ~IFF_OACTIVE;
#endif #endif
FWIP_POLL_REGISTER(fwip_poll, fwip, ifp);
#if 0 #if 0
/* attempt to start output */ /* attempt to start output */
fwip_start(ifp); fwip_start(ifp);
@ -444,7 +435,34 @@ fwip_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
case SIOCADDMULTI: case SIOCADDMULTI:
case SIOCDELMULTI: case SIOCDELMULTI:
break; break;
case SIOCSIFCAP:
#ifdef DEVICE_POLLING
{
struct ifreq *ifr = (struct ifreq *) data;
struct firewire_comm *fc = fc = fwip->fd.fc;
if (ifr->ifr_reqcap & IFCAP_POLLING &&
!(ifp->if_capenable & IFCAP_POLLING)) {
error = ether_poll_register(fwip_poll, ifp);
if (error)
return(error);
/* Disable interrupts */
fc->set_intr(fc, 0);
ifp->if_capenable |= IFCAP_POLLING;
return (error);
}
if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
ifp->if_capenable & IFCAP_POLLING) {
error = ether_poll_deregister(ifp);
/* Enable interrupts. */
fc->set_intr(fc, 1);
ifp->if_capenable &= ~IFCAP_POLLING;
return (error);
}
}
#endif /* DEVICE_POLLING */
break;
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
default: default:
#else #else
@ -757,9 +775,7 @@ fwip_stream_input(struct fw_xferq *xferq)
fwip = (struct fwip_softc *)xferq->sc; fwip = (struct fwip_softc *)xferq->sc;
ifp = fwip->fw_softc.fwip_ifp; ifp = fwip->fw_softc.fwip_ifp;
#if 0
FWIP_POLL_REGISTER(fwip_poll, fwip, ifp);
#endif
while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) { while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
STAILQ_REMOVE_HEAD(&xferq->stvalid, link); STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
fp = mtod(sxfer->mbuf, struct fw_pkt *); fp = mtod(sxfer->mbuf, struct fw_pkt *);


@ -773,7 +773,6 @@ fxp_attach(device_t dev)
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
/* Inform the world we support polling. */ /* Inform the world we support polling. */
ifp->if_capabilities |= IFCAP_POLLING; ifp->if_capabilities |= IFCAP_POLLING;
ifp->if_capenable |= IFCAP_POLLING;
#endif #endif
/* /*
@ -891,6 +890,11 @@ fxp_detach(device_t dev)
{ {
struct fxp_softc *sc = device_get_softc(dev); struct fxp_softc *sc = device_get_softc(dev);
#ifdef DEVICE_POLLING
if (sc->ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(sc->ifp);
#endif
FXP_LOCK(sc); FXP_LOCK(sc);
sc->suspended = 1; /* Do same thing as we do for suspend */ sc->suspended = 1; /* Do same thing as we do for suspend */
/* /*
@ -1448,15 +1452,11 @@ fxp_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
uint8_t statack; uint8_t statack;
FXP_LOCK(sc); FXP_LOCK(sc);
if (!(ifp->if_capenable & IFCAP_POLLING)) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
ether_poll_deregister(ifp);
cmd = POLL_DEREGISTER;
}
if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
FXP_UNLOCK(sc); FXP_UNLOCK(sc);
return; return;
} }
statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA | statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA |
FXP_SCB_STATACK_FR; FXP_SCB_STATACK_FR;
if (cmd == POLL_AND_CHECK_STATUS) { if (cmd == POLL_AND_CHECK_STATUS) {
@ -1495,18 +1495,10 @@ fxp_intr(void *xsc)
} }
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) { if (ifp->if_capenable & IFCAP_POLLING) {
FXP_UNLOCK(sc); FXP_UNLOCK(sc);
return; return;
} }
if ((ifp->if_capenable & IFCAP_POLLING) &&
ether_poll_register(fxp_poll, ifp)) {
/* disable interrupts */
CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
FXP_UNLOCK(sc);
fxp_poll(ifp, 0, 1);
return;
}
#endif #endif
while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) { while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
/* /*
@ -1837,9 +1829,6 @@ fxp_stop(struct fxp_softc *sc)
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
ifp->if_timer = 0; ifp->if_timer = 0;
#ifdef DEVICE_POLLING
ether_poll_deregister(ifp);
#endif
/* /*
* Cancel stats updater. * Cancel stats updater.
*/ */
@ -2163,7 +2152,7 @@ fxp_init_body(struct fxp_softc *sc)
* ... but only do that if we are not polling. And because (presumably) * ... but only do that if we are not polling. And because (presumably)
* the default is interrupts on, we need to disable them explicitly! * the default is interrupts on, we need to disable them explicitly!
*/ */
if ( ifp->if_flags & IFF_POLLING ) if (ifp->if_capenable & IFCAP_POLLING )
CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE); CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
else else
#endif /* DEVICE_POLLING */ #endif /* DEVICE_POLLING */
@ -2418,11 +2407,30 @@ fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
break; break;
case SIOCSIFCAP: case SIOCSIFCAP:
FXP_LOCK(sc);
mask = ifp->if_capenable ^ ifr->ifr_reqcap; mask = ifp->if_capenable ^ ifr->ifr_reqcap;
if (mask & IFCAP_POLLING) #ifdef DEVICE_POLLING
ifp->if_capenable ^= IFCAP_POLLING; if (mask & IFCAP_POLLING) {
if (ifr->ifr_reqcap & IFCAP_POLLING) {
error = ether_poll_register(fxp_poll, ifp);
if (error)
return(error);
FXP_LOCK(sc);
CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL,
FXP_SCB_INTR_DISABLE);
ifp->if_capenable |= IFCAP_POLLING;
FXP_UNLOCK(sc);
} else {
error = ether_poll_deregister(ifp);
/* Enable interrupts in any case */
FXP_LOCK(sc);
CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
ifp->if_capenable &= ~IFCAP_POLLING;
FXP_UNLOCK(sc);
}
}
#endif
if (mask & IFCAP_VLAN_MTU) { if (mask & IFCAP_VLAN_MTU) {
FXP_LOCK(sc);
ifp->if_capenable ^= IFCAP_VLAN_MTU; ifp->if_capenable ^= IFCAP_VLAN_MTU;
if (sc->revision != FXP_REV_82557) if (sc->revision != FXP_REV_82557)
flag = FXP_FLAG_LONG_PKT_EN; flag = FXP_FLAG_LONG_PKT_EN;
@ -2431,8 +2439,8 @@ fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
sc->flags ^= flag; sc->flags ^= flag;
if (ifp->if_flags & IFF_UP) if (ifp->if_flags & IFF_UP)
fxp_init_body(sc); fxp_init_body(sc);
FXP_UNLOCK(sc);
} }
FXP_UNLOCK(sc);
break; break;
default: default:


@ -196,18 +196,17 @@ all PRO/10GbE adapters.
options DEVICE_POLLING
options HZ=1000
-At runtime, use the following command to turn on polling mode. Similarly,
-turn off polling mode by setting the variable to 0:
-	sysctl kern.polling.enable=1
-NOTES: DEVICE POLLING is only valid for non-SMP kernels.
-The driver has to be built into the kernel for DEVICE POLLING to be
-enabled in the driver.
+At runtime, use the following command to turn on polling mode.
+	ifconfig ixgb0 polling
+Similarly, turn off polling mode by removing IFCAP_POLLING flag from
+interface:
+	ifconfig ixgb0 -polling
+The driver has to be built into the kernel for DEVICE POLLING to be
+enabled in the driver.
Support
=======


@ -141,6 +141,9 @@ static int
ixgb_dma_malloc(struct adapter *, bus_size_t, ixgb_dma_malloc(struct adapter *, bus_size_t,
struct ixgb_dma_alloc *, int); struct ixgb_dma_alloc *, int);
static void ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *); static void ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *);
#ifdef DEVICE_POLLING
static poll_handler_t ixgb_poll;
#endif
/********************************************************************* /*********************************************************************
* FreeBSD Device Interface Entry Points * FreeBSD Device Interface Entry Points
@ -368,6 +371,11 @@ ixgb_detach(device_t dev)
INIT_DEBUGOUT("ixgb_detach: begin"); INIT_DEBUGOUT("ixgb_detach: begin");
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
IXGB_LOCK(adapter); IXGB_LOCK(adapter);
adapter->in_detach = 1; adapter->in_detach = 1;
@ -557,6 +565,26 @@ ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data)
case SIOCSIFCAP: case SIOCSIFCAP:
IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)"); IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
mask = ifr->ifr_reqcap ^ ifp->if_capenable; mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
if (mask & IFCAP_POLLING) {
if (ifr->ifr_reqcap & IFCAP_POLLING) {
error = ether_poll_register(ixgb_poll, ifp);
if (error)
return(error);
IXGB_LOCK(adapter);
ixgb_disable_intr(adapter);
ifp->if_capenable |= IFCAP_POLLING;
IXGB_UNLOCK(adapter);
} else {
error = ether_poll_deregister(ifp);
/* Enable interrupt even in error case */
IXGB_LOCK(adapter);
ixgb_enable_intr(adapter);
ifp->if_capenable &= ~IFCAP_POLLING;
IXGB_UNLOCK(adapter);
}
}
#endif /* DEVICE_POLLING */
if (mask & IFCAP_HWCSUM) { if (mask & IFCAP_HWCSUM) {
if (IFCAP_HWCSUM & ifp->if_capenable) if (IFCAP_HWCSUM & ifp->if_capenable)
ifp->if_capenable &= ~IFCAP_HWCSUM; ifp->if_capenable &= ~IFCAP_HWCSUM;
@ -695,10 +723,10 @@ ixgb_init_locked(struct adapter *adapter)
* Only disable interrupts if we are polling, make sure they are on * Only disable interrupts if we are polling, make sure they are on
* otherwise. * otherwise.
*/ */
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING)
ixgb_disable_intr(adapter); ixgb_disable_intr(adapter);
else else
#endif /* DEVICE_POLLING */ #endif
ixgb_enable_intr(adapter); ixgb_enable_intr(adapter);
return; return;
@ -716,8 +744,6 @@ ixgb_init(void *arg)
} }
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
static poll_handler_t ixgb_poll;
static void static void
ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count) ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count)
{ {
@ -726,15 +752,6 @@ ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count)
IXGB_LOCK_ASSERT(adapter); IXGB_LOCK_ASSERT(adapter);
if (!(ifp->if_capenable & IFCAP_POLLING)) {
ether_poll_deregister(ifp);
cmd = POLL_DEREGISTER;
}
if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
ixgb_enable_intr(adapter);
return;
}
if (cmd == POLL_AND_CHECK_STATUS) { if (cmd == POLL_AND_CHECK_STATUS) {
reg_icr = IXGB_READ_REG(&adapter->hw, ICR); reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) { if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
@ -745,12 +762,10 @@ ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count)
adapter); adapter);
} }
} }
if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ixgb_process_receive_interrupts(adapter, count);
ixgb_process_receive_interrupts(adapter, count); ixgb_clean_transmit_interrupts(adapter);
ixgb_clean_transmit_interrupts(adapter);
} if (ifp->if_snd.ifq_head != NULL)
if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
ifp->if_snd.ifq_head != NULL)
ixgb_start_locked(ifp); ixgb_start_locked(ifp);
} }
@ -760,10 +775,11 @@ ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count)
struct adapter *adapter = ifp->if_softc; struct adapter *adapter = ifp->if_softc;
IXGB_LOCK(adapter); IXGB_LOCK(adapter);
ixgb_poll_locked(ifp, cmd, count); if (ifp->if_drv_flags & IFF_DRV_RUNNING)
ixgb_poll_locked(ifp, cmd, count);
IXGB_UNLOCK(adapter); IXGB_UNLOCK(adapter);
} }
#endif /* DEVICE_POLLING */ #endif /* DEVICE_POLLING */
/********************************************************************* /*********************************************************************
* *
@ -785,19 +801,11 @@ ixgb_intr(void *arg)
ifp = adapter->ifp; ifp = adapter->ifp;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) { if (ifp->if_capenable & IFCAP_POLLING) {
IXGB_UNLOCK(adapter); IXGB_UNLOCK(adapter);
return; return;
} }
#endif
if ((ifp->if_capenable & IFCAP_POLLING) &&
ether_poll_register(ixgb_poll, ifp)) {
ixgb_disable_intr(adapter);
ixgb_poll_locked(ifp, 0, 1);
IXGB_UNLOCK(adapter);
return;
}
#endif /* DEVICE_POLLING */
reg_icr = IXGB_READ_REG(&adapter->hw, ICR); reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
if (reg_icr == 0) { if (reg_icr == 0) {
@ -1355,9 +1363,6 @@ ixgb_setup_interface(device_t dev, struct adapter * adapter)
#endif #endif
ifp->if_capabilities = IFCAP_HWCSUM; ifp->if_capabilities = IFCAP_HWCSUM;
#ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING;
#endif
/* /*
* Tell the upper layer(s) we support long frames. * Tell the upper layer(s) we support long frames.
@ -1370,6 +1375,10 @@ ixgb_setup_interface(device_t dev, struct adapter * adapter)
ifp->if_capenable = ifp->if_capabilities; ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING;
#endif
/* /*
* Specify the media types supported by this adapter and register * Specify the media types supported by this adapter and register
* callbacks to update media and link information * callbacks to update media and link information

View File

@ -869,10 +869,10 @@ nge_attach(dev)
ifp->if_snd.ifq_maxlen = NGE_TX_LIST_CNT - 1; ifp->if_snd.ifq_maxlen = NGE_TX_LIST_CNT - 1;
ifp->if_hwassist = NGE_CSUM_FEATURES; ifp->if_hwassist = NGE_CSUM_FEATURES;
ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING; ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING;
ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING; ifp->if_capabilities |= IFCAP_POLLING;
#endif #endif
ifp->if_capenable = ifp->if_capabilities;
/* /*
* Do MII setup. * Do MII setup.
@ -958,6 +958,10 @@ nge_detach(dev)
sc = device_get_softc(dev); sc = device_get_softc(dev);
ifp = sc->nge_ifp; ifp = sc->nge_ifp;
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
NGE_LOCK(sc); NGE_LOCK(sc);
nge_reset(sc); nge_reset(sc);
nge_stop(sc); nge_stop(sc);
@ -1126,12 +1130,12 @@ nge_rxeof(sc)
u_int32_t extsts; u_int32_t extsts;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) { if (ifp->if_capenable & IFCAP_POLLING) {
if (sc->rxcycles <= 0) if (sc->rxcycles <= 0)
break; break;
sc->rxcycles--; sc->rxcycles--;
} }
#endif /* DEVICE_POLLING */ #endif
cur_rx = &sc->nge_ldata->nge_rx_list[i]; cur_rx = &sc->nge_ldata->nge_rx_list[i];
rxstat = cur_rx->nge_rxstat; rxstat = cur_rx->nge_rxstat;
@ -1376,12 +1380,7 @@ nge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
struct nge_softc *sc = ifp->if_softc; struct nge_softc *sc = ifp->if_softc;
NGE_LOCK(sc); NGE_LOCK(sc);
if (!(ifp->if_capenable & IFCAP_POLLING)) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
ether_poll_deregister(ifp);
cmd = POLL_DEREGISTER;
}
if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
CSR_WRITE_4(sc, NGE_IER, 1);
NGE_UNLOCK(sc); NGE_UNLOCK(sc);
return; return;
} }
@ -1433,18 +1432,11 @@ nge_intr(arg)
NGE_LOCK(sc); NGE_LOCK(sc);
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) { if (ifp->if_capenable & IFCAP_POLLING) {
NGE_UNLOCK(sc); NGE_UNLOCK(sc);
return; return;
} }
if ((ifp->if_capenable & IFCAP_POLLING) && #endif
ether_poll_register(nge_poll, ifp)) { /* ok, disable interrupts */
CSR_WRITE_4(sc, NGE_IER, 0);
NGE_UNLOCK(sc);
nge_poll(ifp, 0, 1);
return;
}
#endif /* DEVICE_POLLING */
/* Supress unwanted interrupts */ /* Supress unwanted interrupts */
if (!(ifp->if_flags & IFF_UP)) { if (!(ifp->if_flags & IFF_UP)) {
@ -1840,10 +1832,10 @@ nge_init_locked(sc)
* ... only enable interrupts if we are not polling, make sure * ... only enable interrupts if we are not polling, make sure
* they are off otherwise. * they are off otherwise.
*/ */
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING)
CSR_WRITE_4(sc, NGE_IER, 0); CSR_WRITE_4(sc, NGE_IER, 0);
else else
#endif /* DEVICE_POLLING */ #endif
CSR_WRITE_4(sc, NGE_IER, 1); CSR_WRITE_4(sc, NGE_IER, 1);
/* Enable receiver and transmitter. */ /* Enable receiver and transmitter. */
@ -2047,8 +2039,31 @@ nge_ioctl(ifp, command, data)
} }
break; break;
case SIOCSIFCAP: case SIOCSIFCAP:
ifp->if_capenable &= ~IFCAP_POLLING; #ifdef DEVICE_POLLING
ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING; if (ifr->ifr_reqcap & IFCAP_POLLING &&
!(ifp->if_capenable & IFCAP_POLLING)) {
error = ether_poll_register(nge_poll, ifp);
if (error)
return(error);
NGE_LOCK(sc);
/* Disable interrupts */
CSR_WRITE_4(sc, NGE_IER, 0);
ifp->if_capenable |= IFCAP_POLLING;
NGE_UNLOCK(sc);
return (error);
}
if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
ifp->if_capenable & IFCAP_POLLING) {
error = ether_poll_deregister(ifp);
/* Enable interrupts. */
NGE_LOCK(sc);
CSR_WRITE_4(sc, NGE_IER, 1);
ifp->if_capenable &= ~IFCAP_POLLING;
NGE_UNLOCK(sc);
return (error);
}
#endif /* DEVICE_POLLING */
break; break;
default: default:
error = ether_ioctl(ifp, command, data); error = ether_ioctl(ifp, command, data);
@ -2105,9 +2120,6 @@ nge_stop(sc)
} }
callout_stop(&sc->nge_stat_ch); callout_stop(&sc->nge_stat_ch);
#ifdef DEVICE_POLLING
ether_poll_deregister(ifp);
#endif
CSR_WRITE_4(sc, NGE_IER, 0); CSR_WRITE_4(sc, NGE_IER, 0);
CSR_WRITE_4(sc, NGE_IMR, 0); CSR_WRITE_4(sc, NGE_IMR, 0);
NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE); NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);


@ -1202,6 +1202,7 @@ re_attach(dev)
ifp->if_start = re_start; ifp->if_start = re_start;
ifp->if_hwassist = /*RE_CSUM_FEATURES*/0; ifp->if_hwassist = /*RE_CSUM_FEATURES*/0;
ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING; ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING;
ifp->if_capenable = ifp->if_capabilities & ~IFCAP_HWCSUM;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING; ifp->if_capabilities |= IFCAP_POLLING;
#endif #endif
@ -1214,7 +1215,6 @@ re_attach(dev)
IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN); IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN);
ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN; ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN;
IFQ_SET_READY(&ifp->if_snd); IFQ_SET_READY(&ifp->if_snd);
ifp->if_capenable = ifp->if_capabilities & ~IFCAP_HWCSUM;
/* /*
* Call MI attach routine. * Call MI attach routine.
@ -1264,6 +1264,11 @@ re_detach(dev)
ifp = sc->rl_ifp; ifp = sc->rl_ifp;
KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized")); KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized"));
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
/* These should only be active if attach succeeded */ /* These should only be active if attach succeeded */
if (device_is_attached(dev)) { if (device_is_attached(dev)) {
RL_LOCK(sc); RL_LOCK(sc);
@ -1756,7 +1761,8 @@ re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
struct rl_softc *sc = ifp->if_softc; struct rl_softc *sc = ifp->if_softc;
RL_LOCK(sc); RL_LOCK(sc);
re_poll_locked(ifp, cmd, count); if (ifp->if_drv_flags & IFF_DRV_RUNNING)
re_poll_locked(ifp, cmd, count);
RL_UNLOCK(sc); RL_UNLOCK(sc);
} }
@ -1767,15 +1773,6 @@ re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
RL_LOCK_ASSERT(sc); RL_LOCK_ASSERT(sc);
if (!(ifp->if_capenable & IFCAP_POLLING)) {
ether_poll_deregister(ifp);
cmd = POLL_DEREGISTER;
}
if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
return;
}
sc->rxcycles = count; sc->rxcycles = count;
re_rxeof(sc); re_rxeof(sc);
re_txeof(sc); re_txeof(sc);
@ -1822,15 +1819,9 @@ re_intr(arg)
goto done_locked; goto done_locked;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING)
goto done_locked; goto done_locked;
if ((ifp->if_capenable & IFCAP_POLLING) && #endif
ether_poll_register(re_poll, ifp)) { /* ok, disable interrupts */
CSR_WRITE_2(sc, RL_IMR, 0x0000);
re_poll_locked(ifp, 0, 1);
goto done_locked;
}
#endif /* DEVICE_POLLING */
for (;;) { for (;;) {
@ -2171,10 +2162,10 @@ re_init_locked(sc)
/* /*
* Disable interrupts if we are polling. * Disable interrupts if we are polling.
*/ */
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING)
CSR_WRITE_2(sc, RL_IMR, 0); CSR_WRITE_2(sc, RL_IMR, 0);
else /* otherwise ... */ else /* otherwise ... */
#endif /* DEVICE_POLLING */ #endif
/* /*
* Enable interrupts. * Enable interrupts.
*/ */
@ -2289,7 +2280,7 @@ re_ioctl(ifp, command, data)
struct rl_softc *sc = ifp->if_softc; struct rl_softc *sc = ifp->if_softc;
struct ifreq *ifr = (struct ifreq *) data; struct ifreq *ifr = (struct ifreq *) data;
struct mii_data *mii; struct mii_data *mii;
int error; int error = 0;
switch (command) { switch (command) {
case SIOCSIFMTU: case SIOCSIFMTU:
@ -2298,7 +2289,6 @@ re_ioctl(ifp, command, data)
error = EINVAL; error = EINVAL;
ifp->if_mtu = ifr->ifr_mtu; ifp->if_mtu = ifr->ifr_mtu;
RL_UNLOCK(sc); RL_UNLOCK(sc);
error = 0;
break; break;
case SIOCSIFFLAGS: case SIOCSIFFLAGS:
RL_LOCK(sc); RL_LOCK(sc);
@ -2307,14 +2297,12 @@ re_ioctl(ifp, command, data)
else if (ifp->if_drv_flags & IFF_DRV_RUNNING) else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
re_stop(sc); re_stop(sc);
RL_UNLOCK(sc); RL_UNLOCK(sc);
error = 0;
break; break;
case SIOCADDMULTI: case SIOCADDMULTI:
case SIOCDELMULTI: case SIOCDELMULTI:
RL_LOCK(sc); RL_LOCK(sc);
re_setmulti(sc); re_setmulti(sc);
RL_UNLOCK(sc); RL_UNLOCK(sc);
error = 0;
break; break;
case SIOCGIFMEDIA: case SIOCGIFMEDIA:
case SIOCSIFMEDIA: case SIOCSIFMEDIA:
@ -2322,18 +2310,42 @@ re_ioctl(ifp, command, data)
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
break; break;
case SIOCSIFCAP: case SIOCSIFCAP:
RL_LOCK(sc); {
ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_POLLING); int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
ifp->if_capenable |= #ifdef DEVICE_POLLING
ifr->ifr_reqcap & (IFCAP_HWCSUM | IFCAP_POLLING); if (mask & IFCAP_POLLING) {
if (ifp->if_capenable & IFCAP_TXCSUM) if (ifr->ifr_reqcap & IFCAP_POLLING) {
ifp->if_hwassist = RE_CSUM_FEATURES; error = ether_poll_register(re_poll, ifp);
else if (error)
ifp->if_hwassist = 0; return(error);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) RL_LOCK(sc);
re_init_locked(sc); /* Disable interrupts */
RL_UNLOCK(sc); CSR_WRITE_2(sc, RL_IMR, 0x0000);
error = 0; ifp->if_capenable |= IFCAP_POLLING;
RL_UNLOCK(sc);
} else {
error = ether_poll_deregister(ifp);
/* Enable interrupts. */
RL_LOCK(sc);
CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
ifp->if_capenable &= ~IFCAP_POLLING;
RL_UNLOCK(sc);
}
}
#endif /* DEVICE_POLLING */
if (mask & IFCAP_HWCSUM) {
RL_LOCK(sc);
ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_HWCSUM;
if (ifp->if_capenable & IFCAP_TXCSUM)
ifp->if_hwassist = RE_CSUM_FEATURES;
else
ifp->if_hwassist = 0;
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
re_init_locked(sc);
RL_UNLOCK(sc);
}
}
break; break;
default: default:
error = ether_ioctl(ifp, command, data); error = ether_ioctl(ifp, command, data);
@ -2379,9 +2391,6 @@ re_stop(sc)
callout_stop(&sc->rl_stat_callout); callout_stop(&sc->rl_stat_callout);
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
#ifdef DEVICE_POLLING
ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */
CSR_WRITE_1(sc, RL_COMMAND, 0x00); CSR_WRITE_1(sc, RL_COMMAND, 0x00);
CSR_WRITE_2(sc, RL_IMR, 0x0000); CSR_WRITE_2(sc, RL_IMR, 0x0000);


@ -165,11 +165,9 @@ static int sf_miibus_readreg(device_t, int, int);
static int sf_miibus_writereg(device_t, int, int, int); static int sf_miibus_writereg(device_t, int, int, int);
static void sf_miibus_statchg(device_t); static void sf_miibus_statchg(device_t);
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
static void sf_poll(struct ifnet *ifp, enum poll_cmd cmd, static void sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
int count); static void sf_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count);
static void sf_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, #endif
int count);
#endif /* DEVICE_POLLING */
static u_int32_t csr_read_4(struct sf_softc *, int); static u_int32_t csr_read_4(struct sf_softc *, int);
static void csr_write_4(struct sf_softc *, int, u_int32_t); static void csr_write_4(struct sf_softc *, int, u_int32_t);
@ -560,10 +558,31 @@ sf_ioctl(ifp, command, data)
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
break; break;
case SIOCSIFCAP: case SIOCSIFCAP:
SF_LOCK(sc); #ifdef DEVICE_POLLING
ifp->if_capenable &= ~IFCAP_POLLING; if (ifr->ifr_reqcap & IFCAP_POLLING &&
ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING; !(ifp->if_capenable & IFCAP_POLLING)) {
SF_UNLOCK(sc); error = ether_poll_register(sf_poll, ifp);
if (error)
return(error);
SF_LOCK(sc);
/* Disable interrupts */
csr_write_4(sc, SF_IMR, 0x00000000);
ifp->if_capenable |= IFCAP_POLLING;
SF_UNLOCK(sc);
return (error);
}
if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
ifp->if_capenable & IFCAP_POLLING) {
error = ether_poll_deregister(ifp);
/* Enable interrupts. */
SF_LOCK(sc);
csr_write_4(sc, SF_IMR, SF_INTRS);
ifp->if_capenable &= ~IFCAP_POLLING;
SF_UNLOCK(sc);
return (error);
}
#endif /* DEVICE_POLLING */
break; break;
default: default:
error = ether_ioctl(ifp, command, data); error = ether_ioctl(ifp, command, data);
@ -749,10 +768,10 @@ sf_attach(dev)
IFQ_SET_MAXLEN(&ifp->if_snd, SF_TX_DLIST_CNT - 1); IFQ_SET_MAXLEN(&ifp->if_snd, SF_TX_DLIST_CNT - 1);
ifp->if_snd.ifq_drv_maxlen = SF_TX_DLIST_CNT - 1; ifp->if_snd.ifq_drv_maxlen = SF_TX_DLIST_CNT - 1;
IFQ_SET_READY(&ifp->if_snd); IFQ_SET_READY(&ifp->if_snd);
ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING; ifp->if_capabilities |= IFCAP_POLLING;
#endif /* DEVICE_POLLING */ #endif
ifp->if_capenable = ifp->if_capabilities;
/* /*
* Call MI attach routine. * Call MI attach routine.
@ -794,6 +813,11 @@ sf_detach(dev)
KASSERT(mtx_initialized(&sc->sf_mtx), ("sf mutex not initialized")); KASSERT(mtx_initialized(&sc->sf_mtx), ("sf mutex not initialized"));
ifp = sc->sf_ifp; ifp = sc->sf_ifp;
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
/* These should only be active if attach succeeded */ /* These should only be active if attach succeeded */
if (device_is_attached(dev)) { if (device_is_attached(dev)) {
SF_LOCK(sc); SF_LOCK(sc);
@ -946,12 +970,12 @@ sf_rxeof(sc)
struct mbuf *m0; struct mbuf *m0;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) { if (ifp->if_capenable & IFCAP_POLLING) {
if (sc->rxcycles <= 0) if (sc->rxcycles <= 0)
break; break;
sc->rxcycles--; sc->rxcycles--;
} }
#endif /* DEVICE_POLLING */ #endif
cur_rx = &sc->sf_ldata->sf_rx_clist[cmpconsidx]; cur_rx = &sc->sf_ldata->sf_rx_clist[cmpconsidx];
desc = &sc->sf_ldata->sf_rx_dlist_big[cur_rx->sf_endidx]; desc = &sc->sf_ldata->sf_rx_dlist_big[cur_rx->sf_endidx];
@ -1068,7 +1092,8 @@ sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
struct sf_softc *sc = ifp->if_softc; struct sf_softc *sc = ifp->if_softc;
SF_LOCK(sc); SF_LOCK(sc);
sf_poll_locked(ifp, cmd, count); if (ifp->if_drv_flags & IFF_DRV_RUNNING)
sf_poll_locked(ifp, cmd, count);
SF_UNLOCK(sc); SF_UNLOCK(sc);
} }
@ -1079,17 +1104,6 @@ sf_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
SF_LOCK_ASSERT(sc); SF_LOCK_ASSERT(sc);
if (!(ifp->if_capenable & IFCAP_POLLING)) {
ether_poll_deregister(ifp);
cmd = POLL_DEREGISTER;
}
if (cmd == POLL_DEREGISTER) {
/* Final call, enable interrupts. */
csr_write_4(sc, SF_IMR, SF_INTRS);
return;
}
sc->rxcycles = count; sc->rxcycles = count;
sf_rxeof(sc); sf_rxeof(sc);
sf_txeof(sc); sf_txeof(sc);
@ -1131,17 +1145,11 @@ sf_intr(arg)
ifp = sc->sf_ifp; ifp = sc->sf_ifp;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING) {
goto done_locked; SF_UNLOCK(sc);
return;
if ((ifp->if_capenable & IFCAP_POLLING) &&
ether_poll_register(sf_poll, ifp)) {
/* OK, disable interrupts. */
csr_write_4(sc, SF_IMR, 0x00000000);
sf_poll_locked(ifp, 0, 1);
goto done_locked;
} }
#endif /* DEVICE_POLLING */ #endif
if (!(csr_read_4(sc, SF_ISR_SHADOW) & SF_ISR_PCIINT_ASSERTED)) { if (!(csr_read_4(sc, SF_ISR_SHADOW) & SF_ISR_PCIINT_ASSERTED)) {
SF_UNLOCK(sc); SF_UNLOCK(sc);
@ -1185,9 +1193,6 @@ sf_intr(arg)
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
sf_start_locked(ifp); sf_start_locked(ifp);
#ifdef DEVICE_POLLING
done_locked:
#endif /* DEVICE_POLLING */
SF_UNLOCK(sc); SF_UNLOCK(sc);
} }
@ -1296,10 +1301,10 @@ sf_init_locked(sc)
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
/* Disable interrupts if we are polling. */ /* Disable interrupts if we are polling. */
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING)
csr_write_4(sc, SF_IMR, 0x00000000); csr_write_4(sc, SF_IMR, 0x00000000);
else else
#endif /* DEVICE_POLLING */ #endif
/* Enable interrupts. */ /* Enable interrupts. */
csr_write_4(sc, SF_IMR, SF_INTRS); csr_write_4(sc, SF_IMR, SF_INTRS);
@ -1478,10 +1483,6 @@ sf_stop(sc)
callout_stop(&sc->sf_stat_callout); callout_stop(&sc->sf_stat_callout);
#ifdef DEVICE_POLLING
ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */
csr_write_4(sc, SF_GEN_ETH_CTL, 0); csr_write_4(sc, SF_GEN_ETH_CTL, 0);
csr_write_4(sc, SF_CQ_CONSIDX, 0); csr_write_4(sc, SF_CQ_CONSIDX, 0);
csr_write_4(sc, SF_CQ_PRODIDX, 0); csr_write_4(sc, SF_CQ_PRODIDX, 0);


@ -1051,16 +1051,14 @@ vge_attach(dev)
ifp->if_start = vge_start; ifp->if_start = vge_start;
ifp->if_hwassist = VGE_CSUM_FEATURES; ifp->if_hwassist = VGE_CSUM_FEATURES;
ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING; ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING;
ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
#ifdef IFCAP_POLLING
ifp->if_capabilities |= IFCAP_POLLING; ifp->if_capabilities |= IFCAP_POLLING;
#endif
#endif #endif
ifp->if_watchdog = vge_watchdog; ifp->if_watchdog = vge_watchdog;
ifp->if_init = vge_init; ifp->if_init = vge_init;
ifp->if_baudrate = 1000000000; ifp->if_baudrate = 1000000000;
ifp->if_snd.ifq_maxlen = VGE_IFQ_MAXLEN; ifp->if_snd.ifq_maxlen = VGE_IFQ_MAXLEN;
ifp->if_capenable = ifp->if_capabilities;
TASK_INIT(&sc->vge_txtask, 0, vge_tx_task, ifp); TASK_INIT(&sc->vge_txtask, 0, vge_tx_task, ifp);
@ -1105,6 +1103,11 @@ vge_detach(dev)
KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized")); KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
ifp = sc->vge_ifp; ifp = sc->vge_ifp;
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
/* These should only be active if attach succeeded */ /* These should only be active if attach succeeded */
if (device_is_attached(dev)) { if (device_is_attached(dev)) {
vge_stop(sc); vge_stop(sc);
@ -1351,12 +1354,12 @@ vge_rxeof(sc)
while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) { while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) { if (ifp->if_capenable & IFCAP_POLLING) {
if (sc->rxcycles <= 0) if (sc->rxcycles <= 0)
break; break;
sc->rxcycles--; sc->rxcycles--;
} }
#endif /* DEVICE_POLLING */ #endif
cur_rx = &sc->vge_ldata.vge_rx_list[i]; cur_rx = &sc->vge_ldata.vge_rx_list[i];
m = sc->vge_ldata.vge_rx_mbuf[i]; m = sc->vge_ldata.vge_rx_mbuf[i];
@ -1617,18 +1620,8 @@ vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count)
struct vge_softc *sc = ifp->if_softc; struct vge_softc *sc = ifp->if_softc;
VGE_LOCK(sc); VGE_LOCK(sc);
#ifdef IFCAP_POLLING if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
if (!(ifp->if_capenable & IFCAP_POLLING)) {
ether_poll_deregister(ifp);
cmd = POLL_DEREGISTER;
}
#endif
if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
goto done; goto done;
}
sc->rxcycles = count; sc->rxcycles = count;
vge_rxeof(sc); vge_rxeof(sc);
@ -1692,20 +1685,11 @@ vge_intr(arg)
} }
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING) {
goto done; VGE_UNLOCK(sc);
if ( return;
#ifdef IFCAP_POLLING
(ifp->if_capenable & IFCAP_POLLING) &&
#endif
ether_poll_register(vge_poll, ifp)) { /* ok, disable interrupts */
CSR_WRITE_4(sc, VGE_IMR, 0);
CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
vge_poll(ifp, 0, 1);
goto done;
} }
#endif
#endif /* DEVICE_POLLING */
/* Disable interrupts */ /* Disable interrupts */
CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
@ -1745,9 +1729,6 @@ vge_intr(arg)
/* Re-enable interrupts */ /* Re-enable interrupts */
CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
#ifdef DEVICE_POLLING
done:
#endif
VGE_UNLOCK(sc); VGE_UNLOCK(sc);
#if __FreeBSD_version < 502114 #if __FreeBSD_version < 502114
@ -2104,11 +2085,11 @@ vge_init(xsc)
/* /*
* Disable interrupts if we are polling. * Disable interrupts if we are polling.
*/ */
if (ifp->if_flags & IFF_POLLING) { if (ifp->if_capenable & IFCAP_POLLING) {
CSR_WRITE_4(sc, VGE_IMR, 0); CSR_WRITE_4(sc, VGE_IMR, 0);
CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
} else /* otherwise ... */ } else /* otherwise ... */
#endif /* DEVICE_POLLING */ #endif
{ {
/* /*
* Enable interrupts. * Enable interrupts.
@ -2268,23 +2249,42 @@ vge_ioctl(ifp, command, data)
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
break; break;
case SIOCSIFCAP: case SIOCSIFCAP:
#ifdef IFCAP_POLLING {
ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_POLLING); int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#else #ifdef DEVICE_POLLING
ifp->if_capenable &= ~(IFCAP_HWCSUM); if (mask & IFCAP_POLLING) {
#endif if (ifr->ifr_reqcap & IFCAP_POLLING) {
ifp->if_capenable |= error = ether_poll_register(vge_poll, ifp);
#ifdef IFCAP_POLLING if (error)
ifr->ifr_reqcap & (IFCAP_HWCSUM | IFCAP_POLLING); return(error);
#else VGE_LOCK(sc);
ifr->ifr_reqcap & (IFCAP_HWCSUM); /* Disable interrupts */
#endif CSR_WRITE_4(sc, VGE_IMR, 0);
if (ifp->if_capenable & IFCAP_TXCSUM) CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
ifp->if_hwassist = VGE_CSUM_FEATURES; ifp->if_capenable |= IFCAP_POLLING;
else VGE_UNLOCK(sc);
ifp->if_hwassist = 0; } else {
if (ifp->if_drv_flags & IFF_DRV_RUNNING) error = ether_poll_deregister(ifp);
vge_init(sc); /* Enable interrupts. */
VGE_LOCK(sc);
CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
ifp->if_capenable &= ~IFCAP_POLLING;
VGE_UNLOCK(sc);
}
}
#endif /* DEVICE_POLLING */
if (mask & IFCAP_HWCSUM) {
ifp->if_capenable |= ifr->ifr_reqcap & (IFCAP_HWCSUM);
if (ifp->if_capenable & IFCAP_TXCSUM)
ifp->if_hwassist = VGE_CSUM_FEATURES;
else
ifp->if_hwassist = 0;
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
vge_init(sc);
}
}
break; break;
default: default:
error = ether_ioctl(ifp, command, data); error = ether_ioctl(ifp, command, data);
@ -2331,9 +2331,6 @@ vge_stop(sc)
ifp->if_timer = 0; ifp->if_timer = 0;
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
#ifdef DEVICE_POLLING
ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */
CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP); CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);


@ -742,10 +742,10 @@ vr_attach(dev)
IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_LIST_CNT - 1); IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_LIST_CNT - 1);
ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1; ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1;
IFQ_SET_READY(&ifp->if_snd); IFQ_SET_READY(&ifp->if_snd);
ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING; ifp->if_capabilities |= IFCAP_POLLING;
#endif #endif
ifp->if_capenable = ifp->if_capabilities;
/* Do MII setup. */ /* Do MII setup. */
if (mii_phy_probe(dev, &sc->vr_miibus, if (mii_phy_probe(dev, &sc->vr_miibus,
@ -794,6 +794,11 @@ vr_detach(device_t dev)
KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized")); KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
VR_LOCK(sc); VR_LOCK(sc);
sc->suspended = 1; sc->suspended = 1;
@ -952,12 +957,12 @@ vr_rxeof(struct vr_softc *sc)
while (!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) & while (!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
VR_RXSTAT_OWN)) { VR_RXSTAT_OWN)) {
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) { if (ifp->if_capenable & IFCAP_POLLING) {
if (sc->rxcycles <= 0) if (sc->rxcycles <= 0)
break; break;
sc->rxcycles--; sc->rxcycles--;
} }
#endif /* DEVICE_POLLING */ #endif
m0 = NULL; m0 = NULL;
cur_rx = sc->vr_cdata.vr_rx_head; cur_rx = sc->vr_cdata.vr_rx_head;
sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc; sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
@ -1151,7 +1156,8 @@ vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
struct vr_softc *sc = ifp->if_softc; struct vr_softc *sc = ifp->if_softc;
VR_LOCK(sc); VR_LOCK(sc);
vr_poll_locked(ifp, cmd, count); if (ifp->if_drv_flags & IFF_DRV_RUNNING)
vr_poll_locked(ifp, cmd, count);
VR_UNLOCK(sc); VR_UNLOCK(sc);
} }
@ -1162,17 +1168,6 @@ vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
VR_LOCK_ASSERT(sc); VR_LOCK_ASSERT(sc);
if (!(ifp->if_capenable & IFCAP_POLLING)) {
ether_poll_deregister(ifp);
cmd = POLL_DEREGISTER;
}
if (cmd == POLL_DEREGISTER) {
/* Final call, enable interrupts. */
CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
return;
}
sc->rxcycles = count; sc->rxcycles = count;
vr_rxeof(sc); vr_rxeof(sc);
vr_txeof(sc); vr_txeof(sc);
@ -1249,17 +1244,9 @@ vr_intr(void *arg)
} }
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING)
goto done_locked; goto done_locked;
#endif
if ((ifp->if_capenable & IFCAP_POLLING) &&
ether_poll_register(vr_poll, ifp)) {
/* OK, disable interrupts. */
CSR_WRITE_2(sc, VR_IMR, 0x0000);
vr_poll_locked(ifp, 0, 1);
goto done_locked;
}
#endif /* DEVICE_POLLING */
/* Suppress unwanted interrupts. */ /* Suppress unwanted interrupts. */
if (!(ifp->if_flags & IFF_UP)) { if (!(ifp->if_flags & IFF_UP)) {
@ -1534,10 +1521,10 @@ vr_init_locked(struct vr_softc *sc)
/* /*
* Disable interrupts if we are polling. * Disable interrupts if we are polling.
*/ */
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING)
CSR_WRITE_2(sc, VR_IMR, 0); CSR_WRITE_2(sc, VR_IMR, 0);
else else
#endif /* DEVICE_POLLING */ #endif
/* /*
* Enable interrupts. * Enable interrupts.
*/ */
@ -1615,7 +1602,31 @@ vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
break; break;
case SIOCSIFCAP: case SIOCSIFCAP:
ifp->if_capenable = ifr->ifr_reqcap; #ifdef DEVICE_POLLING
if (ifr->ifr_reqcap & IFCAP_POLLING &&
!(ifp->if_capenable & IFCAP_POLLING)) {
error = ether_poll_register(vr_poll, ifp);
if (error)
return(error);
VR_LOCK(sc);
/* Disable interrupts */
CSR_WRITE_2(sc, VR_IMR, 0x0000);
ifp->if_capenable |= IFCAP_POLLING;
VR_UNLOCK(sc);
return (error);
}
if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
ifp->if_capenable & IFCAP_POLLING) {
error = ether_poll_deregister(ifp);
/* Enable interrupts. */
VR_LOCK(sc);
CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
ifp->if_capenable &= ~IFCAP_POLLING;
VR_UNLOCK(sc);
return (error);
}
#endif /* DEVICE_POLLING */
break; break;
default: default:
error = ether_ioctl(ifp, command, data); error = ether_ioctl(ifp, command, data);
@ -1662,9 +1673,6 @@ vr_stop(struct vr_softc *sc)
untimeout(vr_tick, sc, sc->vr_stat_ch); untimeout(vr_tick, sc, sc->vr_stat_ch);
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
#ifdef DEVICE_POLLING
ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */
VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP); VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON)); VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));


@ -160,11 +160,6 @@ SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
extern char *syscallnames[]; extern char *syscallnames[];
#endif #endif
#ifdef DEVICE_POLLING
extern u_int32_t poll_in_trap;
extern int ether_poll(int count);
#endif /* DEVICE_POLLING */
/* /*
* Exception, fault, and trap interface to the FreeBSD kernel. * Exception, fault, and trap interface to the FreeBSD kernel.
* This common code is called from assembly language IDT gate entry * This common code is called from assembly language IDT gate entry
@ -272,11 +267,6 @@ trap(frame)
trap_fatal(&frame, eva); trap_fatal(&frame, eva);
} }
#ifdef DEVICE_POLLING
if (poll_in_trap)
ether_poll(poll_in_trap);
#endif /* DEVICE_POLLING */
if ((ISPL(frame.tf_cs) == SEL_UPL) || if ((ISPL(frame.tf_cs) == SEL_UPL) ||
((frame.tf_eflags & PSL_VM) && ((frame.tf_eflags & PSL_VM) &&
!(PCPU_GET(curpcb)->pcb_flags & PCB_VM86CALL))) { !(PCPU_GET(curpcb)->pcb_flags & PCB_VM86CALL))) {


@ -32,6 +32,7 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h> #include <sys/systm.h>
#include <sys/kernel.h> #include <sys/kernel.h>
#include <sys/socket.h> /* needed by net/if.h */ #include <sys/socket.h> /* needed by net/if.h */
#include <sys/sockio.h>
#include <sys/sysctl.h> #include <sys/sysctl.h>
#include <sys/syslog.h> #include <sys/syslog.h>
@ -44,14 +45,15 @@ __FBSDID("$FreeBSD$");
static void netisr_poll(void); /* the two netisr handlers */ static void netisr_poll(void); /* the two netisr handlers */
static void netisr_pollmore(void); static void netisr_pollmore(void);
static int poll_switch(SYSCTL_HANDLER_ARGS);
void hardclock_device_poll(void); /* hook from hardclock */ void hardclock_device_poll(void); /* hook from hardclock */
void ether_poll(int); /* polling while in trap */ void ether_poll(int); /* polling in idle loop */
/* /*
* Polling support for [network] device drivers. * Polling support for [network] device drivers.
* *
* Drivers which support this feature try to register with the * Drivers which support this feature can register with the
* polling code. * polling code.
* *
* If registration is successful, the driver must disable interrupts, * If registration is successful, the driver must disable interrupts,
@ -64,10 +66,6 @@ void ether_poll(int); /* polling while in trap */
* POLL_AND_CHECK_STATUS: as above, plus check status registers or do * POLL_AND_CHECK_STATUS: as above, plus check status registers or do
* other more expensive operations. This command is issued periodically * other more expensive operations. This command is issued periodically
* but less frequently than POLL_ONLY. * but less frequently than POLL_ONLY.
* POLL_DEREGISTER: deregister and return to interrupt mode.
*
* The first two commands are only issued if the interface is marked as
* 'IFF_UP and IFF_DRV_RUNNING', the last one only if IFF_DRV_RUNNING is set.
* *
* The count limit specifies how much work the handler can do during the * The count limit specifies how much work the handler can do during the
* call -- typically this is the number of packets to be received, or * call -- typically this is the number of packets to be received, or
@ -75,11 +73,9 @@ void ether_poll(int); /* polling while in trap */
* as the max time spent in the function grows roughly linearly with the * as the max time spent in the function grows roughly linearly with the
* count). * count).
* *
* Deregistration can be requested by the driver itself (typically in the * Polling is enabled and disabled via setting IFCAP_POLLING flag on
* *_stop() routine), or by the polling code, by invoking the handler. * the interface. The driver ioctl handler should register interface
* * with polling and disable interrupts, if registration was successful.
* Polling can be globally enabled or disabled with the sysctl variable
* kern.polling.enable (default is 0, disabled)
* *
* A second variable controls the sharing of CPU between polling/kernel * A second variable controls the sharing of CPU between polling/kernel
* network processing, and other activities (typically userlevel tasks): * network processing, and other activities (typically userlevel tasks):
@ -91,7 +87,7 @@ void ether_poll(int); /* polling while in trap */
* The following constraints hold * The following constraints hold
* *
* 1 <= poll_each_burst <= poll_burst <= poll_burst_max * 1 <= poll_each_burst <= poll_burst <= poll_burst_max
* 0 <= poll_in_trap <= poll_each_burst * 0 <= poll_each_burst
* MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX * MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
*/ */
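(Illustration, not part of this commit: the contract described in the comment above maps onto a handler of roughly the following shape. The names foo_poll, foo_softc, FOO_LOCK/FOO_UNLOCK and the foo_rxeof/foo_txeof/foo_check_status/foo_start_locked helpers are invented placeholders, not code from any driver touched here.)

static void
foo_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct foo_softc *sc = ifp->if_softc;

	FOO_LOCK(sc);
	/* kern_poll no longer checks driver state, so the handler must. */
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		FOO_UNLOCK(sc);
		return;
	}
	/* Respect the count limit: receive at most 'count' packets. */
	sc->rxcycles = count;
	foo_rxeof(sc);
	foo_txeof(sc);
	if (cmd == POLL_AND_CHECK_STATUS) {
		/* Infrequent call: check error/status bits here. */
		foo_check_status(sc);
	}
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		foo_start_locked(ifp);
	FOO_UNLOCK(sc);
}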
@ -117,10 +113,6 @@ static u_int32_t poll_in_idle_loop=0; /* do we poll in idle loop ? */
SYSCTL_UINT(_kern_polling, OID_AUTO, idle_poll, CTLFLAG_RW, SYSCTL_UINT(_kern_polling, OID_AUTO, idle_poll, CTLFLAG_RW,
&poll_in_idle_loop, 0, "Enable device polling in idle loop"); &poll_in_idle_loop, 0, "Enable device polling in idle loop");
u_int32_t poll_in_trap; /* used in trap.c */
SYSCTL_UINT(_kern_polling, OID_AUTO, poll_in_trap, CTLFLAG_RW,
&poll_in_trap, 0, "Poll burst size during a trap");
static u_int32_t user_frac = 50; static u_int32_t user_frac = 50;
SYSCTL_UINT(_kern_polling, OID_AUTO, user_frac, CTLFLAG_RW, SYSCTL_UINT(_kern_polling, OID_AUTO, user_frac, CTLFLAG_RW,
&user_frac, 0, "Desired user fraction of cpu time"); &user_frac, 0, "Desired user fraction of cpu time");
@ -149,9 +141,9 @@ static u_int32_t poll_handlers; /* next free entry in pr[]. */
SYSCTL_UINT(_kern_polling, OID_AUTO, handlers, CTLFLAG_RD, SYSCTL_UINT(_kern_polling, OID_AUTO, handlers, CTLFLAG_RD,
&poll_handlers, 0, "Number of registered poll handlers"); &poll_handlers, 0, "Number of registered poll handlers");
static int polling = 0; /* global polling enable */ static int polling = 0;
SYSCTL_UINT(_kern_polling, OID_AUTO, enable, CTLFLAG_RW, SYSCTL_PROC(_kern_polling, OID_AUTO, enable, CTLTYPE_UINT | CTLFLAG_RW,
&polling, 0, "Polling enabled"); 0, sizeof(int), poll_switch, "I", "Switch polling for all interfaces");
static u_int32_t phase; static u_int32_t phase;
SYSCTL_UINT(_kern_polling, OID_AUTO, phase, CTLFLAG_RW, SYSCTL_UINT(_kern_polling, OID_AUTO, phase, CTLFLAG_RW,
@ -174,23 +166,9 @@ SYSCTL_UINT(_kern_polling, OID_AUTO, idlepoll_sleeping, CTLFLAG_RD,
struct pollrec { struct pollrec {
poll_handler_t *handler; poll_handler_t *handler;
struct ifnet *ifp; struct ifnet *ifp;
/*
* Flags of polling record (protected by poll_mtx).
* PRF_RUNNING means that the handler is now executing.
* PRF_LEAVING means that the handler is now deregistering.
*/
#define PRF_RUNNING 0x1
#define PRF_LEAVING 0x2
uint32_t flags;
}; };
static struct pollrec pr[POLL_LIST_LEN]; static struct pollrec pr[POLL_LIST_LEN];
#define PR_VALID(i) (pr[(i)].handler != NULL && \
!(pr[(i)].flags & (PRF_RUNNING|PRF_LEAVING)) && \
(pr[(i)].ifp->if_drv_flags & IFF_DRV_RUNNING) &&\
(pr[(i)].ifp->if_flags & IFF_UP))
static struct mtx poll_mtx; static struct mtx poll_mtx;
static void static void
@ -258,30 +236,24 @@ hardclock_device_poll(void)
} }
/* /*
* ether_poll is called from the idle loop or from the trap handler. * ether_poll is called from the idle loop.
*/ */
void void
ether_poll(int count) ether_poll(int count)
{ {
int i; int i;
NET_LOCK_GIANT();
mtx_lock(&poll_mtx); mtx_lock(&poll_mtx);
if (count > poll_each_burst) if (count > poll_each_burst)
count = poll_each_burst; count = poll_each_burst;
for (i = 0 ; i < poll_handlers ; i++) { for (i = 0 ; i < poll_handlers ; i++)
if (PR_VALID(i)) { pr[i].handler(pr[i].ifp, POLL_ONLY, count);
pr[i].flags |= PRF_RUNNING;
mtx_unlock(&poll_mtx);
NET_LOCK_GIANT();
pr[i].handler(pr[i].ifp, POLL_ONLY, count);
NET_UNLOCK_GIANT();
mtx_lock(&poll_mtx);
pr[i].flags &= ~PRF_RUNNING;
}
}
mtx_unlock(&poll_mtx); mtx_unlock(&poll_mtx);
NET_UNLOCK_GIANT();
} }
/* /*
@ -403,60 +375,29 @@ netisr_poll(void)
residual_burst : poll_each_burst; residual_burst : poll_each_burst;
residual_burst -= cycles; residual_burst -= cycles;
if (polling) { for (i = 0 ; i < poll_handlers ; i++)
for (i = 0 ; i < poll_handlers ; i++) { pr[i].handler(pr[i].ifp, arg, cycles);
if (PR_VALID(i)) {
pr[i].flags |= PRF_RUNNING;
mtx_unlock(&poll_mtx);
pr[i].handler(pr[i].ifp, arg, cycles);
mtx_lock(&poll_mtx);
pr[i].flags &= ~PRF_RUNNING;
}
}
} else { /* unregister */
for (i = 0 ; i < poll_handlers ; i++) {
if (pr[i].handler != NULL &&
pr[i].ifp->if_drv_flags & IFF_DRV_RUNNING) {
pr[i].ifp->if_flags &= ~IFF_POLLING;
pr[i].flags |= PRF_LEAVING;
mtx_unlock(&poll_mtx);
pr[i].handler(pr[i].ifp, POLL_DEREGISTER, 1);
mtx_lock(&poll_mtx);
pr[i].flags &= ~PRF_LEAVING;
}
pr[i].handler = NULL;
}
residual_burst = 0;
poll_handlers = 0;
}
phase = 4; phase = 4;
mtx_unlock(&poll_mtx); mtx_unlock(&poll_mtx);
} }
/* /*
* Try to register routine for polling. Returns 1 if successful * Try to register routine for polling. Returns 0 if successful
* (and polling should be enabled), 0 otherwise. * (and polling should be enabled), error code otherwise.
* A device is not supposed to register itself multiple times. * A device is not supposed to register itself multiple times.
* *
* This is called from within the *_intr() functions, so we do not need * This is called from within the *_ioctl() functions.
* further ifnet locking.
*/ */
int int
ether_poll_register(poll_handler_t *h, struct ifnet *ifp) ether_poll_register(poll_handler_t *h, struct ifnet *ifp)
{ {
int i; int i;
NET_ASSERT_GIANT(); KASSERT(h != NULL, ("%s: handler is NULL", __func__));
KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));
if (polling == 0) /* polling disabled, cannot register */ NET_ASSERT_GIANT();
return 0;
if (h == NULL || ifp == NULL) /* bad arguments */
return 0;
if ( !(ifp->if_flags & IFF_UP) ) /* must be up */
return 0;
if (ifp->if_flags & IFF_POLLING) /* already polling */
return 0;
mtx_lock(&poll_mtx); mtx_lock(&poll_mtx);
if (poll_handlers >= POLL_LIST_LEN) { if (poll_handlers >= POLL_LIST_LEN) {
@ -474,7 +415,7 @@ ether_poll_register(poll_handler_t *h, struct ifnet *ifp)
verbose--; verbose--;
} }
mtx_unlock(&poll_mtx); mtx_unlock(&poll_mtx);
return 0; /* no polling for you */ return (ENOMEM); /* no polling for you */
} }
for (i = 0 ; i < poll_handlers ; i++) for (i = 0 ; i < poll_handlers ; i++)
@ -482,45 +423,39 @@ ether_poll_register(poll_handler_t *h, struct ifnet *ifp)
mtx_unlock(&poll_mtx); mtx_unlock(&poll_mtx);
log(LOG_DEBUG, "ether_poll_register: %s: handler" log(LOG_DEBUG, "ether_poll_register: %s: handler"
" already registered\n", ifp->if_xname); " already registered\n", ifp->if_xname);
return (0); return (EEXIST);
} }
pr[poll_handlers].handler = h; pr[poll_handlers].handler = h;
pr[poll_handlers].ifp = ifp; pr[poll_handlers].ifp = ifp;
poll_handlers++; poll_handlers++;
ifp->if_flags |= IFF_POLLING;
mtx_unlock(&poll_mtx); mtx_unlock(&poll_mtx);
if (idlepoll_sleeping) if (idlepoll_sleeping)
wakeup(&idlepoll_sleeping); wakeup(&idlepoll_sleeping);
return 1; /* polling enabled in next call */ return (0);
} }
/* /*
* Remove interface from the polling list. Normally called by *_stop(). * Remove interface from the polling list. Called from *_ioctl(), too.
* It is not an error to call it with IFF_POLLING clear, the call is
* sufficiently rare to be preferable to save the space for the extra
* test in each driver in exchange of one additional function call.
*/ */
int int
ether_poll_deregister(struct ifnet *ifp) ether_poll_deregister(struct ifnet *ifp)
{ {
int i; int i;
NET_ASSERT_GIANT(); KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));
if ( !ifp || !(ifp->if_flags & IFF_POLLING) ) { NET_ASSERT_GIANT();
return 0;
}
mtx_lock(&poll_mtx); mtx_lock(&poll_mtx);
for (i = 0 ; i < poll_handlers ; i++) for (i = 0 ; i < poll_handlers ; i++)
if (pr[i].ifp == ifp) /* found it */ if (pr[i].ifp == ifp) /* found it */
break; break;
ifp->if_flags &= ~IFF_POLLING; /* found or not... */
if (i == poll_handlers) { if (i == poll_handlers) {
mtx_unlock(&poll_mtx);
log(LOG_DEBUG, "ether_poll_deregister: %s: not found!\n", log(LOG_DEBUG, "ether_poll_deregister: %s: not found!\n",
ifp->if_xname); ifp->if_xname);
return (0); mtx_unlock(&poll_mtx);
return (ENOENT);
} }
poll_handlers--; poll_handlers--;
if (i < poll_handlers) { /* Last entry replaces this one. */ if (i < poll_handlers) { /* Last entry replaces this one. */
@ -528,7 +463,60 @@ ether_poll_deregister(struct ifnet *ifp)
pr[i].ifp = pr[poll_handlers].ifp; pr[i].ifp = pr[poll_handlers].ifp;
} }
mtx_unlock(&poll_mtx); mtx_unlock(&poll_mtx);
return (1); return (0);
}
/*
* Legacy interface for turning polling on all interfaces at one time.
*/
static int
poll_switch(SYSCTL_HANDLER_ARGS)
{
struct ifnet *ifp;
int error;
int val;
mtx_lock(&poll_mtx);
val = polling;
mtx_unlock(&poll_mtx);
error = sysctl_handle_int(oidp, &val, sizeof(int), req);
if (error || !req->newptr )
return (error);
if (val == polling)
return (0);
if (val < 0 || val > 1)
return (EINVAL);
mtx_lock(&poll_mtx);
polling = val;
mtx_unlock(&poll_mtx);
NET_LOCK_GIANT();
IFNET_RLOCK();
TAILQ_FOREACH(ifp, &ifnet, if_link) {
if (ifp->if_capabilities & IFCAP_POLLING) {
struct ifreq ifr;
if (val == 1)
ifr.ifr_reqcap =
ifp->if_capenable | IFCAP_POLLING;
else
ifr.ifr_reqcap =
ifp->if_capenable & ~IFCAP_POLLING;
IFF_LOCKGIANT(ifp); /* LOR here */
(void) (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
IFF_UNLOCKGIANT(ifp);
}
}
IFNET_RUNLOCK();
NET_UNLOCK_GIANT();
log(LOG_ERR, "kern.polling.enable is deprecated. Use ifconfig(8)");
return (0);
} }
static void static void
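(Illustration, not part of this commit: with poll_switch() installed as the handler, kern.polling.enable no longer gates polling itself; it only walks the interface list, issues SIOCSIFCAP on every polling-capable interface, and logs that it is deprecated. The supported way to toggle polling is now per interface from userland, e.g. "ifconfig fxp0 polling" and "ifconfig fxp0 -polling" -- the interface name is only an example, and this assumes an ifconfig(8) that accepts the polling capability keyword. Setting kern.polling.enable=1 still works, but only as a bulk equivalent of the above.)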


@ -148,7 +148,7 @@ struct if_data {
#define IFF_LINK2 0x4000 /* per link layer defined bit */ #define IFF_LINK2 0x4000 /* per link layer defined bit */
#define IFF_ALTPHYS IFF_LINK2 /* use alternate physical connection */ #define IFF_ALTPHYS IFF_LINK2 /* use alternate physical connection */
#define IFF_MULTICAST 0x8000 /* (i) supports multicast */ #define IFF_MULTICAST 0x8000 /* (i) supports multicast */
#define IFF_POLLING 0x10000 /* (n) Interface is in polling mode. */ /* 0x10000 */
#define IFF_PPROMISC 0x20000 /* (n) user-requested promisc mode */ #define IFF_PPROMISC 0x20000 /* (n) user-requested promisc mode */
#define IFF_MONITOR 0x40000 /* (n) user-requested monitor mode */ #define IFF_MONITOR 0x40000 /* (n) user-requested monitor mode */
#define IFF_STATICARP 0x80000 /* (n) static ARP */ #define IFF_STATICARP 0x80000 /* (n) static ARP */
@ -166,8 +166,7 @@ struct if_data {
/* flags set internally only: */ /* flags set internally only: */
#define IFF_CANTCHANGE \ #define IFF_CANTCHANGE \
(IFF_BROADCAST|IFF_POINTOPOINT|IFF_DRV_RUNNING|IFF_DRV_OACTIVE|\ (IFF_BROADCAST|IFF_POINTOPOINT|IFF_DRV_RUNNING|IFF_DRV_OACTIVE|\
IFF_SIMPLEX|IFF_MULTICAST|IFF_ALLMULTI|IFF_SMART|IFF_PROMISC|\ IFF_SIMPLEX|IFF_MULTICAST|IFF_ALLMULTI|IFF_SMART|IFF_PROMISC)
IFF_POLLING)
/* /*
* Values for if_link_state. * Values for if_link_state.


@ -660,7 +660,7 @@ void if_deregister_com_alloc(u_char type);
LLADDR((struct sockaddr_dl *) ifaddr_byindex((ifp)->if_index)->ifa_addr) LLADDR((struct sockaddr_dl *) ifaddr_byindex((ifp)->if_index)->ifa_addr)
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
enum poll_cmd { POLL_ONLY, POLL_AND_CHECK_STATUS, POLL_DEREGISTER }; enum poll_cmd { POLL_ONLY, POLL_AND_CHECK_STATUS };
typedef void poll_handler_t(struct ifnet *ifp, enum poll_cmd cmd, int count); typedef void poll_handler_t(struct ifnet *ifp, enum poll_cmd cmd, int count);
int ether_poll_register(poll_handler_t *h, struct ifnet *ifp); int ether_poll_register(poll_handler_t *h, struct ifnet *ifp);
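(Illustration, not part of this commit: against these prototypes, the SIOCSIFCAP case that the converted drivers below (dc, rl, sf, sis, ste, vr, xl) and vge above now carry looks roughly as follows. foo_poll, FOO_LOCK/FOO_UNLOCK and the FOO_WRITE/FOO_IMR/FOO_INTRS register macros are invented placeholders for the per-driver equivalents.)

	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			/* Register first; the call now returns 0 or an errno. */
			error = ether_poll_register(foo_poll, ifp);
			if (error)
				return (error);
			FOO_LOCK(sc);
			/* Registration succeeded: mask chip interrupts. */
			FOO_WRITE(sc, FOO_IMR, 0);
			ifp->if_capenable |= IFCAP_POLLING;
			FOO_UNLOCK(sc);
			return (0);
		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			FOO_LOCK(sc);
			/* Back to interrupt mode. */
			FOO_WRITE(sc, FOO_IMR, FOO_INTRS);
			ifp->if_capenable &= ~IFCAP_POLLING;
			FOO_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
		break;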


@ -2265,10 +2265,10 @@ dc_attach(device_t dev)
*/ */
ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capabilities |= IFCAP_VLAN_MTU;
ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING; ifp->if_capabilities |= IFCAP_POLLING;
#endif #endif
ifp->if_capenable = ifp->if_capabilities;
callout_init_mtx(&sc->dc_stat_ch, &sc->dc_mtx, 0); callout_init_mtx(&sc->dc_stat_ch, &sc->dc_mtx, 0);
@ -2339,6 +2339,11 @@ dc_detach(device_t dev)
ifp = sc->dc_ifp; ifp = sc->dc_ifp;
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
/* These should only be active if attach succeeded */ /* These should only be active if attach succeeded */
if (device_is_attached(dev)) { if (device_is_attached(dev)) {
DC_LOCK(sc); DC_LOCK(sc);
@ -2704,7 +2709,7 @@ dc_rxeof(struct dc_softc *sc)
while (!(le32toh(sc->dc_ldata->dc_rx_list[i].dc_status) & while (!(le32toh(sc->dc_ldata->dc_rx_list[i].dc_status) &
DC_RXSTAT_OWN)) { DC_RXSTAT_OWN)) {
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) { if (ifp->if_capenable & IFCAP_POLLING) {
if (sc->rxcycles <= 0) if (sc->rxcycles <= 0)
break; break;
sc->rxcycles--; sc->rxcycles--;
@ -3038,16 +3043,13 @@ dc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{ {
struct dc_softc *sc = ifp->if_softc; struct dc_softc *sc = ifp->if_softc;
if (!(ifp->if_capenable & IFCAP_POLLING)) { DC_LOCK(sc);
ether_poll_deregister(ifp);
cmd = POLL_DEREGISTER; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
} DC_UNLOCK(sc);
if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
/* Re-enable interrupts. */
CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
return; return;
} }
DC_LOCK(sc);
sc->rxcycles = count; sc->rxcycles = count;
dc_rxeof(sc); dc_rxeof(sc);
dc_txeof(sc); dc_txeof(sc);
@ -3111,12 +3113,9 @@ dc_intr(void *arg)
DC_LOCK(sc); DC_LOCK(sc);
ifp = sc->dc_ifp; ifp = sc->dc_ifp;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING) {
goto done; DC_UNLOCK(sc);
if ((ifp->if_capenable & IFCAP_POLLING) && return;
ether_poll_register(dc_poll, ifp)) { /* ok, disable interrupts */
CSR_WRITE_4(sc, DC_IMR, 0x00000000);
goto done;
} }
#endif #endif
@ -3183,10 +3182,6 @@ dc_intr(void *arg)
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
dc_start_locked(ifp); dc_start_locked(ifp);
#ifdef DEVICE_POLLING
done:
#endif
DC_UNLOCK(sc); DC_UNLOCK(sc);
} }
@ -3534,7 +3529,7 @@ dc_init_locked(struct dc_softc *sc)
* the case of polling. Some cards (e.g. fxp) turn interrupts on * the case of polling. Some cards (e.g. fxp) turn interrupts on
* after a reset. * after a reset.
*/ */
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING)
CSR_WRITE_4(sc, DC_IMR, 0x00000000); CSR_WRITE_4(sc, DC_IMR, 0x00000000);
else else
#endif #endif
@ -3686,10 +3681,31 @@ dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
#endif #endif
break; break;
case SIOCSIFCAP: case SIOCSIFCAP:
DC_LOCK(sc); #ifdef DEVICE_POLLING
ifp->if_capenable &= ~IFCAP_POLLING; if (ifr->ifr_reqcap & IFCAP_POLLING &&
ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING; !(ifp->if_capenable & IFCAP_POLLING)) {
DC_UNLOCK(sc); error = ether_poll_register(dc_poll, ifp);
if (error)
return(error);
DC_LOCK(sc);
/* Disable interrupts */
CSR_WRITE_4(sc, DC_IMR, 0x00000000);
ifp->if_capenable |= IFCAP_POLLING;
DC_UNLOCK(sc);
return (error);
}
if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
ifp->if_capenable & IFCAP_POLLING) {
error = ether_poll_deregister(ifp);
/* Enable interrupts. */
DC_LOCK(sc);
CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
ifp->if_capenable &= ~IFCAP_POLLING;
DC_UNLOCK(sc);
return (error);
}
#endif /* DEVICE_POLLING */
break; break;
default: default:
error = ether_ioctl(ifp, command, data); error = ether_ioctl(ifp, command, data);
@ -3744,9 +3760,6 @@ dc_stop(struct dc_softc *sc)
callout_stop(&sc->dc_stat_ch); callout_stop(&sc->dc_stat_ch);
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
#ifdef DEVICE_POLLING
ether_poll_deregister(ifp);
#endif
DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON | DC_NETCFG_TX_ON)); DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON | DC_NETCFG_TX_ON));
CSR_WRITE_4(sc, DC_IMR, 0x00000000); CSR_WRITE_4(sc, DC_IMR, 0x00000000);


@ -195,10 +195,8 @@ static int rl_miibus_readreg(device_t, int, int);
static void rl_miibus_statchg(device_t); static void rl_miibus_statchg(device_t);
static int rl_miibus_writereg(device_t, int, int, int); static int rl_miibus_writereg(device_t, int, int, int);
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
static void rl_poll(struct ifnet *ifp, enum poll_cmd cmd, static void rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
int count); static void rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count);
static void rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd,
int count);
#endif #endif
static int rl_probe(device_t); static int rl_probe(device_t);
static void rl_read_eeprom(struct rl_softc *, uint8_t *, int, int, int); static void rl_read_eeprom(struct rl_softc *, uint8_t *, int, int, int);
@ -956,10 +954,10 @@ rl_attach(device_t dev)
ifp->if_init = rl_init; ifp->if_init = rl_init;
ifp->if_baudrate = 10000000; ifp->if_baudrate = 10000000;
ifp->if_capabilities = IFCAP_VLAN_MTU; ifp->if_capabilities = IFCAP_VLAN_MTU;
ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING; ifp->if_capabilities |= IFCAP_POLLING;
#endif #endif
ifp->if_capenable = ifp->if_capabilities;
IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
IFQ_SET_READY(&ifp->if_snd); IFQ_SET_READY(&ifp->if_snd);
@ -1002,6 +1000,10 @@ rl_detach(device_t dev)
KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized")); KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized"));
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
/* These should only be active if attach succeeded */ /* These should only be active if attach succeeded */
if (device_is_attached(dev)) { if (device_is_attached(dev)) {
RL_LOCK(sc); RL_LOCK(sc);
@ -1115,12 +1117,12 @@ rl_rxeof(struct rl_softc *sc)
while((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) { while((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) {
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) { if (ifp->if_capenable & IFCAP_POLLING) {
if (sc->rxcycles <= 0) if (sc->rxcycles <= 0)
break; break;
sc->rxcycles--; sc->rxcycles--;
} }
#endif /* DEVICE_POLLING */ #endif
rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx; rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx;
rxstat = le32toh(*(uint32_t *)rxbufpos); rxstat = le32toh(*(uint32_t *)rxbufpos);
@ -1283,7 +1285,8 @@ rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
struct rl_softc *sc = ifp->if_softc; struct rl_softc *sc = ifp->if_softc;
RL_LOCK(sc); RL_LOCK(sc);
rl_poll_locked(ifp, cmd, count); if (ifp->if_drv_flags & IFF_DRV_RUNNING)
rl_poll_locked(ifp, cmd, count);
RL_UNLOCK(sc); RL_UNLOCK(sc);
} }
@ -1294,17 +1297,6 @@ rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
RL_LOCK_ASSERT(sc); RL_LOCK_ASSERT(sc);
if (!(ifp->if_capenable & IFCAP_POLLING)) {
ether_poll_deregister(ifp);
cmd = POLL_DEREGISTER;
}
if (cmd == POLL_DEREGISTER) {
/* Final call; enable interrupts. */
CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
return;
}
sc->rxcycles = count; sc->rxcycles = count;
rl_rxeof(sc); rl_rxeof(sc);
rl_txeof(sc); rl_txeof(sc);
@ -1345,17 +1337,9 @@ rl_intr(void *arg)
goto done_locked; goto done_locked;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING)
goto done_locked; goto done_locked;
#endif
if ((ifp->if_capenable & IFCAP_POLLING) &&
ether_poll_register(rl_poll, ifp)) {
/* Disable interrupts. */
CSR_WRITE_2(sc, RL_IMR, 0x0000);
rl_poll_locked(ifp, 0, 1);
goto done_locked;
}
#endif /* DEVICE_POLLING */
for (;;) { for (;;) {
status = CSR_READ_2(sc, RL_ISR); status = CSR_READ_2(sc, RL_ISR);
@ -1574,10 +1558,10 @@ rl_init_locked(struct rl_softc *sc)
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
/* Disable interrupts if we are polling. */ /* Disable interrupts if we are polling. */
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING)
CSR_WRITE_2(sc, RL_IMR, 0); CSR_WRITE_2(sc, RL_IMR, 0);
else else
#endif /* DEVICE_POLLING */ #endif
/* Enable interrupts. */ /* Enable interrupts. */
CSR_WRITE_2(sc, RL_IMR, RL_INTRS); CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
@ -1669,8 +1653,31 @@ rl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
break; break;
case SIOCSIFCAP: case SIOCSIFCAP:
ifp->if_capenable &= ~IFCAP_POLLING; #ifdef DEVICE_POLLING
ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING; if (ifr->ifr_reqcap & IFCAP_POLLING &&
!(ifp->if_capenable & IFCAP_POLLING)) {
error = ether_poll_register(rl_poll, ifp);
if (error)
return(error);
RL_LOCK(sc);
/* Disable interrupts */
CSR_WRITE_2(sc, RL_IMR, 0x0000);
ifp->if_capenable |= IFCAP_POLLING;
RL_UNLOCK(sc);
return (error);
}
if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
ifp->if_capenable & IFCAP_POLLING) {
error = ether_poll_deregister(ifp);
/* Enable interrupts. */
RL_LOCK(sc);
CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
ifp->if_capenable &= ~IFCAP_POLLING;
RL_UNLOCK(sc);
return (error);
}
#endif /* DEVICE_POLLING */
break; break;
default: default:
error = ether_ioctl(ifp, command, data); error = ether_ioctl(ifp, command, data);
@ -1712,9 +1719,6 @@ rl_stop(struct rl_softc *sc)
ifp->if_timer = 0; ifp->if_timer = 0;
callout_stop(&sc->rl_stat_callout); callout_stop(&sc->rl_stat_callout);
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
#ifdef DEVICE_POLLING
ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */
CSR_WRITE_1(sc, RL_COMMAND, 0x00); CSR_WRITE_1(sc, RL_COMMAND, 0x00);
CSR_WRITE_2(sc, RL_IMR, 0x0000); CSR_WRITE_2(sc, RL_IMR, 0x0000);


@ -165,11 +165,9 @@ static int sf_miibus_readreg(device_t, int, int);
static int sf_miibus_writereg(device_t, int, int, int); static int sf_miibus_writereg(device_t, int, int, int);
static void sf_miibus_statchg(device_t); static void sf_miibus_statchg(device_t);
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
static void sf_poll(struct ifnet *ifp, enum poll_cmd cmd, static void sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
int count); static void sf_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count);
static void sf_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, #endif
int count);
#endif /* DEVICE_POLLING */
static u_int32_t csr_read_4(struct sf_softc *, int); static u_int32_t csr_read_4(struct sf_softc *, int);
static void csr_write_4(struct sf_softc *, int, u_int32_t); static void csr_write_4(struct sf_softc *, int, u_int32_t);
@ -560,10 +558,31 @@ sf_ioctl(ifp, command, data)
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
break; break;
case SIOCSIFCAP: case SIOCSIFCAP:
SF_LOCK(sc); #ifdef DEVICE_POLLING
ifp->if_capenable &= ~IFCAP_POLLING; if (ifr->ifr_reqcap & IFCAP_POLLING &&
ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING; !(ifp->if_capenable & IFCAP_POLLING)) {
SF_UNLOCK(sc); error = ether_poll_register(sf_poll, ifp);
if (error)
return(error);
SF_LOCK(sc);
/* Disable interrupts */
csr_write_4(sc, SF_IMR, 0x00000000);
ifp->if_capenable |= IFCAP_POLLING;
SF_UNLOCK(sc);
return (error);
}
if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
ifp->if_capenable & IFCAP_POLLING) {
error = ether_poll_deregister(ifp);
/* Enable interrupts. */
SF_LOCK(sc);
csr_write_4(sc, SF_IMR, SF_INTRS);
ifp->if_capenable &= ~IFCAP_POLLING;
SF_UNLOCK(sc);
return (error);
}
#endif /* DEVICE_POLLING */
break; break;
default: default:
error = ether_ioctl(ifp, command, data); error = ether_ioctl(ifp, command, data);
@ -749,10 +768,10 @@ sf_attach(dev)
IFQ_SET_MAXLEN(&ifp->if_snd, SF_TX_DLIST_CNT - 1); IFQ_SET_MAXLEN(&ifp->if_snd, SF_TX_DLIST_CNT - 1);
ifp->if_snd.ifq_drv_maxlen = SF_TX_DLIST_CNT - 1; ifp->if_snd.ifq_drv_maxlen = SF_TX_DLIST_CNT - 1;
IFQ_SET_READY(&ifp->if_snd); IFQ_SET_READY(&ifp->if_snd);
ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING; ifp->if_capabilities |= IFCAP_POLLING;
#endif /* DEVICE_POLLING */ #endif
ifp->if_capenable = ifp->if_capabilities;
/* /*
* Call MI attach routine. * Call MI attach routine.
@ -794,6 +813,11 @@ sf_detach(dev)
KASSERT(mtx_initialized(&sc->sf_mtx), ("sf mutex not initialized")); KASSERT(mtx_initialized(&sc->sf_mtx), ("sf mutex not initialized"));
ifp = sc->sf_ifp; ifp = sc->sf_ifp;
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
/* These should only be active if attach succeeded */ /* These should only be active if attach succeeded */
if (device_is_attached(dev)) { if (device_is_attached(dev)) {
SF_LOCK(sc); SF_LOCK(sc);
@ -946,12 +970,12 @@ sf_rxeof(sc)
struct mbuf *m0; struct mbuf *m0;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) { if (ifp->if_capenable & IFCAP_POLLING) {
if (sc->rxcycles <= 0) if (sc->rxcycles <= 0)
break; break;
sc->rxcycles--; sc->rxcycles--;
} }
#endif /* DEVICE_POLLING */ #endif
cur_rx = &sc->sf_ldata->sf_rx_clist[cmpconsidx]; cur_rx = &sc->sf_ldata->sf_rx_clist[cmpconsidx];
desc = &sc->sf_ldata->sf_rx_dlist_big[cur_rx->sf_endidx]; desc = &sc->sf_ldata->sf_rx_dlist_big[cur_rx->sf_endidx];
@ -1068,7 +1092,8 @@ sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
struct sf_softc *sc = ifp->if_softc; struct sf_softc *sc = ifp->if_softc;
SF_LOCK(sc); SF_LOCK(sc);
sf_poll_locked(ifp, cmd, count); if (ifp->if_drv_flags & IFF_DRV_RUNNING)
sf_poll_locked(ifp, cmd, count);
SF_UNLOCK(sc); SF_UNLOCK(sc);
} }
@ -1079,17 +1104,6 @@ sf_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
SF_LOCK_ASSERT(sc); SF_LOCK_ASSERT(sc);
if (!(ifp->if_capenable & IFCAP_POLLING)) {
ether_poll_deregister(ifp);
cmd = POLL_DEREGISTER;
}
if (cmd == POLL_DEREGISTER) {
/* Final call, enable interrupts. */
csr_write_4(sc, SF_IMR, SF_INTRS);
return;
}
sc->rxcycles = count; sc->rxcycles = count;
sf_rxeof(sc); sf_rxeof(sc);
sf_txeof(sc); sf_txeof(sc);
@ -1131,17 +1145,11 @@ sf_intr(arg)
ifp = sc->sf_ifp; ifp = sc->sf_ifp;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING) {
goto done_locked; SF_UNLOCK(sc);
return;
if ((ifp->if_capenable & IFCAP_POLLING) &&
ether_poll_register(sf_poll, ifp)) {
/* OK, disable interrupts. */
csr_write_4(sc, SF_IMR, 0x00000000);
sf_poll_locked(ifp, 0, 1);
goto done_locked;
} }
#endif /* DEVICE_POLLING */ #endif
if (!(csr_read_4(sc, SF_ISR_SHADOW) & SF_ISR_PCIINT_ASSERTED)) { if (!(csr_read_4(sc, SF_ISR_SHADOW) & SF_ISR_PCIINT_ASSERTED)) {
SF_UNLOCK(sc); SF_UNLOCK(sc);
@ -1185,9 +1193,6 @@ sf_intr(arg)
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
sf_start_locked(ifp); sf_start_locked(ifp);
#ifdef DEVICE_POLLING
done_locked:
#endif /* DEVICE_POLLING */
SF_UNLOCK(sc); SF_UNLOCK(sc);
} }
@ -1296,10 +1301,10 @@ sf_init_locked(sc)
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
/* Disable interrupts if we are polling. */ /* Disable interrupts if we are polling. */
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING)
csr_write_4(sc, SF_IMR, 0x00000000); csr_write_4(sc, SF_IMR, 0x00000000);
else else
#endif /* DEVICE_POLLING */ #endif
/* Enable interrupts. */ /* Enable interrupts. */
csr_write_4(sc, SF_IMR, SF_INTRS); csr_write_4(sc, SF_IMR, SF_INTRS);
@ -1478,10 +1483,6 @@ sf_stop(sc)
callout_stop(&sc->sf_stat_callout); callout_stop(&sc->sf_stat_callout);
#ifdef DEVICE_POLLING
ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */
csr_write_4(sc, SF_GEN_ETH_CTL, 0); csr_write_4(sc, SF_GEN_ETH_CTL, 0);
csr_write_4(sc, SF_CQ_CONSIDX, 0); csr_write_4(sc, SF_CQ_CONSIDX, 0);
csr_write_4(sc, SF_CQ_PRODIDX, 0); csr_write_4(sc, SF_CQ_PRODIDX, 0);


@ -1217,11 +1217,10 @@ sis_attach(device_t dev)
*/ */
ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capabilities |= IFCAP_VLAN_MTU;
ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING; ifp->if_capabilities |= IFCAP_POLLING;
#endif #endif
ifp->if_capenable = ifp->if_capabilities;
/* Hook interrupt last to avoid having to lock softc */ /* Hook interrupt last to avoid having to lock softc */
error = bus_setup_intr(dev, sc->sis_res[1], INTR_TYPE_NET | INTR_MPSAFE, error = bus_setup_intr(dev, sc->sis_res[1], INTR_TYPE_NET | INTR_MPSAFE,
@ -1257,6 +1256,11 @@ sis_detach(device_t dev)
KASSERT(mtx_initialized(&sc->sis_mtx), ("sis mutex not initialized")); KASSERT(mtx_initialized(&sc->sis_mtx), ("sis mutex not initialized"));
ifp = sc->sis_ifp; ifp = sc->sis_ifp;
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
/* These should only be active if attach succeeded. */ /* These should only be active if attach succeeded. */
if (device_is_attached(dev)) { if (device_is_attached(dev)) {
SIS_LOCK(sc); SIS_LOCK(sc);
@ -1404,12 +1408,12 @@ sis_rxeof(struct sis_softc *sc)
cur_rx = cur_rx->sis_nextdesc) { cur_rx = cur_rx->sis_nextdesc) {
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) { if (ifp->if_capenable & IFCAP_POLLING) {
if (sc->rxcycles <= 0) if (sc->rxcycles <= 0)
break; break;
sc->rxcycles--; sc->rxcycles--;
} }
#endif /* DEVICE_POLLING */ #endif
rxstat = cur_rx->sis_rxstat; rxstat = cur_rx->sis_rxstat;
bus_dmamap_sync(sc->sis_tag, bus_dmamap_sync(sc->sis_tag,
cur_rx->sis_map, BUS_DMASYNC_POSTWRITE); cur_rx->sis_map, BUS_DMASYNC_POSTWRITE);
@ -1574,13 +1578,9 @@ sis_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
struct sis_softc *sc = ifp->if_softc; struct sis_softc *sc = ifp->if_softc;
SIS_LOCK(sc); SIS_LOCK(sc);
if (!(ifp->if_capenable & IFCAP_POLLING)) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
ether_poll_deregister(ifp); SIS_UNLOCK(sc);
cmd = POLL_DEREGISTER; return;
}
if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
CSR_WRITE_4(sc, SIS_IER, 1);
goto done;
} }
/* /*
@ -1613,7 +1613,7 @@ sis_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
sis_initl(sc); sis_initl(sc);
} }
} }
done:
SIS_UNLOCK(sc); SIS_UNLOCK(sc);
} }
#endif /* DEVICE_POLLING */ #endif /* DEVICE_POLLING */
@ -1633,14 +1633,11 @@ sis_intr(void *arg)
SIS_LOCK(sc); SIS_LOCK(sc);
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING) {
goto done; SIS_UNLOCK(sc);
if ((ifp->if_capenable & IFCAP_POLLING) && return;
ether_poll_register(sis_poll, ifp)) { /* ok, disable interrupts */
CSR_WRITE_4(sc, SIS_IER, 0);
goto done;
} }
#endif /* DEVICE_POLLING */ #endif
/* Disable interrupts. */ /* Disable interrupts. */
CSR_WRITE_4(sc, SIS_IER, 0); CSR_WRITE_4(sc, SIS_IER, 0);
@ -1679,9 +1676,6 @@ sis_intr(void *arg)
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
sis_startl(ifp); sis_startl(ifp);
#ifdef DEVICE_POLLING
done:
#endif /* DEVICE_POLLING */
SIS_UNLOCK(sc); SIS_UNLOCK(sc);
} }
@ -2033,10 +2027,10 @@ sis_initl(struct sis_softc *sc)
* ... only enable interrupts if we are not polling, make sure * ... only enable interrupts if we are not polling, make sure
* they are off otherwise. * they are off otherwise.
*/ */
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING)
CSR_WRITE_4(sc, SIS_IER, 0); CSR_WRITE_4(sc, SIS_IER, 0);
else else
#endif /* DEVICE_POLLING */ #endif
CSR_WRITE_4(sc, SIS_IER, 1); CSR_WRITE_4(sc, SIS_IER, 1);
/* Enable receiver and transmitter. */ /* Enable receiver and transmitter. */
@ -2133,10 +2127,32 @@ sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
break; break;
case SIOCSIFCAP: case SIOCSIFCAP:
SIS_LOCK(sc); /* ok, disable interrupts */
ifp->if_capenable &= ~IFCAP_POLLING; #ifdef DEVICE_POLLING
ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING; if (ifr->ifr_reqcap & IFCAP_POLLING &&
SIS_UNLOCK(sc); !(ifp->if_capenable & IFCAP_POLLING)) {
error = ether_poll_register(sis_poll, ifp);
if (error)
return(error);
SIS_LOCK(sc);
/* Disable interrupts */
CSR_WRITE_4(sc, SIS_IER, 0);
ifp->if_capenable |= IFCAP_POLLING;
SIS_UNLOCK(sc);
return (error);
}
if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
ifp->if_capenable & IFCAP_POLLING) {
error = ether_poll_deregister(ifp);
/* Enable interrupts. */
SIS_LOCK(sc);
CSR_WRITE_4(sc, SIS_IER, 1);
ifp->if_capenable &= ~IFCAP_POLLING;
SIS_UNLOCK(sc);
return (error);
}
#endif /* DEVICE_POLLING */
break; break;
default: default:
error = ether_ioctl(ifp, command, data); error = ether_ioctl(ifp, command, data);
@ -2192,9 +2208,6 @@ sis_stop(struct sis_softc *sc)
callout_stop(&sc->sis_stat_ch); callout_stop(&sc->sis_stat_ch);
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
#ifdef DEVICE_POLLING
ether_poll_deregister(ifp);
#endif
CSR_WRITE_4(sc, SIS_IER, 0); CSR_WRITE_4(sc, SIS_IER, 0);
CSR_WRITE_4(sc, SIS_IMR, 0); CSR_WRITE_4(sc, SIS_IMR, 0);
CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */ CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */


@ -621,7 +621,8 @@ ste_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
struct ste_softc *sc = ifp->if_softc; struct ste_softc *sc = ifp->if_softc;
STE_LOCK(sc); STE_LOCK(sc);
ste_poll_locked(ifp, cmd, count); if (ifp->if_drv_flags & IFF_DRV_RUNNING)
ste_poll_locked(ifp, cmd, count);
STE_UNLOCK(sc); STE_UNLOCK(sc);
} }
@ -631,14 +632,6 @@ ste_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
struct ste_softc *sc = ifp->if_softc; struct ste_softc *sc = ifp->if_softc;
STE_LOCK_ASSERT(sc); STE_LOCK_ASSERT(sc);
if (!(ifp->if_capenable & IFCAP_POLLING)) {
ether_poll_deregister(ifp);
cmd = POLL_DEREGISTER;
}
if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
return;
}
sc->rxcycles = count; sc->rxcycles = count;
if (cmd == POLL_AND_CHECK_STATUS) if (cmd == POLL_AND_CHECK_STATUS)
@ -685,15 +678,11 @@ ste_intr(xsc)
ifp = sc->ste_ifp; ifp = sc->ste_ifp;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING) {
goto done; STE_UNLOCK(sc);
if ((ifp->if_capenable & IFCAP_POLLING) && return;
ether_poll_register(ste_poll, ifp)) { /* ok, disable interrupts */
CSR_WRITE_2(sc, STE_IMR, 0);
ste_poll_locked(ifp, 0, 1);
goto done;
} }
#endif /* DEVICE_POLLING */ #endif
/* See if this is really our interrupt. */ /* See if this is really our interrupt. */
if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH)) { if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH)) {
@ -739,9 +728,6 @@ ste_intr(xsc)
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
ste_start_locked(ifp); ste_start_locked(ifp);
#ifdef DEVICE_POLLING
done:
#endif /* DEVICE_POLLING */
STE_UNLOCK(sc); STE_UNLOCK(sc);
return; return;
@ -791,12 +777,12 @@ ste_rxeof(sc)
while((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status) while((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status)
& STE_RXSTAT_DMADONE) { & STE_RXSTAT_DMADONE) {
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) { if (ifp->if_capenable & IFCAP_POLLING) {
if (sc->rxcycles <= 0) if (sc->rxcycles <= 0)
break; break;
sc->rxcycles--; sc->rxcycles--;
} }
#endif /* DEVICE_POLLING */ #endif
if ((STE_RX_LIST_CNT - count) < 3) { if ((STE_RX_LIST_CNT - count) < 3) {
break; break;
} }
@ -1115,10 +1101,10 @@ ste_attach(dev)
*/ */
ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capabilities |= IFCAP_VLAN_MTU;
ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING; ifp->if_capabilities |= IFCAP_POLLING;
#endif #endif
ifp->if_capenable = ifp->if_capabilities;
/* Hook interrupt last to avoid having to lock softc */ /* Hook interrupt last to avoid having to lock softc */
error = bus_setup_intr(dev, sc->ste_irq, INTR_TYPE_NET | INTR_MPSAFE, error = bus_setup_intr(dev, sc->ste_irq, INTR_TYPE_NET | INTR_MPSAFE,
@ -1155,6 +1141,11 @@ ste_detach(dev)
KASSERT(mtx_initialized(&sc->ste_mtx), ("ste mutex not initialized")); KASSERT(mtx_initialized(&sc->ste_mtx), ("ste mutex not initialized"));
ifp = sc->ste_ifp; ifp = sc->ste_ifp;
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
/* These should only be active if attach succeeded */ /* These should only be active if attach succeeded */
if (device_is_attached(dev)) { if (device_is_attached(dev)) {
STE_LOCK(sc); STE_LOCK(sc);
@ -1386,10 +1377,10 @@ ste_init_locked(sc)
CSR_WRITE_2(sc, STE_ISR, 0xFFFF); CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
/* Disable interrupts if we are polling. */ /* Disable interrupts if we are polling. */
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING)
CSR_WRITE_2(sc, STE_IMR, 0); CSR_WRITE_2(sc, STE_IMR, 0);
else else
#endif /* DEVICE_POLLING */ #endif
/* Enable interrupts. */ /* Enable interrupts. */
CSR_WRITE_2(sc, STE_IMR, STE_INTRS); CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
@ -1418,9 +1409,6 @@ ste_stop(sc)
callout_stop(&sc->ste_stat_callout); callout_stop(&sc->ste_stat_callout);
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);
#ifdef DEVICE_POLLING
ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */
CSR_WRITE_2(sc, STE_IMR, 0); CSR_WRITE_2(sc, STE_IMR, 0);
STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE); STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE);
@ -1539,10 +1527,31 @@ ste_ioctl(ifp, command, data)
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
break; break;
case SIOCSIFCAP: case SIOCSIFCAP:
STE_LOCK(sc); #ifdef DEVICE_POLLING
ifp->if_capenable &= ~IFCAP_POLLING; if (ifr->ifr_reqcap & IFCAP_POLLING &&
ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING; !(ifp->if_capenable & IFCAP_POLLING)) {
STE_UNLOCK(sc); error = ether_poll_register(ste_poll, ifp);
if (error)
return(error);
STE_LOCK(sc);
/* Disable interrupts */
CSR_WRITE_2(sc, STE_IMR, 0);
ifp->if_capenable |= IFCAP_POLLING;
STE_UNLOCK(sc);
return (error);
}
if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
ifp->if_capenable & IFCAP_POLLING) {
error = ether_poll_deregister(ifp);
/* Enable interrupts. */
STE_LOCK(sc);
CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
ifp->if_capenable &= ~IFCAP_POLLING;
STE_UNLOCK(sc);
return (error);
}
#endif /* DEVICE_POLLING */
break; break;
default: default:
error = ether_ioctl(ifp, command, data); error = ether_ioctl(ifp, command, data);


@ -742,10 +742,10 @@ vr_attach(dev)
IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_LIST_CNT - 1); IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_LIST_CNT - 1);
ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1; ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1;
IFQ_SET_READY(&ifp->if_snd); IFQ_SET_READY(&ifp->if_snd);
ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING; ifp->if_capabilities |= IFCAP_POLLING;
#endif #endif
ifp->if_capenable = ifp->if_capabilities;
/* Do MII setup. */ /* Do MII setup. */
if (mii_phy_probe(dev, &sc->vr_miibus, if (mii_phy_probe(dev, &sc->vr_miibus,
@ -794,6 +794,11 @@ vr_detach(device_t dev)
KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized")); KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
VR_LOCK(sc); VR_LOCK(sc);
sc->suspended = 1; sc->suspended = 1;
@ -952,12 +957,12 @@ vr_rxeof(struct vr_softc *sc)
while (!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) & while (!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
VR_RXSTAT_OWN)) { VR_RXSTAT_OWN)) {
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) { if (ifp->if_capenable & IFCAP_POLLING) {
if (sc->rxcycles <= 0) if (sc->rxcycles <= 0)
break; break;
sc->rxcycles--; sc->rxcycles--;
} }
#endif /* DEVICE_POLLING */ #endif
m0 = NULL; m0 = NULL;
cur_rx = sc->vr_cdata.vr_rx_head; cur_rx = sc->vr_cdata.vr_rx_head;
sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc; sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
@ -1151,7 +1156,8 @@ vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
struct vr_softc *sc = ifp->if_softc; struct vr_softc *sc = ifp->if_softc;
VR_LOCK(sc); VR_LOCK(sc);
vr_poll_locked(ifp, cmd, count); if (ifp->if_drv_flags & IFF_DRV_RUNNING)
vr_poll_locked(ifp, cmd, count);
VR_UNLOCK(sc); VR_UNLOCK(sc);
} }
@ -1162,17 +1168,6 @@ vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
VR_LOCK_ASSERT(sc); VR_LOCK_ASSERT(sc);
if (!(ifp->if_capenable & IFCAP_POLLING)) {
ether_poll_deregister(ifp);
cmd = POLL_DEREGISTER;
}
if (cmd == POLL_DEREGISTER) {
/* Final call, enable interrupts. */
CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
return;
}
sc->rxcycles = count; sc->rxcycles = count;
vr_rxeof(sc); vr_rxeof(sc);
vr_txeof(sc); vr_txeof(sc);
@ -1249,17 +1244,9 @@ vr_intr(void *arg)
} }
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING)
goto done_locked; goto done_locked;
#endif
if ((ifp->if_capenable & IFCAP_POLLING) &&
ether_poll_register(vr_poll, ifp)) {
/* OK, disable interrupts. */
CSR_WRITE_2(sc, VR_IMR, 0x0000);
vr_poll_locked(ifp, 0, 1);
goto done_locked;
}
#endif /* DEVICE_POLLING */
/* Suppress unwanted interrupts. */ /* Suppress unwanted interrupts. */
if (!(ifp->if_flags & IFF_UP)) { if (!(ifp->if_flags & IFF_UP)) {
@ -1534,10 +1521,10 @@ vr_init_locked(struct vr_softc *sc)
/* /*
* Disable interrupts if we are polling. * Disable interrupts if we are polling.
*/ */
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING)
CSR_WRITE_2(sc, VR_IMR, 0); CSR_WRITE_2(sc, VR_IMR, 0);
else else
#endif /* DEVICE_POLLING */ #endif
/* /*
* Enable interrupts. * Enable interrupts.
*/ */
@ -1615,7 +1602,31 @@ vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
break; break;
case SIOCSIFCAP: case SIOCSIFCAP:
ifp->if_capenable = ifr->ifr_reqcap; #ifdef DEVICE_POLLING
if (ifr->ifr_reqcap & IFCAP_POLLING &&
!(ifp->if_capenable & IFCAP_POLLING)) {
error = ether_poll_register(vr_poll, ifp);
if (error)
return(error);
VR_LOCK(sc);
/* Disable interrupts */
CSR_WRITE_2(sc, VR_IMR, 0x0000);
ifp->if_capenable |= IFCAP_POLLING;
VR_UNLOCK(sc);
return (error);
}
if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
ifp->if_capenable & IFCAP_POLLING) {
error = ether_poll_deregister(ifp);
/* Enable interrupts. */
VR_LOCK(sc);
CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
ifp->if_capenable &= ~IFCAP_POLLING;
VR_UNLOCK(sc);
return (error);
}
#endif /* DEVICE_POLLING */
break; break;
default: default:
error = ether_ioctl(ifp, command, data); error = ether_ioctl(ifp, command, data);
@ -1662,9 +1673,6 @@ vr_stop(struct vr_softc *sc)
untimeout(vr_tick, sc, sc->vr_stat_ch); untimeout(vr_tick, sc, sc->vr_stat_ch);
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
#ifdef DEVICE_POLLING
ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */
VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP); VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON)); VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));


@ -249,7 +249,7 @@ static int xl_resume(device_t);
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
static void xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); static void xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
static void xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count); static void xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif /* DEVICE_POLLING */ #endif
static int xl_ifmedia_upd(struct ifnet *); static int xl_ifmedia_upd(struct ifnet *);
static void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
@ -1487,9 +1487,10 @@ xl_attach(device_t dev)
ifp->if_capabilities |= IFCAP_HWCSUM; ifp->if_capabilities |= IFCAP_HWCSUM;
#endif #endif
} }
ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING; ifp->if_capabilities |= IFCAP_POLLING;
#endif /* DEVICE_POLLING */ #endif
ifp->if_start = xl_start; ifp->if_start = xl_start;
ifp->if_watchdog = xl_watchdog; ifp->if_watchdog = xl_watchdog;
ifp->if_init = xl_init; ifp->if_init = xl_init;
@ -1497,7 +1498,6 @@ xl_attach(device_t dev)
IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1); IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1);
ifp->if_snd.ifq_drv_maxlen = XL_TX_LIST_CNT - 1; ifp->if_snd.ifq_drv_maxlen = XL_TX_LIST_CNT - 1;
IFQ_SET_READY(&ifp->if_snd); IFQ_SET_READY(&ifp->if_snd);
ifp->if_capenable = ifp->if_capabilities;
/* /*
* Now we have to see what sort of media we have. * Now we have to see what sort of media we have.
@ -1690,6 +1690,11 @@ xl_detach(device_t dev)
KASSERT(mtx_initialized(&sc->xl_mtx), ("xl mutex not initialized")); KASSERT(mtx_initialized(&sc->xl_mtx), ("xl mutex not initialized"));
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
if (sc->xl_flags & XL_FLAG_USE_MMIO) { if (sc->xl_flags & XL_FLAG_USE_MMIO) {
rid = XL_PCI_LOMEM; rid = XL_PCI_LOMEM;
res = SYS_RES_MEMORY; res = SYS_RES_MEMORY;
@ -1960,12 +1965,12 @@ xl_rxeof(struct xl_softc *sc)
BUS_DMASYNC_POSTREAD); BUS_DMASYNC_POSTREAD);
while ((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) { while ((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) {
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) { if (ifp->if_capenable & IFCAP_POLLING) {
if (sc->rxcycles <= 0) if (sc->rxcycles <= 0)
break; break;
sc->rxcycles--; sc->rxcycles--;
} }
#endif /* DEVICE_POLLING */ #endif
cur_rx = sc->xl_cdata.xl_rx_head; cur_rx = sc->xl_cdata.xl_rx_head;
sc->xl_cdata.xl_rx_head = cur_rx->xl_next; sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
total_len = rxstat & XL_RXSTAT_LENMASK; total_len = rxstat & XL_RXSTAT_LENMASK;
@ -2275,24 +2280,11 @@ xl_intr(void *arg)
XL_LOCK(sc); XL_LOCK(sc);
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
if (ifp->if_flags & IFF_POLLING) { if (ifp->if_capenable & IFCAP_POLLING) {
XL_UNLOCK(sc); XL_UNLOCK(sc);
return; return;
} }
#endif
if ((ifp->if_capenable & IFCAP_POLLING) &&
ether_poll_register(xl_poll, ifp)) {
/* Disable interrupts. */
CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
if (sc->xl_flags & XL_FLAG_FUNCREG)
bus_space_write_4(sc->xl_ftag, sc->xl_fhandle,
4, 0x8000);
xl_poll_locked(ifp, 0, 1);
XL_UNLOCK(sc);
return;
}
#endif /* DEVICE_POLLING */
while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS && while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS &&
status != 0xFFFF) { status != 0xFFFF) {
@ -2351,7 +2343,8 @@ xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
struct xl_softc *sc = ifp->if_softc; struct xl_softc *sc = ifp->if_softc;
XL_LOCK(sc); XL_LOCK(sc);
xl_poll_locked(ifp, cmd, count); if (ifp->if_drv_flags & IFF_DRV_RUNNING)
xl_poll_locked(ifp, cmd, count);
XL_UNLOCK(sc); XL_UNLOCK(sc);
} }
@ -2362,21 +2355,6 @@ xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
XL_LOCK_ASSERT(sc); XL_LOCK_ASSERT(sc);
if (!(ifp->if_capenable & IFCAP_POLLING)) {
ether_poll_deregister(ifp);
cmd = POLL_DEREGISTER;
}
if (cmd == POLL_DEREGISTER) {
/* Final call; enable interrupts. */
CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
if (sc->xl_flags & XL_FLAG_FUNCREG)
bus_space_write_4(sc->xl_ftag, sc->xl_fhandle,
4, 0x8000);
return;
}
sc->rxcycles = count; sc->rxcycles = count;
xl_rxeof(sc); xl_rxeof(sc);
if (sc->xl_type == XL_TYPE_905B) if (sc->xl_type == XL_TYPE_905B)
@ -2989,10 +2967,10 @@ xl_init_locked(struct xl_softc *sc)
CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
/* Disable interrupts if we are polling. */ /* Disable interrupts if we are polling. */
if (ifp->if_flags & IFF_POLLING) if (ifp->if_capenable & IFCAP_POLLING)
CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
else else
#endif /* DEVICE_POLLING */ #endif
CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
if (sc->xl_flags & XL_FLAG_FUNCREG) if (sc->xl_flags & XL_FLAG_FUNCREG)
bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000); bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
@ -3204,6 +3182,35 @@ xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
&mii->mii_media, command); &mii->mii_media, command);
break; break;
case SIOCSIFCAP: case SIOCSIFCAP:
#ifdef DEVICE_POLLING
if (ifr->ifr_reqcap & IFCAP_POLLING &&
!(ifp->if_capenable & IFCAP_POLLING)) {
error = ether_poll_register(xl_poll, ifp);
if (error)
return(error);
XL_LOCK(sc);
/* Disable interrupts */
CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
ifp->if_capenable |= IFCAP_POLLING;
XL_UNLOCK(sc);
return (error);
}
if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
ifp->if_capenable & IFCAP_POLLING) {
error = ether_poll_deregister(ifp);
/* Enable interrupts. */
XL_LOCK(sc);
CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
if (sc->xl_flags & XL_FLAG_FUNCREG)
bus_space_write_4(sc->xl_ftag, sc->xl_fhandle,
4, 0x8000);
ifp->if_capenable &= ~IFCAP_POLLING;
XL_UNLOCK(sc);
return (error);
}
#endif /* DEVICE_POLLING */
XL_LOCK(sc); XL_LOCK(sc);
ifp->if_capenable = ifr->ifr_reqcap; ifp->if_capenable = ifr->ifr_reqcap;
if (ifp->if_capenable & IFCAP_TXCSUM) if (ifp->if_capenable & IFCAP_TXCSUM)
@ -3268,9 +3275,6 @@ xl_stop(struct xl_softc *sc)
XL_LOCK_ASSERT(sc); XL_LOCK_ASSERT(sc);
ifp->if_timer = 0; ifp->if_timer = 0;
#ifdef DEVICE_POLLING
ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */
CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);