
A number of important fixes:

  - An mbuf reused after an RX_COPY-optimized operation could sometimes carry
    a bogus cached address, resulting in TCP hangs. Save the cached address at
    the critical points so it stays correct. Thanks to Michael and the team at
    Verisign for finding this problem.
  - Clear the rxbuf->flags member in a couple more spots, just to be sure no
    incorrect RX_COPY state is left around. Thanks to Adrian for tracking
    these down.
  - Remove the rearm_queues function from the driver. Verisign found it
    responsible for some out-of-order packets, and it was always a band-aid;
    with the other fixes in this delta it can finally be removed.
  - In the other/link interrupt handler, the entire state of the EICS register
    was being written back into EICR (which clears causes and thus re-enables
    those interrupts). This was wrong, so mask off the queue portion of the
    register value and clear only the other/link interrupts we intend.
    Marc from Verisign found this.
  - Make the SFP+ unsupported option a tunable, by customer request (see the
    usage sketch after this list).
  - Finally, a couple of minor DEBUG string fixes.
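
For reference, a minimal usage sketch for the new knob: the diff below
registers the tunable as hw.ixgbe.unsupported_sfp, defaulting to off.
Assuming the usual loader(8) tunable mechanism and that a non-zero value
means "allow unsupported SFP+ modules", it could be set at boot like this:

    # /boot/loader.conf -- hypothetical example; only the tunable name is
    # taken from this commit, the value semantics are an assumption
    hw.ixgbe.unsupported_sfp="1"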

I want to call out and thank all the participants in the 10G community/Intel
calls for helping track down these problems and make the driver better for
everyone!

MFC after:	3 days, these are critical fixes for 9.2!
Jack F Vogel 2013-08-01 20:10:16 +00:00
parent 04ae0d7cc5
commit cbe75ae8f5
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=253865

@@ -45,7 +45,7 @@ int ixgbe_display_debug_stats = 0;
/*********************************************************************
* Driver version
*********************************************************************/
-char ixgbe_driver_version[] = "2.5.13";
+char ixgbe_driver_version[] = "2.5.15";
/*********************************************************************
* PCI Device ID Table
@@ -297,6 +297,7 @@ TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
** doing so you are on your own :)
*/
static int allow_unsupported_sfp = FALSE;
TUNABLE_INT("hw.ixgbe.unsupported_sfp", &allow_unsupported_sfp);
/*
** HW RSC control:
@@ -1071,7 +1072,7 @@ ixgbe_init_locked(struct adapter *adapter)
u32 rxdctl, rxctrl;
mtx_assert(&adapter->core_mtx, MA_OWNED);
INIT_DEBUGOUT("ixgbe_init: begin");
INIT_DEBUGOUT("ixgbe_init_locked: begin");
hw->adapter_stopped = FALSE;
ixgbe_stop_adapter(hw);
callout_stop(&adapter->timer);
@@ -1382,23 +1383,6 @@ ixgbe_disable_queue(struct adapter *adapter, u32 vector)
}
}
-static inline void
-ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
-{
-	u32 mask;
-	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
-	} else {
-		mask = (queues & 0xFFFFFFFF);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
-		mask = (queues >> 32);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
-	}
-}
static void
ixgbe_handle_que(void *context, int pending)
{
@@ -1506,6 +1490,10 @@ ixgbe_msix_que(void *arg)
bool more;
u32 newitr = 0;
+/* Protect against spurious interrupts */
+if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+	return;
ixgbe_disable_queue(adapter, que->msix);
++que->irqs;
@@ -1592,6 +1580,8 @@ ixgbe_msix_link(void *arg)
/* First get the cause */
reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
+/* Be sure the queue bits are not cleared */
+reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
/* Clear interrupt with write */
IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
@@ -2067,7 +2057,6 @@ ixgbe_local_timer(void *arg)
goto watchdog;
out:
-ixgbe_rearm_queues(adapter, adapter->que_mask);
callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
return;
@@ -3201,7 +3190,7 @@ ixgbe_free_transmit_buffers(struct tx_ring *txr)
struct ixgbe_tx_buf *tx_buffer;
int i;
INIT_DEBUGOUT("free_transmit_ring: begin");
INIT_DEBUGOUT("ixgbe_free_transmit_ring: begin");
if (txr->tx_buffers == NULL)
return;
@@ -4005,11 +3994,13 @@ ixgbe_setup_receive_ring(struct rx_ring *rxr)
addr = PNMB(slot + sj, &paddr);
netmap_load_map(rxr->ptag, rxbuf->pmap, addr);
-/* Update descriptor */
+/* Update descriptor and the cached value */
rxr->rx_base[j].read.pkt_addr = htole64(paddr);
+rxbuf->addr = htole64(paddr);
continue;
}
#endif /* DEV_NETMAP */
+rxbuf->flags = 0;
rxbuf->buf = m_getjcl(M_NOWAIT, MT_DATA,
M_PKTHDR, adapter->rx_mbuf_sz);
if (rxbuf->buf == NULL) {
@@ -4026,8 +4017,9 @@ ixgbe_setup_receive_ring(struct rx_ring *rxr)
goto fail;
bus_dmamap_sync(rxr->ptag,
rxbuf->pmap, BUS_DMASYNC_PREREAD);
-/* Update descriptor */
+/* Update the descriptor and the cached value */
rxr->rx_base[j].read.pkt_addr = htole64(seg[0].ds_addr);
+rxbuf->addr = htole64(seg[0].ds_addr);
}
@@ -4244,6 +4236,8 @@ ixgbe_free_receive_structures(struct adapter *adapter)
{
struct rx_ring *rxr = adapter->rx_rings;
INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
for (int i = 0; i < adapter->num_queues; i++, rxr++) {
struct lro_ctrl *lro = &rxr->lro;
ixgbe_free_receive_buffers(rxr);
@@ -4268,7 +4262,7 @@ ixgbe_free_receive_buffers(struct rx_ring *rxr)
struct adapter *adapter = rxr->adapter;
struct ixgbe_rx_buf *rxbuf;
INIT_DEBUGOUT("free_receive_structures: begin");
INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
/* Cleanup any existing buffers */
if (rxr->rx_buffers != NULL) {
@@ -4359,6 +4353,8 @@ ixgbe_rx_discard(struct rx_ring *rxr, int i)
rbuf->buf = NULL;
}
+rbuf->flags = 0;
return;
}