Mirror of https://git.FreeBSD.org/src.git, synced 2024-12-20 11:11:24 +00:00.

Style changes, mostly automated.

Differential Revision: https://reviews.freebsd.org/D4179
Submitted by: Daria Genzel <dariaz@mellanox.com>
Sponsored by: Mellanox Technologies
MFC after: 3 days

commit bb3853c6bd (parent ee09079968)

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=291070
@@ -74,9 +74,9 @@
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd

/* freeBSD HW LRO is limited by 16KB - the size of max mbuf */
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ MJUM16BYTES
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
@@ -89,13 +89,14 @@
((swmtu) + (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN))
#define MLX5E_SW2MB_MTU(swmtu) \
(MLX5E_SW2HW_MTU(swmtu) + MLX5E_NET_IP_ALIGN)
#define MLX5E_MTU_MIN 72 /* Min MTU allowed by the kernel */
#define MLX5E_MTU_MAX MIN(ETHERMTU_JUMBO, MJUM16BYTES) /* Max MTU of Ethernet jumbo frames */
#define MLX5E_MTU_MAX MIN(ETHERMTU_JUMBO, MJUM16BYTES) /* Max MTU of Ethernet
* jumbo frames */

#define MLX5E_BUDGET_MAX 8192 /* RX and TX */
#define MLX5E_RX_BUDGET_MAX 256
#define MLX5E_SQ_BF_BUDGET 16
#define MLX5E_SQ_TX_QUEUE_SIZE 4096 /* SQ drbr queue size */

#define MLX5E_MAX_TX_NUM_TC 8 /* units */
#define MLX5E_MAX_TX_HEADER 128 /* bytes */
@@ -159,7 +160,7 @@ typedef void (mlx5e_cq_comp_t)(struct mlx5_core_cq *);
#define MLX5E_VPORT_STATS_NUM (0 MLX5E_VPORT_STATS(MLX5E_STATS_COUNT))

struct mlx5e_vport_stats {
struct sysctl_ctx_list ctx;
u64 arg [0];
MLX5E_VPORT_STATS(MLX5E_STATS_VAR)
u32 rx_out_of_buffer_prev;
@@ -226,7 +227,7 @@ struct mlx5e_vport_stats {
m(+1, u64 out_multicast_pkts, "out_multicast_pkts", "Out multicast packets") \
m(+1, u64 out_broadcast_pkts, "out_broadcast_pkts", "Out broadcast packets")

#define MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG(m) \
m(+1, u64 time_since_last_clear, "time_since_last_clear", \
"Time since the last counters clear event (msec)") \
m(+1, u64 symbol_errors, "symbol_errors", "Symbol errors") \
@@ -291,19 +292,19 @@ struct mlx5e_vport_stats {
(0 MLX5E_PPORT_RFC2819_STATS_DEBUG(MLX5E_STATS_COUNT))
#define MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM \
(0 MLX5E_PPORT_RFC2863_STATS_DEBUG(MLX5E_STATS_COUNT))
#define MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM \
(0 MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG(MLX5E_STATS_COUNT))
#define MLX5E_PORT_STATS_DEBUG_NUM \
(0 MLX5E_PORT_STATS_DEBUG(MLX5E_STATS_COUNT))

struct mlx5e_pport_stats {
struct sysctl_ctx_list ctx;
u64 arg [0];
MLX5E_PPORT_STATS(MLX5E_STATS_VAR)
};

struct mlx5e_port_stats_debug {
struct sysctl_ctx_list ctx;
u64 arg [0];
MLX5E_PORT_STATS_DEBUG(MLX5E_STATS_VAR)
};
@@ -320,7 +321,7 @@ struct mlx5e_port_stats_debug {
#define MLX5E_RQ_STATS_NUM (0 MLX5E_RQ_STATS(MLX5E_STATS_COUNT))

struct mlx5e_rq_stats {
struct sysctl_ctx_list ctx;
u64 arg [0];
MLX5E_RQ_STATS(MLX5E_STATS_VAR)
};
@@ -337,7 +338,7 @@ struct mlx5e_rq_stats {
#define MLX5E_SQ_STATS_NUM (0 MLX5E_SQ_STATS(MLX5E_STATS_COUNT))

struct mlx5e_sq_stats {
struct sysctl_ctx_list ctx;
u64 arg [0];
MLX5E_SQ_STATS(MLX5E_STATS_VAR)
};
@@ -360,8 +361,8 @@ struct mlx5e_params {
u16 tx_cq_moderation_usec;
u16 tx_cq_moderation_pkts;
u16 min_rx_wqes;
bool hw_lro_en;
u32 lro_wqe_sz;
u16 rx_hash_log_tbl_sz;
};

@@ -391,31 +392,31 @@ struct mlx5e_params_ethtool {

/* EEPROM Standards for plug in modules */
#ifndef MLX5E_ETH_MODULE_SFF_8472
#define MLX5E_ETH_MODULE_SFF_8472 0x1
#define MLX5E_ETH_MODULE_SFF_8472_LEN 128
#endif

#ifndef MLX5E_ETH_MODULE_SFF_8636
#define MLX5E_ETH_MODULE_SFF_8636 0x2
#define MLX5E_ETH_MODULE_SFF_8636_LEN 256
#endif

#ifndef MLX5E_ETH_MODULE_SFF_8436
#define MLX5E_ETH_MODULE_SFF_8436 0x3
#define MLX5E_ETH_MODULE_SFF_8436_LEN 256
#endif

/* EEPROM I2C Addresses */
#define MLX5E_I2C_ADDR_LOW 0x50
#define MLX5E_I2C_ADDR_HIGH 0x51

#define MLX5E_EEPROM_LOW_PAGE 0x0
#define MLX5E_EEPROM_HIGH_PAGE 0x3

#define MLX5E_EEPROM_HIGH_PAGE_OFFSET 128
#define MLX5E_EEPROM_PAGE_LENGTH 256

#define MLX5E_EEPROM_INFO_BYTES 0x3

struct mlx5e_cq {
/* data path - accessed per cqe */
@@ -430,9 +431,9 @@ struct mlx5e_cq {
} __aligned(MLX5E_CACHELINE_SIZE);

struct mlx5e_rq_mbuf {
bus_dmamap_t dma_map;
caddr_t data;
struct mbuf *mbuf;
};

struct mlx5e_rq {
@@ -474,9 +475,9 @@ enum {

struct mlx5e_sq {
/* data path */
struct mtx lock;
bus_dma_tag_t dma_tag;
struct mtx comp_lock;

/* dirtied @completion */
u16 cc;
@@ -484,31 +485,31 @@ struct mlx5e_sq {
/* dirtied @xmit */
u16 pc __aligned(MLX5E_CACHELINE_SIZE);
u16 bf_offset;
struct mlx5e_sq_stats stats;

struct mlx5e_cq cq;
struct task sq_task;
struct taskqueue *sq_tq;

/* pointers to per packet info: write@xmit, read@completion */
struct mlx5e_sq_mbuf *mbuf;
struct buf_ring *br;

/* read only */
struct mlx5_wq_cyc wq;
void __iomem *uar_map;
void __iomem *uar_bf_map;
u32 sqn;
u32 bf_buf_size;
struct device *pdev;
u32 mkey_be;

/* control path */
struct mlx5_wq_ctrl wq_ctrl;
struct mlx5_uar uar;
struct mlx5e_channel *channel;
int tc;
unsigned int queue_state;
} __aligned(MLX5E_CACHELINE_SIZE);

static inline bool
@@ -616,7 +617,7 @@ struct mlx5e_priv {
u32 tdn;
struct mlx5_core_mr mr;

struct mlx5e_channel * volatile *channel;
struct mlx5e_channel *volatile *channel;
u32 tisn[MLX5E_MAX_TX_NUM_TC];
u32 rqtn;
u32 tirn[MLX5E_NUM_TT];
@@ -663,15 +664,15 @@ struct mlx5e_rx_wqe {
};

struct mlx5e_eeprom {
int lock_bit;
int i2c_addr;
int page_num;
int device_addr;
int module_num;
int len;
int type;
int page_valid;
u32 *data;
};

enum mlx5e_link_mode {
@@ -715,7 +716,7 @@ int mlx5e_close_locked(struct ifnet *);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, int event);
void mlx5e_rx_cq_comp(struct mlx5_core_cq *);
void mlx5e_tx_cq_comp(struct mlx5_core_cq *);
struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
void mlx5e_tx_que(void *context, int pending);

int mlx5e_open_flow_table(struct mlx5e_priv *priv);
@@ -191,8 +191,7 @@ mlx5e_ethtool_handler(SYSCTL_HANDLER_ARGS)
if_printf(priv->ifp, "Can't set HW_LRO to a device with LRO turned off");
goto done;
}
}
else {
} else {
priv->params.hw_lro_en = false;
}

@@ -243,7 +242,7 @@ mlx5e_get_eeprom_info(struct mlx5e_priv *priv, struct mlx5e_eeprom *eeprom)
case SFF_8024_ID_QSFPPLUS:
case SFF_8024_ID_QSFP28:
if ((data & MLX5_EEPROM_IDENTIFIER_BYTE_MASK) == SFF_8024_ID_QSFP28 ||
((data & MLX5_EEPROM_REVISION_ID_BYTE_MASK) >> 8) >= 0x3) {
eeprom->type = MLX5E_ETH_MODULE_SFF_8636;
eeprom->len = MLX5E_ETH_MODULE_SFF_8636_LEN;
} else {
@@ -281,7 +280,7 @@ mlx5e_get_eeprom(struct mlx5e_priv *priv, struct mlx5e_eeprom *ee)
while (ee->device_addr < ee->len) {
ret = mlx5_query_eeprom(dev, ee->i2c_addr, ee->page_num, ee->device_addr,
ee->len - ee->device_addr, ee->module_num,
ee->data + (ee->device_addr/4), &size_read);
ee->data + (ee->device_addr / 4), &size_read);
if (ret) {
if_printf(priv->ifp, "%s:%d: Failed reading eeprom, "
"error = 0x%02x\n", __func__, __LINE__, ret);
@@ -298,8 +297,8 @@ mlx5e_get_eeprom(struct mlx5e_priv *priv, struct mlx5e_eeprom *ee)
while (ee->device_addr < MLX5E_EEPROM_PAGE_LENGTH) {
ret = mlx5_query_eeprom(dev, ee->i2c_addr, ee->page_num,
ee->device_addr, MLX5E_EEPROM_PAGE_LENGTH - ee->device_addr,
ee->module_num, ee->data + (ee->len/4) +
((ee->device_addr - MLX5E_EEPROM_HIGH_PAGE_OFFSET)/4),
ee->module_num, ee->data + (ee->len / 4) +
((ee->device_addr - MLX5E_EEPROM_HIGH_PAGE_OFFSET) / 4),
&size_read);
if (ret) {
if_printf(priv->ifp, "%s:%d: Failed reading eeprom, "
@@ -321,9 +320,9 @@ mlx5e_print_eeprom(struct mlx5e_eeprom *eeprom)
printf("\nOffset\t\tValues\n");
printf("------\t\t------\n");
while (row < eeprom->len) {
printf("0x%04x\t\t",row);
printf("0x%04x\t\t", row);
for (i = 0; i < 16; i++) {
printf("%02x ", ((u8*)eeprom->data)[j]);
printf("%02x ", ((u8 *)eeprom->data)[j]);
j++;
row++;
}
@@ -336,9 +335,9 @@ mlx5e_print_eeprom(struct mlx5e_eeprom *eeprom)
printf("\nOffset\t\tValues\n");
printf("------\t\t------\n");
while (row < MLX5E_EEPROM_PAGE_LENGTH) {
printf("0x%04x\t\t",row);
printf("0x%04x\t\t", row);
for (i = 0; i < 16; i++) {
printf("%02x ", ((u8*)eeprom->data)[j]);
printf("%02x ", ((u8 *)eeprom->data)[j]);
j++;
row++;
}
@@ -385,8 +384,10 @@ mlx5e_read_eeprom(SYSCTL_HANDLER_ARGS)
error = 0;
goto done;
}

/* Allocate needed length buffer and additional space for the 3rd */
/*
* Allocate needed length buffer and additional space for
* page 0x03
*/
eeprom.data = malloc(eeprom.len + MLX5E_EEPROM_PAGE_LENGTH,
M_MLX5EN, M_WAITOK | M_ZERO);

@@ -396,9 +397,11 @@ mlx5e_read_eeprom(SYSCTL_HANDLER_ARGS)
if_printf(priv->ifp, "%s:%d: Failed reading eeprom\n",
__func__, __LINE__);
error = 0;
/* Continue printing partial information in case of an error */
/*
* Continue printing partial information in case of
* an error
*/
}

mlx5e_print_eeprom(&eeprom);
free(eeprom.data, M_MLX5EN);
}
@@ -498,4 +501,3 @@ mlx5e_create_ethtool(struct mlx5e_priv *priv)
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
mlx5e_read_eeprom, "I", "EEPROM information");
}
@@ -270,7 +270,6 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
}
ai->tt_vec |= (1 << MLX5E_TT_ANY);
}

match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
outer_headers.ethertype);
@@ -288,7 +287,6 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
}
ai->tt_vec |= (1 << MLX5E_TT_IPV4);
}

if (tt_vec & (1 << MLX5E_TT_IPV6)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETHERTYPE_IPV6);
@@ -381,7 +379,6 @@ mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
err = -ENOMEM;
goto add_eth_addr_rule_out;
}

err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, flow_context,
match_criteria);
if (err)
@@ -423,12 +420,12 @@ mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
dest = MLX5_ADDR_OF(flow_context, flow_context, destination);

MLX5_SET(flow_context, flow_context, action,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
MLX5_SET(flow_context, flow_context, destination_list_size, 1);
MLX5_SET(dest_format_struct, dest, destination_type,
MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
MLX5_SET(dest_format_struct, dest, destination_id,
mlx5_get_flow_table_id(priv->ft.main));

match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
@@ -261,7 +261,6 @@ mlx5e_media_change(struct ifnet *dev)
error = EINVAL;
goto done;
}

link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media));

error = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
@@ -279,7 +278,6 @@ mlx5e_media_change(struct ifnet *dev)
error = EINVAL;
goto done;
}

mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN);
mlx5_set_port_status(mdev, MLX5_PORT_UP);
@@ -315,7 +313,7 @@ mlx5e_update_pport_counters(struct mlx5e_priv *priv)
unsigned x;
unsigned y;

in = mlx5_vzalloc(sz);
out = mlx5_vzalloc(sz);
if (in == NULL || out == NULL)
goto free_out;
@@ -334,7 +332,7 @@ mlx5e_update_pport_counters(struct mlx5e_priv *priv)
for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++)
s->arg[y] = be64toh(ptr[x]);
for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM +
MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
s_debug->arg[y] = be64toh(ptr[x]);

MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
@@ -343,8 +341,8 @@ mlx5e_update_pport_counters(struct mlx5e_priv *priv)
s_debug->arg[y] = be64toh(ptr[x]);

MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
s_debug->arg[y] = be64toh(ptr[x]);
free_out:
kvfree(in);
@@ -364,6 +362,7 @@ mlx5e_update_stats_work(struct work_struct *work)
#if (__FreeBSD_version < 1100000)
struct ifnet *ifp = priv->ifp;
#endif

u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
@@ -520,7 +519,7 @@ mlx5e_update_stats_work(struct work_struct *work)
#if (__FreeBSD_version < 1100000)
/* no get_counters interface in fbsd 10 */
ifp->if_ipackets = s->rx_packets;
ifp->if_ierrors = s->rx_error_packets;
ifp->if_iqdrops = s->rx_out_of_buffer;
ifp->if_opackets = s->tx_packets;
ifp->if_oerrors = s->tx_error_packets;
@@ -605,18 +604,18 @@ mlx5e_create_rq(struct mlx5e_channel *c,

/* Create DMA descriptor TAG */
if ((err = -bus_dma_tag_create(
bus_get_dma_tag(mdev->pdev->dev.bsddev),
1, /* any alignment */
0, /* no boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
MJUM16BYTES, /* maxsize */
1, /* nsegments */
MJUM16BYTES, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockfuncarg */
&rq->dma_tag)))
goto done;

err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
@@ -626,10 +625,9 @@ mlx5e_create_rq(struct mlx5e_channel *c,

rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

if (priv->params.hw_lro_en) {
rq->wqe_sz = priv->params.lro_wqe_sz;
}
else {
} else {
rq->wqe_sz = MLX5E_SW2MB_MTU(priv->ifp->if_mtu);
}
if (rq->wqe_sz > MJUM16BYTES) {
@@ -651,7 +649,6 @@ mlx5e_create_rq(struct mlx5e_channel *c,
err = -ENOMEM;
goto err_rq_wq_destroy;
}

for (i = 0; i != wq_sz; i++) {
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
uint32_t byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
@@ -753,7 +750,7 @@ mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, flush_in_error_en, 1);
if (priv->counter_set_id >= 0)
MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
@@ -938,18 +935,18 @@ mlx5e_create_sq(struct mlx5e_channel *c,

/* Create DMA descriptor TAG */
if ((err = -bus_dma_tag_create(
bus_get_dma_tag(mdev->pdev->dev.bsddev),
1, /* any alignment */
0, /* no boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
MLX5E_MAX_TX_PAYLOAD_SIZE, /* maxsize */
MLX5E_MAX_TX_MBUF_FRAGS, /* nsegments */
MLX5E_MAX_TX_MBUF_SIZE, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockfuncarg */
&sq->dma_tag)))
goto done;

err = mlx5_alloc_map_uar(mdev, &sq->uar);
@@ -963,7 +960,7 @@ mlx5e_create_sq(struct mlx5e_channel *c,

sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
sq->uar_map = sq->uar.map;
sq->uar_bf_map = sq->uar.bf_map;
sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

err = mlx5e_alloc_sq_db(sq);
@@ -992,10 +989,10 @@ mlx5e_create_sq(struct mlx5e_channel *c,
err = -ENOMEM;
goto err_free_drbr;
}

TASK_INIT(&sq->sq_task, 0, mlx5e_tx_que, sq);
taskqueue_start_threads(&sq->sq_tq, 1, PI_NET, "%s tx sq",
c->ifp->if_xname);

snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
@@ -1427,7 +1424,7 @@ mlx5e_chan_mtx_destroy(struct mlx5e_channel *c)
static int
mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel_param *cparam,
struct mlx5e_channel * volatile *cp)
struct mlx5e_channel *volatile *cp)
{
struct mlx5e_channel *c;
u8 rx_moderation_mode;
@@ -1505,7 +1502,7 @@ mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
}

static void
mlx5e_close_channel(struct mlx5e_channel * volatile *pp)
mlx5e_close_channel(struct mlx5e_channel *volatile *pp)
{
struct mlx5e_channel *c = *pp;

@@ -1517,7 +1514,7 @@ mlx5e_close_channel(struct mlx5e_channel * volatile *pp)
}

static void
mlx5e_close_channel_wait(struct mlx5e_channel * volatile *pp)
mlx5e_close_channel_wait(struct mlx5e_channel *volatile *pp)
{
struct mlx5e_channel *c = *pp;

@@ -1814,19 +1811,17 @@ mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
MLX5_HASH_FIELD_SEL_IPSEC_SPI)

if (priv->params.hw_lro_en) {
MLX5_SET(tirc, tirc, lro_enable_mask,
MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
MLX5_SET(tirc, tirc, lro_max_msg_sz,
(priv->params.lro_wqe_sz -
ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
/* TODO: add the option to choose timer value dynamically */
MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
MLX5_CAP_ETH(priv->mdev,
lro_timer_supported_periods[2]));
}

switch (tt) {
case MLX5E_TT_ANY:
MLX5_SET(tirc, tirc, disp_type,
@@ -2020,6 +2015,7 @@ mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu)
err = mlx5_set_port_mtu(mdev, 0);
if (err)
return (err);

err = mlx5_query_port_oper_mtu(mdev, &min_mtu);
if (err) {
if_printf(ifp, "Query port minimal MTU failed\n");
@@ -2342,7 +2338,6 @@ mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
ifp->if_capenable ^= IFCAP_RXCSUM;
if (mask & IFCAP_RXCSUM_IPV6)
ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

if (mask & IFCAP_TSO4) {
if (!(IFCAP_TSO4 & ifp->if_capenable) &&
!(IFCAP_TXCSUM & ifp->if_capenable)) {
@@ -2363,7 +2358,6 @@ mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
ifp->if_capenable ^= IFCAP_TSO6;
ifp->if_hwassist ^= CSUM_IP6_TSO;
}

if (mask & IFCAP_VLAN_HWFILTER) {
if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
mlx5e_disable_vlan_filter(priv);
@@ -2374,13 +2368,12 @@ mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
}
if (mask & IFCAP_VLAN_HWTAGGING)
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

if (mask & IFCAP_WOL_MAGIC)
ifp->if_capenable ^= IFCAP_WOL_MAGIC;

VLAN_CAPABILITIES(ifp);
/* turn off LRO means also turn of HW LRO - if it's on */
if (mask & IFCAP_LRO ) {
if (mask & IFCAP_LRO) {
int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
bool need_restart = false;

@@ -2390,7 +2383,7 @@ mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
priv->params.hw_lro_en = false;
need_restart = true;
/* Not sure this is the correct way */
priv->params_ethtool.hw_lro = priv->params.hw_lro_en;
}
}
if (was_opened && need_restart) {
@@ -2405,7 +2398,10 @@ mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
case SIOCGI2C:
ifr = (struct ifreq *)data;

/* Copy from the user-space address ifr_data to the kernel-space address i2c */
/*
* Copy from the user-space address ifr_data to the
* kernel-space address i2c
*/
error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
if (error)
break;
@@ -2515,11 +2511,10 @@ mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev,
priv->params.default_vlan_prio = 0;
priv->counter_set_id = -1;

/*
* hw lro is currently defaulted to off.
* when it won't anymore we will consider the
* HW capability: "!!MLX5_CAP_ETH(mdev, lro_cap)"
*/
/*
* hw lro is currently defaulted to off. when it won't anymore we
* will consider the HW capability: "!!MLX5_CAP_ETH(mdev, lro_cap)"
*/
priv->params.hw_lro_en = false;
priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

@@ -2593,11 +2588,12 @@ mlx5e_priv_mtx_destroy(struct mlx5e_priv *priv)
static int
sysctl_firmware(SYSCTL_HANDLER_ARGS)
{
/* %d.%d%.d the string format.
/*
* %d.%d%.d the string format.
* fw_rev_{maj,min,sub} return u16, 2^16 = 65536.
* We need at most 5 chars to store that.
* it also has: two "." and NULL at the end.
* Which means we need 18 (5*3 + 3) chars at most.
* It also has: two "." and NULL at the end, which means we need 18
* (5*3 + 3) chars at most.
*/
char fw[18];
struct mlx5e_priv *priv = arg1;
@@ -2702,6 +2698,7 @@ mlx5e_create_ifp(struct mlx5_core_dev *mdev)
mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
goto err_free_sysctl;
}

/* HW sysctl tree */
child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev));
priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child,
@@ -2710,9 +2707,7 @@ mlx5e_create_ifp(struct mlx5_core_dev *mdev)
mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
goto err_free_sysctl;
}

mlx5e_build_ifp_priv(mdev, priv, ncv);

err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
if (err) {
if_printf(ifp, "%s: mlx5_alloc_map_uar failed, %d\n",
@@ -2725,15 +2720,12 @@ mlx5e_create_ifp(struct mlx5_core_dev *mdev)
__func__, err);
goto err_unmap_free_uar;
}

err = mlx5_alloc_transport_domain(mdev, &priv->tdn);

if (err) {
if_printf(ifp, "%s: mlx5_alloc_transport_domain failed, %d\n",
__func__, err);
goto err_dealloc_pd;
}

err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
if (err) {
if_printf(ifp, "%s: mlx5e_create_mkey failed, %d\n",
@@ -95,7 +95,7 @@ mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
}

static void
mlx5e_lro_update_hdr(struct mbuf* mb, struct mlx5_cqe64 *cqe)
mlx5e_lro_update_hdr(struct mbuf *mb, struct mlx5_cqe64 *cqe)
{
/* TODO: consider vlans, ip options, ... */
struct ether_header *eh;
@@ -109,8 +109,8 @@ mlx5e_lro_update_hdr(struct mbuf* mb, struct mlx5_cqe64 *cqe)
eh_type = ntohs(eh->ether_type);

u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
(CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));

/* TODO: consider vlan */
u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETHER_HDR_LEN;
@@ -131,15 +131,16 @@ mlx5e_lro_update_hdr(struct mbuf* mb, struct mlx5_cqe64 *cqe)
ts_ptr = (uint32_t *)(th + 1);

if (get_cqe_lro_tcppsh(cqe))
th->th_flags |= TH_PUSH;

if (tcp_ack) {
th->th_flags |= TH_ACK;
th->th_ack = cqe->lro_ack_seq_num;
th->th_win = cqe->lro_tcp_win;

/* FreeBSD handles only 32bit aligned timestamp
* right after the TCP hdr
/*
* FreeBSD handles only 32bit aligned timestamp right after
* the TCP hdr
* +--------+--------+--------+--------+
* | NOP | NOP | TSopt | 10 |
* +--------+--------+--------+--------+
@@ -152,7 +153,8 @@ mlx5e_lro_update_hdr(struct mbuf* mb, struct mlx5_cqe64 *cqe)
(__predict_true(*ts_ptr) == ntohl(TCPOPT_NOP << 24 |
TCPOPT_NOP << 16 | TCPOPT_TIMESTAMP << 8 |
TCPOLEN_TIMESTAMP))) {
/* cqe->timestamp is 64bit long.
/*
* cqe->timestamp is 64bit long.
* [0-31] - timestamp.
* [32-64] - timestamp echo replay.
*/
@@ -160,15 +162,14 @@ mlx5e_lro_update_hdr(struct mbuf* mb, struct mlx5_cqe64 *cqe)
ts_ptr[2] = *((uint32_t *)&cqe->timestamp + 1);
}
}

if (ip4) {
ip4->ip_ttl = cqe->lro_min_ttl;
ip4->ip_len = cpu_to_be16(tot_len);
ip4->ip_sum = 0;
ip4->ip_sum = in_cksum(mb, ip4->ip_hl << 2);
} else {
ip6->ip6_hlim = cqe->lro_min_ttl;
ip6->ip6_plen = cpu_to_be16(tot_len -
sizeof(struct ip6_hdr));
}
/* TODO: handle tcp checksum */
@@ -180,7 +181,7 @@ mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe,
u32 cqe_bcnt)
{
struct ifnet *ifp = rq->ifp;
int lro_num_seg; /* HW LRO session aggregated packets counter */

lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
if (lro_num_seg > 1) {
@@ -195,6 +196,7 @@ mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe,
mb->m_pkthdr.flowid = be32_to_cpu(cqe->rss_hash_result);
else
mb->m_pkthdr.flowid = rq->ix;

M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
mb->m_pkthdr.rcvif = ifp;

@@ -306,6 +308,7 @@ mlx5e_rx_cq_comp(struct mlx5_core_cq *mcq)

#ifdef HAVE_PER_CQ_EVENT_PACKET
struct mbuf *mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rq->wqe_sz);

if (mb != NULL) {
/* this code is used for debugging purpose only */
mb->m_pkthdr.len = mb->m_len = 15;
@@ -166,8 +166,10 @@ mlx5e_get_header_size(struct mbuf *mb)
return (eth_hdr_len);
}

/* The return value is not going back to the stack because of
* the drbr */
/*
* The return value is not going back to the stack because of
* the drbr
*/
static int
mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
{
@@ -184,8 +186,10 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
u16 pi;
u8 opcode;

/* Return ENOBUFS if the queue is full, this may trigger reinsertion
* of the mbuf into the drbr (see mlx5e_xmit_locked) */
/*
* Return ENOBUFS if the queue is full, this may trigger reinsertion
* of the mbuf into the drbr (see mlx5e_xmit_locked)
*/
if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS))) {
return (ENOBUFS);
}
@@ -193,7 +197,7 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
/* Align SQ edge with NOPs to avoid WQE wrap around */
pi = ((~sq->pc) & sq->wq.sz_m1);
if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
/* send one multi NOP message instead of many */
/* Send one multi NOP message instead of many */
mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS, false);
pi = ((~sq->pc) & sq->wq.sz_m1);
if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
@@ -209,7 +213,7 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)

memset(wqe, 0, sizeof(*wqe));

/* send a copy of the frame to the BPF listener, if any */
/* Send a copy of the frame to the BPF listener, if any */
if (ifp != NULL && ifp->if_bpf != NULL)
ETHER_BPF_MTAP(ifp, mb);

@@ -219,10 +223,9 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
}
if ( wqe->eth.cs_flags == 0 ) {
if (wqe->eth.cs_flags == 0) {
sq->stats.csum_offload_none++;
}

if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
u32 payload_len;
u32 mss = mb->m_pkthdr.tso_segsz;
@@ -249,7 +252,8 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
if (mb->m_flags & M_VLANTAG) {
struct ether_vlan_header *eh =
(struct ether_vlan_header *)wqe->eth.inline_hdr_start;
/* range checks */

/* Range checks */
if (ihs > (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN))
ihs = (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN);
else if (ihs < ETHER_HDR_LEN) {
@@ -258,14 +262,14 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
}
m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
m_adj(mb, ETHER_HDR_LEN);
/* insert 4 bytes VLAN tag into data stream */
/* Insert 4 bytes VLAN tag into data stream */
eh->evl_proto = eh->evl_encap_proto;
eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
/* copy rest of header data, if any */
/* Copy rest of header data, if any */
m_copydata(mb, 0, ihs - ETHER_HDR_LEN, (caddr_t)(eh + 1));
m_adj(mb, ihs - ETHER_HDR_LEN);
/* extend header by 4 bytes */
/* Extend header by 4 bytes */
ihs += ETHER_VLAN_ENCAP_LEN;
} else {
m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
@@ -281,10 +285,10 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
}
dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;

/* trim off empty mbufs */
/* Trim off empty mbufs */
while (mb->m_len == 0) {
mb = m_free(mb);
/* check if all data has been inlined */
/* Check if all data has been inlined */
if (mb == NULL)
goto skip_dma;
}
@@ -292,7 +296,10 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
mb, segs, &nsegs, BUS_DMA_NOWAIT);
if (err == EFBIG) {
/* Update *mbp before defrag in case it was trimmed in the loop above */
/*
* Update *mbp before defrag in case it was trimmed in the
* loop above
*/
*mbp = mb;
/* Update statistics */
sq->stats.defragged++;
@@ -306,7 +313,7 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
mb, segs, &nsegs, BUS_DMA_NOWAIT);
}
/* catch errors */
/* Catch errors */
if (err != 0) {
goto tx_drop;
}
@@ -327,12 +334,12 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;

/* store pointer to mbuf */
/* Store pointer to mbuf */
sq->mbuf[pi].mbuf = mb;
sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
sq->pc += sq->mbuf[pi].num_wqebbs;

/* make sure all mbuf data is written to RAM */
/* Make sure all mbuf data is written to RAM */
if (mb != NULL)
bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map, BUS_DMASYNC_PREWRITE);

@@ -370,7 +377,7 @@ mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)

ci = sqcc & sq->wq.sz_m1;
mb = sq->mbuf[ci].mbuf;
sq->mbuf[ci].mbuf = NULL; /* safety clear */
sq->mbuf[ci].mbuf = NULL; /* Safety clear */

if (mb == NULL) {
if (sq->mbuf[ci].num_bytes == 0) {
@@ -382,7 +389,7 @@ mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);

/* free transmitted mbuf */
/* Free transmitted mbuf */
m_freem(mb);
}
sqcc += sq->mbuf[ci].num_wqebbs;
@@ -390,7 +397,7 @@ mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)

mlx5_cqwq_update_db_record(&sq->cq.wq);

/* ensure cq space is freed before enabling more cqes */
/* Ensure cq space is freed before enabling more cqes */
wmb();

sq->cc = sqcc;
@@ -402,37 +409,38 @@ mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
static int
mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
struct mbuf *next;
int err = 0;

if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
if (mb)
err = drbr_enqueue(ifp, sq->br, mb);
return (err);
}

if (mb != NULL)
/* If we can't insert mbuf into drbr, try to xmit anyway.
* We keep the error we got so we could return that after xmit.
*/
err = drbr_enqueue(ifp, sq->br, mb);
if (mb != NULL)
/*
* If we can't insert mbuf into drbr, try to xmit anyway.
* We keep the error we got so we could return that after xmit.
*/
err = drbr_enqueue(ifp, sq->br, mb);

/* Process the queue */
while ((next = drbr_peek(ifp, sq->br)) != NULL) {
if (mlx5e_sq_xmit(sq, &next) != 0) {
if (next == NULL) {
drbr_advance(ifp, sq->br);
} else {
drbr_putback(ifp, sq->br, next);
atomic_store_rel_int(&sq->queue_state, MLX5E_SQ_FULL);
}
break;
}
drbr_advance(ifp, sq->br);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
break;
}
return (err);
}

int
@@ -443,17 +451,16 @@ mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)

sq = mlx5e_select_queue(ifp, mb);
if (unlikely(sq == NULL)) {
/* invalid send queue */
/* Invalid send queue */
m_freem(mb);
return (ENXIO);
}

if (mtx_trylock(&sq->lock)) {
ret = mlx5e_xmit_locked(ifp, sq, mb);
mtx_unlock(&sq->lock);
} else {
ret = drbr_enqueue(ifp, sq->br, mb);
taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
}

return (ret);
@@ -72,7 +72,7 @@ __FBSDID("$FreeBSD$");
static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, tlro,
CTLFLAG_RW, 0, "TCP turbo LRO parameters");

static MALLOC_DEFINE(M_TLRO, "TLRO", "Turbo LRO");

static int tlro_min_rate = 20; /* Hz */

@@ -124,7 +124,7 @@ tcp_tlro_info_save_timestamp(struct tlro_mbuf_data *pinfo)
(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
return (0);

/* save timestamps */
/* Save timestamps */
pinfo->tcp_ts = ts_ptr[1];
pinfo->tcp_ts_reply = ts_ptr[2];
return (1);
@@ -145,7 +145,7 @@ tcp_tlro_info_restore_timestamp(struct tlro_mbuf_data *pinfoa,
(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
return;

/* restore timestamps */
/* Restore timestamps */
ts_ptr[1] = pinfob->tcp_ts;
ts_ptr[2] = pinfob->tcp_ts_reply;
}
@@ -167,7 +167,7 @@ tcp_tlro_extract_header(struct tlro_mbuf_data *pinfo, struct mbuf *m, int seq)
int diff;
int off;

/* fill in information */
/* Fill in information */
pinfo->head = m;
pinfo->last_tick = ticks;
pinfo->sequence = seq;
@@ -274,31 +274,31 @@ tcp_tlro_extract_header(struct tlro_mbuf_data *pinfo, struct mbuf *m, int seq)
sizeof(tcp->th_dport));
phdr += sizeof(tcp->th_sport) +
sizeof(tcp->th_dport);
/* store TCP header length */
/* Store TCP header length */
*phdr++ = tcp->th_off;
if (tcp->th_off < (sizeof(*tcp) >> 2))
goto error;

/* compute offset to data payload */
/* Compute offset to data payload */
pinfo->tcp_len = (tcp->th_off << 2);
off += pinfo->tcp_len;

/* store more info */
/* Store more info */
pinfo->data_off = off;
pinfo->tcp = tcp;

/* try to save timestamp, if any */
/* Try to save timestamp, if any */
*phdr++ = tcp_tlro_info_save_timestamp(pinfo);

/* verify offset and IP/TCP length */
/* Verify offset and IP/TCP length */
if (off > m->m_pkthdr.len ||
pinfo->ip_len < pinfo->tcp_len)
goto error;

/* compute data payload length */
/* Compute data payload length */
pinfo->data_len = (pinfo->ip_len - pinfo->tcp_len - pinfo->ip_hdrlen);

/* trim any padded data */
/* Trim any padded data */
diff = (m->m_pkthdr.len - off) - pinfo->data_len;
if (diff != 0) {
if (diff < 0)
@@ -306,9 +306,9 @@ tcp_tlro_extract_header(struct tlro_mbuf_data *pinfo, struct mbuf *m, int seq)
else
m_adj(m, -diff);
}
/* compute header length */
/* Compute header length */
pinfo->buf_length = phdr - (uint8_t *)pinfo->buf;
/* zero-pad rest of buffer */
/* Zero-pad rest of buffer */
memset(phdr, 0, TLRO_MAX_HEADER - pinfo->buf_length);
return;
error:
@@ -422,47 +422,47 @@ tcp_tlro_combine(struct tlro_ctrl *tlro, int force)
break;
}
if (pinfoa->buf_length == 0) {
/* forward traffic which cannot be combined */
/* Forward traffic which cannot be combined */
for (z = y; z != x; z++) {
/* just forward packets */
/* Just forward packets */
pinfob = tlro->mbuf[z].data;

m = pinfob->head;

/* reset info structure */
/* Reset info structure */
pinfob->head = NULL;
pinfob->buf_length = 0;

/* do stats */
/* Do stats */
tlro->lro_flushed++;

/* input packet to network layer */
/* Input packet to network layer */
(*tlro->ifp->if_input) (tlro->ifp, m);
}
y = z;
continue;
}

/* compute current checksum subtracted some header parts */
/* Compute current checksum subtracted some header parts */
temp = (pinfoa->ip_len - pinfoa->ip_hdrlen);
cs = ((temp & 0xFF) << 8) + ((temp & 0xFF00) >> 8) +
tcp_tlro_csum((uint32_p_t *)pinfoa->tcp, pinfoa->tcp_len);

/* append all fragments into one block */
/* Append all fragments into one block */
for (z = y + 1; z != x; z++) {

pinfob = tlro->mbuf[z].data;

/* check for command packets */
/* Check for command packets */
if ((pinfoa->tcp->th_flags & ~(TH_ACK | TH_PUSH)) ||
(pinfob->tcp->th_flags & ~(TH_ACK | TH_PUSH)))
break;

/* check if there is enough space */
/* Check if there is enough space */
if ((pinfoa->ip_len + pinfob->data_len) > tlro_max_packet)
break;

/* try to append the new segment */
/* Try to append the new segment */
temp = ntohl(pinfoa->tcp->th_seq) + pinfoa->data_len;
if (temp != (int)ntohl(pinfob->tcp->th_seq))
break;
@@ -470,7 +470,7 @@ tcp_tlro_combine(struct tlro_ctrl *tlro, int force)
temp = pinfob->ip_len - pinfob->ip_hdrlen;
cs += ((temp & 0xFF) << 8) + ((temp & 0xFF00) >> 8) +
tcp_tlro_csum((uint32_p_t *)pinfob->tcp, pinfob->tcp_len);
/* remove fields which appear twice */
/* Remove fields which appear twice */
cs += (IPPROTO_TCP << 8);
if (pinfob->ip_version == 4) {
cs += tcp_tlro_csum((uint32_p_t *)&pinfob->ip.v4->ip_src, 4);
@@ -479,45 +479,45 @@ tcp_tlro_combine(struct tlro_ctrl *tlro, int force)
cs += tcp_tlro_csum((uint32_p_t *)&pinfob->ip.v6->ip6_src, 16);
cs += tcp_tlro_csum((uint32_p_t *)&pinfob->ip.v6->ip6_dst, 16);
}
/* remainder computation */
/* Remainder computation */
while (cs > 0xffff)
cs = (cs >> 16) + (cs & 0xffff);

/* update window and ack sequence number */
/* Update window and ack sequence number */
pinfoa->tcp->th_ack = pinfob->tcp->th_ack;
pinfoa->tcp->th_win = pinfob->tcp->th_win;

/* check if we should restore the timestamp */
/* Check if we should restore the timestamp */
tcp_tlro_info_restore_timestamp(pinfoa, pinfob);

/* accumulate TCP flags */
/* Accumulate TCP flags */
pinfoa->tcp->th_flags |= pinfob->tcp->th_flags;

/* update lengths */
pinfoa->ip_len += pinfob->data_len;
pinfoa->data_len += pinfob->data_len;

/* clear mbuf pointer - packet is accumulated */
/* Clear mbuf pointer - packet is accumulated */
m = pinfob->head;

/* reset info structure */
/* Reset info structure */
pinfob->head = NULL;
pinfob->buf_length = 0;

/* append data to mbuf [y] */
/* Append data to mbuf [y] */
m_adj(m, pinfob->data_off);
/* delete mbuf tags, if any */
/* Delete mbuf tags, if any */
m_tag_delete_chain(m, NULL);
/* clear packet header flag */
/* Clear packet header flag */
m->m_flags &= ~M_PKTHDR;

/* concat mbuf(s) to end of list */
/* Concat mbuf(s) to end of list */
pinfoa->pprev[0] = m;
m = m_last(m);
pinfoa->pprev = &m->m_next;
pinfoa->head->m_pkthdr.len += pinfob->data_len;
}
/* compute new TCP header checksum */
/* Compute new TCP header checksum */
pinfoa->tcp->th_sum = 0;

temp = pinfoa->ip_len - pinfoa->ip_hdrlen;
@@ -525,14 +525,14 @@ tcp_tlro_combine(struct tlro_ctrl *tlro, int force)
tcp_tlro_csum((uint32_p_t *)pinfoa->tcp, pinfoa->tcp_len) +
((temp & 0xFF) << 8) + ((temp & 0xFF00) >> 8);

/* remainder computation */
/* Remainder computation */
while (cs > 0xffff)
cs = (cs >> 16) + (cs & 0xffff);

/* update new checksum */
/* Update new checksum */
pinfoa->tcp->th_sum = ~htole16(cs);

/* update IP length, if any */
/* Update IP length, if any */
if (pinfoa->ip_version == 4) {
if (pinfoa->ip_len > IP_MAXPACKET) {
M_HASHTYPE_SET(pinfoa->head, M_HASHTYPE_LRO_TCP);
@@ -551,34 +551,34 @@ tcp_tlro_combine(struct tlro_ctrl *tlro, int force)
}

temp = curr_ticks - pinfoa->last_tick;
/* check if packet should be forwarded */
/* Check if packet should be forwarded */
if (force != 0 || z != x || temp >= ticks_limit ||
pinfoa->data_len == 0) {

/* compute new IPv4 header checksum */
/* Compute new IPv4 header checksum */
if (pinfoa->ip_version == 4) {
pinfoa->ip.v4->ip_sum = 0;
cs = tcp_tlro_csum((uint32_p_t *)pinfoa->ip.v4,
sizeof(*pinfoa->ip.v4));
pinfoa->ip.v4->ip_sum = ~htole16(cs);
}
/* forward packet */
/* Forward packet */
m = pinfoa->head;

/* reset info structure */
/* Reset info structure */
pinfoa->head = NULL;
pinfoa->buf_length = 0;

/* do stats */
/* Do stats */
tlro->lro_flushed++;

/* input packet to network layer */
/* Input packet to network layer */
(*tlro->ifp->if_input) (tlro->ifp, m);
}
y = z;
}

/* cleanup all NULL heads */
/* Cleanup all NULL heads */
for (y = 0; y != tlro->curr; y++) {
if (tlro->mbuf[y].data->head == NULL) {
for (z = y + 1; z != tlro->curr; z++) {
@@ -622,23 +622,23 @@ tcp_tlro_init(struct tlro_ctrl *tlro, struct ifnet *ifp,
ssize_t size;
uint32_t x;

/* set zero defaults */
/* Set zero defaults */
memset(tlro, 0, sizeof(*tlro));

/* compute size needed for data */
/* Compute size needed for data */
size = (sizeof(struct tlro_mbuf_ptr) * max_mbufs) +
(sizeof(struct tlro_mbuf_data) * max_mbufs);

/* range check */
/* Range check */
if (max_mbufs <= 0 || size <= 0 || ifp == NULL)
return (EINVAL);

/* setup tlro control structure */
/* Setup tlro control structure */
tlro->mbuf = malloc(size, M_TLRO, M_WAITOK | M_ZERO);
tlro->max = max_mbufs;
tlro->ifp = ifp;

/* setup pointer array */
/* Setup pointer array */
for (x = 0; x != tlro->max; x++) {
tlro->mbuf[x].data = ((struct tlro_mbuf_data *)
&tlro->mbuf[max_mbufs]) + x;
@@ -653,24 +653,24 @@ tcp_tlro_free(struct tlro_ctrl *tlro)
struct mbuf *m;
uint32_t y;

/* check if not setup */
/* Check if not setup */
if (tlro->mbuf == NULL)
return;
/* free MBUF array and any leftover MBUFs */
/* Free MBUF array and any leftover MBUFs */
for (y = 0; y != tlro->max; y++) {

pinfo = tlro->mbuf[y].data;

m = pinfo->head;

/* reset info structure */
/* Reset info structure */
pinfo->head = NULL;
pinfo->buf_length = 0;

m_freem(m);
}
free(tlro->mbuf, M_TLRO);
/* reset buffer */
/* Reset buffer */
memset(tlro, 0, sizeof(*tlro));
}
@@ -42,7 +42,7 @@ struct tlro_mbuf_data {
#ifdef INET6
struct ip6_hdr *v6;
#endif
} ip;
struct tcphdr *tcp;
struct mbuf *head;
struct mbuf **pprev;
@@ -56,7 +56,7 @@ struct tlro_mbuf_data {
uint32_t tcp_ts_reply;
uint16_t tcp_len;
uint8_t ip_version;
uint8_t buf_length; /* in 32-bit words */
uint64_t buf[TLRO_MAX_HEADER / 8];
} __aligned(256);