1
0
mirror of https://git.FreeBSD.org/src.git synced 2024-12-18 10:35:55 +00:00

Add support for CQE zipping. CQE zipping reduces PCI overhead by

coalescing and zipping multiple CQEs into a single merged CQE. The
feature is enabled by default and can be disabled by a sysctl.

To implement this feature, mlx5_cqwq_pop() has been separated from
mlx5e_get_cqe().

MFC after:	1 week
Submitted by:	Mark Bloch <markb@mellanox.com>
Differential Revision:	https://reviews.freebsd.org/D4598
Sponsored by:	Mellanox Technologies
This commit is contained in:
Hans Petter Selasky 2015-12-28 18:50:18 +00:00
parent ec0143b260
commit 90cc1c7724
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=292838
7 changed files with 131 additions and 3 deletions

View File

@ -1042,6 +1042,7 @@ enum {
MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1,
MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2,
};
/* MLX5 DEV CAPs */
/* TODO: EAT.ME */
@ -1219,4 +1220,36 @@ struct mlx5_ifc_mcia_reg_bits {
};
#define MLX5_CMD_OP_QUERY_EEPROM 0x93c
/*
 * Mini CQE as used by CQE compression (zipping): eight of these are
 * packed into one 64-byte CQE slot following the "title" CQE.  Only the
 * per-packet fields are carried here; all other fields are inherited
 * from the title CQE during decompression.  Fields are copied verbatim
 * into mlx5_cqe64 fields, so they carry the same big-endian on-wire
 * representation -- NOTE(review): confirm against the PRM.
 */
struct mlx5_mini_cqe8 {
union {
u32 rx_hash_result;	/* RX hash result (hash mini-CQE format) */
u32 checksum;		/* packet checksum (checksum mini-CQE format) */
struct {
u16 wqe_counter;
u8 s_wqe_opcode;
u8 reserved;
} s_wqe_info;		/* send-side completion info */
};
u32 byte_cnt;			/* byte count of the completed packet */
};
/*
 * CQE format values encoded in bits [3:2] of mlx5_cqe64.op_own
 * (extracted by mlx5_get_cqe_format() via MLX5E_CQE_FORMAT_MASK).
 * MLX5_COMPRESSED marks the title CQE of a compressed session.
 */
enum {
MLX5_NO_INLINE_DATA,
MLX5_INLINE_DATA32_SEG,
MLX5_INLINE_DATA64_SEG,
MLX5_COMPRESSED,
};
/*
 * Mini-CQE response format selected when creating a CQ with compression
 * enabled (cqc.mini_cqe_res_format): the union in struct mlx5_mini_cqe8
 * holds either the RX hash result or the checksum accordingly.
 */
enum mlx5_exp_cqe_zip_recv_type {
MLX5_CQE_FORMAT_HASH,
MLX5_CQE_FORMAT_CSUM,
};
#define MLX5E_CQE_FORMAT_MASK 0xc

/*
 * Return the CQE format encoded in bits [3:2] of op_own.  A value of
 * MLX5_COMPRESSED indicates the title CQE of a compressed session.
 */
static inline int mlx5_get_cqe_format(const struct mlx5_cqe64 *cqe)
{
	int format = cqe->op_own & MLX5E_CQE_FORMAT_MASK;

	return (format >> 2);
}
#endif /* MLX5_DEVICE_H */

View File

@ -370,6 +370,7 @@ struct mlx5e_params {
u16 tx_cq_moderation_pkts;
u16 min_rx_wqes;
bool hw_lro_en;
bool cqe_zipping_en;
u32 lro_wqe_sz;
u16 rx_hash_log_tbl_sz;
};
@ -390,7 +391,8 @@ struct mlx5e_params {
m(+1, u64 tx_coalesce_usecs, "tx_coalesce_usecs", "Limit in usec for joining tx packets") \
m(+1, u64 tx_coalesce_pkts, "tx_coalesce_pkts", "Maximum number of tx packets to join") \
m(+1, u64 tx_coalesce_mode, "tx_coalesce_mode", "0: EQE mode 1: CQE mode") \
m(+1, u64 hw_lro, "hw_lro", "set to enable hw_lro")
m(+1, u64 hw_lro, "hw_lro", "set to enable hw_lro") \
m(+1, u64 cqe_zipping, "cqe_zipping", "0 : CQE zipping disabled")
#define MLX5E_PARAMS_NUM (0 MLX5E_PARAMS(MLX5E_STATS_COUNT))

View File

@ -204,6 +204,18 @@ mlx5e_ethtool_handler(SYSCTL_HANDLER_ARGS)
priv->params.hw_lro_en = false;
}
if (&priv->params_ethtool.arg[arg2] ==
&priv->params_ethtool.cqe_zipping) {
if (priv->params_ethtool.cqe_zipping &&
MLX5_CAP_GEN(priv->mdev, cqe_compression)) {
priv->params.cqe_zipping_en = true;
priv->params_ethtool.cqe_zipping = 1;
} else {
priv->params.cqe_zipping_en = false;
priv->params_ethtool.cqe_zipping = 0;
}
}
if (was_opened)
mlx5e_open_locked(priv->ifp);
done:
@ -472,6 +484,7 @@ mlx5e_create_ethtool(struct mlx5e_priv *priv)
priv->params_ethtool.tx_coalesce_usecs = priv->params.tx_cq_moderation_usec;
priv->params_ethtool.tx_coalesce_pkts = priv->params.tx_cq_moderation_pkts;
priv->params_ethtool.hw_lro = priv->params.hw_lro_en;
priv->params_ethtool.cqe_zipping = priv->params.cqe_zipping_en;
/* create root node */
node = SYSCTL_ADD_NODE(&priv->sysctl_ctx,

View File

@ -1604,6 +1604,16 @@ mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
{
void *cqc = param->cqc;
/*
 * TODO: The sysctl controlling this feature is a boolean for now, which
 * means we only support the CSUM format; once HASH is implemented we
 * will need to address that.
 */
if (priv->params.cqe_zipping_en) {
MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
MLX5_SET(cqc, cqc, cqe_compression_en, 1);
}
MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);
@ -2571,6 +2581,8 @@ mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev,
priv->params.hw_lro_en = false;
priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
priv->params.cqe_zipping_en = !!MLX5_CAP_GEN(mdev, cqe_compression);
priv->mdev = mdev;
priv->params.num_channels = num_comp_vectors;
priv->order_base_2_num_channels = order_base_2(num_comp_vectors);

View File

@ -248,6 +248,69 @@ mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe,
}
}
/*
 * Copy the 64-byte CQE at consumer counter "cc" (wrapped to the ring
 * size via sz_m1) out of the completion queue into "data".
 */
static inline void
mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cc, void *data)
{
	void *slot = mlx5_cqwq_get_wqe(&cq->wq, cc & cq->wq.sz_m1);

	memcpy(data, slot, sizeof(struct mlx5_cqe64));
}
/*
 * Store the 64-byte CQE in "data" back into the completion queue at
 * consumer counter "cc" (wrapped to the ring size via sz_m1).
 */
static inline void
mlx5e_write_cqe_slot(struct mlx5e_cq *cq, u32 cc, void *data)
{
	void *slot = mlx5_cqwq_get_wqe(&cq->wq, cc & cq->wq.sz_m1);

	memcpy(slot, data, sizeof(struct mlx5_cqe64));
}
/*
 * Merge one mini CQE into the title CQE, producing the i'th expanded
 * CQE of a compressed session.  Only the per-packet fields (byte count,
 * WQE counter, checksum) are taken from the mini CQE; all other title
 * fields are kept.  byte_cnt and checksum are copied verbatim (both
 * sides hold the on-wire representation), while the WQE counter is
 * advanced by i, wrapped to the ring size, and re-encoded big-endian.
 * The low bit of op_own (ownership) is recomputed for slot (cc + i) so
 * the expanded CQE is valid at the position it is written back to; the
 * high nibble (opcode) is preserved.
 */
static inline void
mlx5e_decompress_cqe(struct mlx5e_cq *cq, struct mlx5_cqe64 *title,
struct mlx5_mini_cqe8 *mini,
u16 wqe_counter, int i)
{
title->byte_cnt = mini->byte_cnt;
title->wqe_counter = cpu_to_be16((wqe_counter + i) & cq->wq.sz_m1);
title->check_sum = mini->checksum;
title->op_own = (title->op_own & 0xf0) |
(((cq->wq.cc + i) >> cq->wq.log_sz) & 1);
}
/* Number of mini CQEs that fit in one 64-byte CQE slot. */
#define MLX5E_MINI_ARRAY_SZ 8
/*
 * Make sure the structs are not packed differently: one CQE slot must
 * hold exactly MLX5E_MINI_ARRAY_SZ mini CQEs, since the decompression
 * code reads whole slots into mini_array[].
 */
CTASSERT(sizeof(struct mlx5_cqe64) ==
sizeof(struct mlx5_mini_cqe8) * MLX5E_MINI_ARRAY_SZ);
/*
 * Expand a compressed CQE session in place.
 *
 * The CQE at the current consumer counter is the "title" CQE: its
 * byte_cnt field holds the number of packets (mini CQEs) in the
 * session, and the following slot(s) hold arrays of mini CQEs.  Each
 * mini CQE is merged with the title and the result is written back
 * over the ring slots starting at cq->wq.cc, so the regular RX polling
 * loop can then consume them as ordinary uncompressed CQEs.  A new
 * slot of mini CQEs is fetched every MLX5E_MINI_ARRAY_SZ iterations.
 */
static void
mlx5e_decompress_cqes(struct mlx5e_cq *cq)
{
struct mlx5_mini_cqe8 mini_array[MLX5E_MINI_ARRAY_SZ];
struct mlx5_cqe64 title;
u32 cqe_count;
u32 i = 0;
u16 title_wqe_counter;
mlx5e_read_cqe_slot(cq, cq->wq.cc, &title);
title_wqe_counter = be16_to_cpu(title.wqe_counter);
/* in a title CQE, byte_cnt carries the session's packet count */
cqe_count = be32_to_cpu(title.byte_cnt);
/* Make sure we won't overflow */
KASSERT(cqe_count <= cq->wq.sz_m1,
("%s: cqe_count %u > cq->wq.sz_m1 %u", __func__,
cqe_count, cq->wq.sz_m1));
/* first block of mini CQEs lives in the slot after the title */
mlx5e_read_cqe_slot(cq, cq->wq.cc + 1, mini_array);
while (true) {
mlx5e_decompress_cqe(cq, &title,
&mini_array[i % MLX5E_MINI_ARRAY_SZ],
title_wqe_counter, i);
/* overwrite ring slot i with the expanded CQE */
mlx5e_write_cqe_slot(cq, cq->wq.cc + i, &title);
i++;
if (i == cqe_count)
break;
/* refill mini_array from the next slot when exhausted */
if (i % MLX5E_MINI_ARRAY_SZ == 0)
mlx5e_read_cqe_slot(cq, cq->wq.cc + i, mini_array);
}
}
static int
mlx5e_poll_rx_cq(struct mlx5e_rq *rq, int budget)
{
@ -268,6 +331,11 @@ mlx5e_poll_rx_cq(struct mlx5e_rq *rq, int budget)
if (!cqe)
break;
if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)
mlx5e_decompress_cqes(&rq->cq);
mlx5_cqwq_pop(&rq->cq.wq);
wqe_counter_be = cqe->wqe_counter;
wqe_counter = be16_to_cpu(wqe_counter_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);

View File

@ -383,6 +383,8 @@ mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
if (!cqe)
break;
mlx5_cqwq_pop(&sq->cq.wq);
ci = sqcc & sq->wq.sz_m1;
mb = sq->mbuf[ci].mbuf;
sq->mbuf[ci].mbuf = NULL; /* Safety clear */

View File

@ -37,8 +37,6 @@ mlx5e_get_cqe(struct mlx5e_cq *cq)
if ((cqe->op_own ^ mlx5_cqwq_get_wrap_cnt(&cq->wq)) & MLX5_CQE_OWNER_MASK)
return (NULL);
mlx5_cqwq_pop(&cq->wq);
/* ensure cqe content is read after cqe ownership bit */
rmb();