
mlx5en: add IPSEC_OFFLOAD support

Right now, only IPv4 transport mode with aes-gcm ESP is supported.
The driver also cooperates with NAT-T and obeys socket policies, which
allows IKE daemons such as StrongSwan to work.

Sponsored by:	NVIDIA networking
Konstantin Belousov 2024-07-22 14:31:35 +03:00
parent 65f264dcf7
commit e23731db48
53 changed files with 11237 additions and 4351 deletions
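
To illustrate the socket-policy point in the commit message: an IKE daemon normally installs BYPASS socket policies on its IKE/NAT-T UDP socket so that its own traffic escapes IPsec processing, and the offload path honoring such policies is what keeps daemons like StrongSwan working. The sketch below is not part of this commit; it is a minimal userland example of the standard ipsec_set_policy(3)/setsockopt(2) sequence, assuming an already-open IPv4 UDP socket s, and the helper names are made up for illustration.

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netipsec/ipsec.h>
#include <err.h>
#include <stdlib.h>
#include <string.h>

/* Apply one textual policy to the socket; link with -lipsec. */
static void
set_sock_policy(int s, char *policy)
{
	char *buf;

	/* Compile the text policy into a sadb_x_policy buffer. */
	buf = ipsec_set_policy(policy, strlen(policy));
	if (buf == NULL)
		errx(1, "ipsec_set_policy: %s", ipsec_strerror());
	if (setsockopt(s, IPPROTO_IP, IP_IPSEC_POLICY, buf,
	    ipsec_get_policylen(buf)) == -1)
		err(1, "setsockopt(IP_IPSEC_POLICY)");
	free(buf);
}

/* Hypothetical helper: exempt an IKE/NAT-T UDP socket from IPsec. */
static void
ike_socket_bypass(int s)
{
	char in_pol[] = "in bypass";
	char out_pol[] = "out bypass";

	set_sock_policy(s, in_pol);
	set_sock_policy(s, out_pol);
}

An IPv6 socket would use IPPROTO_IPV6 and IPV6_IPSEC_POLICY instead.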


@ -4952,6 +4952,8 @@ dev/mlx5/mlx5_core/mlx5_alloc.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_cmd.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_crypto.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_cq.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_diag_cnt.c optional mlx5 pci \
@ -4962,15 +4964,17 @@ dev/mlx5/mlx5_core/mlx5_eq.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_eswitch.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_fc_cmd.c optional mlx5 pci \
dev/mlx5/mlx5_core/mlx5_fs_chains.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_fs_cmd.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_fs_core.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_fs_counters.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_fs_tcp.c optional mlx5 pci \
dev/mlx5/mlx5_core/mlx5_fs_ft_pool.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_fs_tree.c optional mlx5 pci \
dev/mlx5/mlx5_core/mlx5_fs_tcp.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_fw.c optional mlx5 pci \
compile-with "${OFED_C}"
@ -5012,8 +5016,18 @@ dev/mlx5/mlx5_core/mlx5_vsc.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_wq.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_lib/mlx5_aso.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_lib/mlx5_gid.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_accel/mlx5_ipsec_fs.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_accel/mlx5_ipsec.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_accel/mlx5_ipsec_rxtx.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_dim.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"


@ -111,6 +111,12 @@ struct mlx5_cq_modify_params {
} params;
};
enum {
CQE_STRIDE_64 = 0,
CQE_STRIDE_128 = 1,
CQE_STRIDE_128_PAD = 2,
};
static inline int cqe_sz_to_mlx_sz(u8 size)
{
return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;

sys/dev/mlx5/crypto.h (new file)

@ -0,0 +1,36 @@
/*-
* Copyright (c) 2023, NVIDIA Technologies. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#ifndef __MLX5_CRYPTO_H__
#define __MLX5_CRYPTO_H__
struct mlx5_core_dev;
int mlx5_encryption_key_create(struct mlx5_core_dev *, u32 pdn, u32 key_type,
const void *p_key, u32 key_len, u32 *p_obj_id);
int mlx5_encryption_key_destroy(struct mlx5_core_dev *mdev, u32 oid);
#endif /* __MLX5_CRYPTO_H__ */


@ -148,12 +148,14 @@ __mlx5_mask16(typ, fld))
tmp; \
})
#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
#define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 2
#define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 1
#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
MLX5_BY_PASS_NUM_MULTICAST_PRIOS)
MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
MLX5_BY_PASS_NUM_MULTICAST_PRIOS)
/* insert a value to a struct */
#define MLX5_VSC_SET(typ, p, fld, v) do { \
@ -391,6 +393,8 @@ enum {
MLX5_OPCODE_UMR = 0x25,
MLX5_OPCODE_QOS_REMAP = 0x2a,
MLX5_OPCODE_ACCESS_ASO = 0x2d,
MLX5_OPCODE_SIGNATURE_CANCELED = (1 << 15),
};
@ -567,6 +571,11 @@ struct mlx5_eqe_vport_change {
__be32 rsvd1[6];
};
struct mlx5_eqe_obj_change {
u8 rsvd0[2];
__be16 obj_type;
__be32 obj_id;
};
#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF
@ -638,6 +647,7 @@ union ev_data {
struct mlx5_eqe_dct dct;
struct mlx5_eqe_temp_warning temp_warning;
struct mlx5_eqe_xrq_err xrq_err;
struct mlx5_eqe_obj_change obj_change;
} __packed;
struct mlx5_eqe {
@ -703,7 +713,12 @@ struct mlx5_cqe64 {
u8 l4_hdr_type_etc;
__be16 vlan_info;
__be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
__be32 imm_inval_pkey;
union {
__be32 immediate;
__be32 inval_rkey;
__be32 pkey;
__be32 ft_metadata;
};
u8 rsvd40[4];
__be32 byte_cnt;
__be64 timestamp;
@ -919,6 +934,7 @@ enum {
MLX5_MATCH_OUTER_HEADERS = 1 << 0,
MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
MLX5_MATCH_INNER_HEADERS = 1 << 2,
MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3,
};
@ -988,12 +1004,19 @@ enum mlx5_cap_type {
MLX5_CAP_VECTOR_CALC,
MLX5_CAP_QOS,
MLX5_CAP_DEBUG,
MLX5_CAP_NVME,
MLX5_CAP_DMC,
MLX5_CAP_DEC,
MLX5_CAP_RESERVED_14,
MLX5_CAP_DEV_MEM,
MLX5_CAP_RESERVED_16,
MLX5_CAP_TLS,
MLX5_CAP_VDPA_EMULATION = 0x13,
MLX5_CAP_DEV_EVENT = 0x14,
MLX5_CAP_IPSEC,
MLX5_CAP_CRYPTO = 0x1a,
MLX5_CAP_DEV_SHAMPO = 0x1d,
MLX5_CAP_MACSEC = 0x1f,
MLX5_CAP_GENERAL_2 = 0x20,
MLX5_CAP_PORT_SELECTION = 0x25,
MLX5_CAP_ADV_VIRTUALIZATION = 0x26,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@ -1058,6 +1081,9 @@ enum mlx5_mcam_feature_groups {
#define MLX5_CAP_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
#define MLX5_CAP64_FLOWTABLE(mdev, cap) \
MLX5_GET64(flow_table_nic_cap, (mdev)->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
@ -1067,6 +1093,54 @@ enum mlx5_mcam_feature_groups {
#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)
#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)
#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)
#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)
#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)
#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)
#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)
#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap)
#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)
#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)
#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)
#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)
#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)
#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap)
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
@ -1093,14 +1167,46 @@ enum mlx5_mcam_feature_groups {
#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, ft_field_support_2_esw_fdb.cap)
#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2_MAX(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, ft_field_support_2_esw_fdb.cap)
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
#define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET64(flow_table_eswitch_cap, \
(mdev)->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
#define MLX5_CAP_ESW_MAX(mdev, cap) \
MLX5_GET(e_switch_cap, \
mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)
#define MLX5_CAP_PORT_SELECTION(mdev, cap) \
MLX5_GET(port_selection_cap, \
mdev->hca_caps_cur[MLX5_CAP_PORT_SELECTION], cap)
#define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \
MLX5_GET(port_selection_cap, \
mdev->hca_caps_max[MLX5_CAP_PORT_SELECTION], cap)
#define MLX5_CAP_ADV_VIRTUALIZATION(mdev, cap) \
MLX5_GET(adv_virtualization_cap, \
mdev->hca_caps_cur[MLX5_CAP_ADV_VIRTUALIZATION], cap)
#define MLX5_CAP_ADV_VIRTUALIZATION_MAX(mdev, cap) \
MLX5_GET(adv_virtualization_cap, \
mdev->hca_caps_max[MLX5_CAP_ADV_VIRTUALIZATION], cap)
#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
#define MLX5_CAP_FLOWTABLE_PORT_SELECTION_MAX(mdev, cap) \
MLX5_CAP_PORT_SELECTION_MAX(mdev, flow_table_properties_port_selection.cap)
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
@ -1169,6 +1275,9 @@ enum mlx5_mcam_feature_groups {
#define MLX5_CAP_DEV_EVENT(mdev, cap)\
MLX5_ADDR_OF(device_event_cap, (mdev)->hca_caps_cur[MLX5_CAP_DEV_EVENT], cap)
#define MLX5_CAP_IPSEC(mdev, cap) \
MLX5_GET(ipsec_cap, (mdev)->hca_caps_cur[MLX5_CAP_IPSEC], cap)
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,


@ -61,10 +61,12 @@ static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
{
unsigned long flags;
spin_lock_irqsave(doorbell_lock, flags);
if (doorbell_lock)
spin_lock_irqsave(doorbell_lock, flags);
__raw_writel((__force u32) val[0], dest);
__raw_writel((__force u32) val[1], dest + 4);
spin_unlock_irqrestore(doorbell_lock, flags);
if (doorbell_lock)
spin_unlock_irqrestore(doorbell_lock, flags);
}
#endif


@ -634,11 +634,13 @@ struct mlx5_priv {
#endif
struct mlx5_pme_stats pme_stats;
struct mlx5_flow_steering *steering;
struct mlx5_eswitch *eswitch;
struct mlx5_bfreg_data bfregs;
struct mlx5_uars_page *uar;
struct mlx5_fc_stats fc_stats;
struct mlx5_ft_pool *ft_pool;
};
enum mlx5_device_state {
@ -728,6 +730,10 @@ struct mlx5_core_dev {
struct mlx5_flow_root_namespace *esw_ingress_root_ns;
struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
struct mlx5_flow_root_namespace *nic_tx_root_ns;
struct mlx5_flow_root_namespace *rdma_tx_root_ns;
struct mlx5_flow_root_namespace *rdma_rx_root_ns;
u32 num_q_counter_allocated[MLX5_INTERFACE_NUMBER];
struct mlx5_crspace_regmap *dump_rege;
uint32_t *dump_data;
@ -756,6 +762,7 @@ struct mlx5_core_dev {
#ifdef CONFIG_MLX5_FPGA
struct mlx5_fpga_device *fpga;
#endif
struct xarray ipsec_sadb;
};
enum {


@ -33,8 +33,33 @@
#include <dev/mlx5/device.h>
#include <dev/mlx5/driver.h>
enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_NONE,
MLX5_FLOW_DESTINATION_TYPE_VPORT,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
MLX5_FLOW_DESTINATION_TYPE_TIR,
MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER,
MLX5_FLOW_DESTINATION_TYPE_UPLINK,
MLX5_FLOW_DESTINATION_TYPE_PORT,
MLX5_FLOW_DESTINATION_TYPE_COUNTER,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM,
MLX5_FLOW_DESTINATION_TYPE_RANGE,
MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE,
};
enum {
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17,
MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 18,
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS = 1 << 19,
};
enum {
MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0),
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
MLX5_FLOW_TABLE_TERMINATION = BIT(2),
MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4),
};
/*Flow tag*/
@ -61,54 +86,149 @@ enum {
enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_BYPASS,
MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
MLX5_FLOW_NAMESPACE_LAG,
MLX5_FLOW_NAMESPACE_OFFLOADS,
MLX5_FLOW_NAMESPACE_ETHTOOL,
MLX5_FLOW_NAMESPACE_KERNEL,
MLX5_FLOW_NAMESPACE_LEFTOVERS,
MLX5_FLOW_NAMESPACE_SNIFFER_RX,
MLX5_FLOW_NAMESPACE_SNIFFER_TX,
MLX5_FLOW_NAMESPACE_ANCHOR,
MLX5_FLOW_NAMESPACE_FDB_BYPASS,
MLX5_FLOW_NAMESPACE_FDB,
MLX5_FLOW_NAMESPACE_ESW_EGRESS,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
MLX5_FLOW_NAMESPACE_SNIFFER_RX,
MLX5_FLOW_NAMESPACE_SNIFFER_TX,
MLX5_FLOW_NAMESPACE_EGRESS,
MLX5_FLOW_NAMESPACE_EGRESS_IPSEC,
MLX5_FLOW_NAMESPACE_EGRESS_MACSEC,
MLX5_FLOW_NAMESPACE_RDMA_RX,
MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
MLX5_FLOW_NAMESPACE_RDMA_TX,
MLX5_FLOW_NAMESPACE_PORT_SEL,
MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS,
MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS,
MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC,
MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC,
};
enum {
FDB_BYPASS_PATH,
FDB_TC_OFFLOAD,
FDB_FT_OFFLOAD,
FDB_TC_MISS,
FDB_BR_OFFLOAD,
FDB_SLOW_PATH,
FDB_PER_VPORT,
};
struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_rule;
struct mlx5_flow_namespace;
struct mlx5_flow_handle;
enum {
FLOW_CONTEXT_HAS_TAG = BIT(0),
};
struct mlx5_flow_context {
u32 flags;
u32 flow_tag;
u32 flow_source;
};
struct mlx5_flow_spec {
u8 match_criteria_enable;
u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
struct mlx5_flow_context flow_context;
};
enum {
MLX5_FLOW_DEST_VPORT_VHCA_ID = BIT(0),
MLX5_FLOW_DEST_VPORT_REFORMAT_ID = BIT(1),
};
enum mlx5_flow_dest_range_field {
MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN = 0,
};
struct mlx5_flow_destination {
u32 type;
enum mlx5_flow_destination_type type;
union {
u32 tir_num;
struct mlx5_flow_table *ft;
u32 vport_num;
u32 tir_num;
u32 ft_num;
struct mlx5_flow_table *ft;
u32 counter_id;
struct {
u16 num;
u16 vhca_id;
struct mlx5_pkt_reformat *pkt_reformat;
u8 flags;
} vport;
struct {
struct mlx5_flow_table *hit_ft;
struct mlx5_flow_table *miss_ft;
enum mlx5_flow_dest_range_field field;
u32 min;
u32 max;
} range;
u32 sampler_id;
};
};
enum mlx5_flow_act_actions {
MLX5_FLOW_ACT_ACTIONS_FLOW_TAG = 1 << 0,
MLX5_FLOW_ACT_ACTIONS_MODIFY_HDR = 1 << 1,
MLX5_FLOW_ACT_ACTIONS_PACKET_REFORMAT = 1 << 2,
MLX5_FLOW_ACT_ACTIONS_COUNT = 1 << 3,
struct mlx5_exe_aso {
u32 object_id;
u8 type;
u8 return_reg_id;
union {
u32 ctrl_data;
struct {
u8 meter_idx;
u8 init_color;
} flow_meter;
};
};
enum MLX5_FLOW_ACT_FLAGS {
MLX5_FLOW_ACT_NO_APPEND = 1 << 0,
enum {
FLOW_ACT_NO_APPEND = BIT(0),
FLOW_ACT_IGNORE_FLOW_LEVEL = BIT(1),
};
struct mlx5_fs_vlan {
u16 ethtype;
u16 vid;
u8 prio;
};
#define MLX5_FS_VLAN_DEPTH 2
enum mlx5_flow_act_crypto_type {
MLX5_FLOW_ACT_CRYPTO_TYPE_IPSEC,
};
enum mlx5_flow_act_crypto_op {
MLX5_FLOW_ACT_CRYPTO_OP_ENCRYPT,
MLX5_FLOW_ACT_CRYPTO_OP_DECRYPT,
};
struct mlx5_flow_act_crypto_params {
u32 obj_id;
u8 type; /* see enum mlx5_flow_act_crypto_type */
u8 op; /* see enum mlx5_flow_act_crypto_op */
};
struct mlx5_flow_act {
u32 actions; /* See enum mlx5_flow_act_actions */
u32 flags;
u32 flow_tag;
struct mlx5_modify_hdr *modify_hdr;
u32 action;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_fc *counter;
struct mlx5_flow_act_crypto_params crypto;
u32 flags;
struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
struct ib_counters *counters;
struct mlx5_flow_group *fg;
struct mlx5_exe_aso exe_aso;
};
#define FT_NAME_STR_SZ 20
@ -135,6 +255,28 @@ static inline bool outer_header_zero(u32 *match_criteria)
size - 1);
}
struct mlx5_flow_namespace *
mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type,
int vport);
struct mlx5_flow_table_attr {
int prio;
int max_fte;
u32 level;
u32 flags;
u16 uid;
struct mlx5_flow_table *next_ft;
struct {
int max_num_groups;
int num_reserved_entries;
} autogroup;
};
struct mlx5_flow_namespace *
mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, int n);
struct mlx5_flow_namespace *
mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type);
@ -145,24 +287,19 @@ mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
*/
struct mlx5_flow_table *
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
int prio,
const char *name,
int num_flow_table_entries,
int max_num_groups,
int num_reserved_entries);
struct mlx5_flow_table_attr *ft_attr);
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
u16 vport,
int prio,
const char *name,
int num_flow_table_entries);
struct mlx5_flow_table_attr *ft_attr, u16 vport);
struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
struct mlx5_flow_namespace *ns,
int prio, u32 level);
struct mlx5_flow_table *
mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
int prio,
const char *name,
int num_flow_table_entries);
struct mlx5_flow_table_attr *ft_attr);
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
/* inbox should be set with the following values:
@ -175,18 +312,17 @@ struct mlx5_flow_group *
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
/* Single destination per rule.
* Group ID is implied by the match criteria.
*/
struct mlx5_flow_rule *
mlx5_add_flow_rule(struct mlx5_flow_table *ft,
u8 match_criteria_enable,
u32 *match_criteria,
u32 *match_value,
u32 sw_action,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest);
void mlx5_del_flow_rule(struct mlx5_flow_rule **);
struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
const struct mlx5_flow_spec *spec,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
int num_dest);
void mlx5_del_flow_rules(struct mlx5_flow_handle **pp);
int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
struct mlx5_flow_destination *new_dest,
struct mlx5_flow_destination *old_dest);
/*The following API is for sniffer*/
typedef int (*rule_event_fn)(struct mlx5_flow_rule *rule,
@ -292,4 +428,8 @@ int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
u64 *packets, u64 *bytes);
u32 mlx5_fc_id(struct mlx5_fc *counter);
/******* End of Flow counters API ******/
u32 mlx5_flow_table_id(struct mlx5_flow_table *ft);
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
#endif


@ -1,137 +1,277 @@
/*-
* Copyright (c) 2017 Mellanox Technologies. All rights reserved.
* Copyright (c) 2023 NVIDIA corporation & affiliates.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __MLX5_ACCEL_IPSEC_H__
#define __MLX5_ACCEL_IPSEC_H__
#ifdef CONFIG_MLX5_ACCEL
#include <sys/mbuf.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_en/en.h>
#include <dev/mlx5/mlx5_lib/aso.h>
enum {
MLX5_ACCEL_IPSEC_DEVICE = BIT(1),
MLX5_ACCEL_IPSEC_IPV6 = BIT(2),
MLX5_ACCEL_IPSEC_ESP = BIT(3),
MLX5_ACCEL_IPSEC_LSO = BIT(4),
#define MLX5E_IPSEC_SADB_RX_BITS 10
#define MLX5_IPSEC_METADATA_MARKER(ipsec_metadata) ((ipsec_metadata >> 31) & 0x1)
struct mlx5e_priv;
struct mlx5e_tx_wqe;
struct mlx5e_ipsec_tx;
struct mlx5e_ipsec_rx;
struct aes_gcm_keymat {
u64 seq_iv;
u32 salt;
u32 icv_len;
u32 key_len;
u32 aes_key[256 / 32];
};
#define MLX5_IPSEC_SADB_IP_AH BIT(7)
#define MLX5_IPSEC_SADB_IP_ESP BIT(6)
#define MLX5_IPSEC_SADB_SA_VALID BIT(5)
#define MLX5_IPSEC_SADB_SPI_EN BIT(4)
#define MLX5_IPSEC_SADB_DIR_SX BIT(3)
#define MLX5_IPSEC_SADB_IPV6 BIT(2)
enum {
MLX5_IPSEC_CMD_ADD_SA = 0,
MLX5_IPSEC_CMD_DEL_SA = 1,
struct mlx5e_ipsec_priv_bothdir {
struct mlx5e_ipsec_sa_entry *priv_in;
struct mlx5e_ipsec_sa_entry *priv_out;
};
enum mlx5_accel_ipsec_enc_mode {
MLX5_IPSEC_SADB_MODE_NONE = 0,
MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128 = 1,
MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128 = 3,
struct mlx5e_ipsec_work {
struct work_struct work;
struct mlx5e_ipsec_sa_entry *sa_entry;
void *data;
};
#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
MLX5_ACCEL_IPSEC_DEVICE)
struct mlx5e_ipsec_dwork {
struct delayed_work dwork;
struct mlx5e_ipsec_sa_entry *sa_entry;
struct mlx5e_ipsec_priv_bothdir *pb;
};
struct mlx5e_ipsec_aso {
u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
dma_addr_t dma_addr;
struct mlx5_aso *aso;
/* Protect ASO WQ access, as it is global to whole IPsec */
spinlock_t lock;
};
struct mlx5_replay_esn {
u32 replay_window;
u32 esn;
u32 esn_msb;
u8 overlap : 1;
u8 trigger : 1;
};
struct mlx5_accel_esp_xfrm_attrs {
u32 spi;
struct aes_gcm_keymat aes_gcm;
struct mlx5_accel_ipsec_sa {
__be32 cmd;
u8 key_enc[32];
u8 key_auth[32];
__be32 sip[4];
__be32 dip[4];
union {
struct {
__be32 reserved;
u8 salt_iv[8];
__be32 salt;
} __packed gcm;
struct {
u8 salt[16];
} __packed cbc;
};
__be32 spi;
__be32 sw_sa_handle;
__be16 tfclen;
u8 enc_mode;
u8 sip_masklen;
u8 dip_masklen;
u8 flags;
u8 reserved[2];
} __packed;
__be32 a4;
__be32 a6[4];
} saddr;
/**
* mlx5_accel_ipsec_sa_cmd_exec - Execute an IPSec SADB command
* @mdev: mlx5 device
* @cmd: command to execute
* May be called from atomic context. Returns context pointer, or error
* Caller must eventually call mlx5_accel_ipsec_sa_cmd_wait from non-atomic
* context, to cleanup the context pointer
*/
void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
struct mlx5_accel_ipsec_sa *cmd);
union {
__be32 a4;
__be32 a6[4];
} daddr;
/**
* mlx5_accel_ipsec_sa_cmd_wait - Wait for command execution completion
* @context: Context pointer returned from call to mlx5_accel_ipsec_sa_cmd_exec
* Sleeps (killable) until command execution is complete.
* Returns the command result, or -EINTR if killed
*/
int mlx5_accel_ipsec_sa_cmd_wait(void *context);
u8 dir : 2;
u8 encap : 1;
u8 drop : 1;
u8 family;
struct mlx5_replay_esn replay_esn;
u32 authsize;
u32 reqid;
u16 sport;
u16 dport;
};
u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
enum mlx5_ipsec_cap {
MLX5_IPSEC_CAP_CRYPTO = 1 << 0,
MLX5_IPSEC_CAP_ESN = 1 << 1,
MLX5_IPSEC_CAP_PACKET_OFFLOAD = 1 << 2,
MLX5_IPSEC_CAP_ROCE = 1 << 3,
MLX5_IPSEC_CAP_PRIO = 1 << 4,
MLX5_IPSEC_CAP_TUNNEL = 1 << 5,
MLX5_IPSEC_CAP_ESPINUDP = 1 << 6,
};
unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev);
int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
unsigned int count);
struct mlx5e_ipsec {
struct mlx5_core_dev *mdev;
struct workqueue_struct *wq;
struct mlx5e_ipsec_tx *tx;
struct mlx5e_ipsec_rx *rx_ipv4;
struct mlx5e_ipsec_rx *rx_ipv6;
struct mlx5e_ipsec_aso *aso;
u32 pdn;
u32 mkey;
};
int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule;
struct mlx5_flow_handle *kspi_rule;
struct mlx5_flow_handle *reqid_rule;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_fc *fc;
};
#else
struct mlx5e_ipsec_esn_state {
u32 esn;
u32 esn_msb;
u8 overlap: 1;
};
#define MLX5_IPSEC_DEV(mdev) false
struct mlx5e_ipsec_sa_entry {
struct secasvar *savp;
if_t ifp;
struct mlx5e_ipsec *ipsec;
struct mlx5_accel_esp_xfrm_attrs attrs;
struct mlx5e_ipsec_rule ipsec_rule;
struct mlx5e_ipsec_dwork *dwork;
struct mlx5e_ipsec_work *work;
u32 ipsec_obj_id;
u32 enc_key_id;
u16 kspi; /* Stack allocated unique SA identifier */
struct mlx5e_ipsec_esn_state esn_state;
};
static inline int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
struct upspec {
u16 dport;
u16 sport;
u8 proto;
};
struct mlx5_accel_pol_xfrm_attrs {
union {
__be32 a4;
__be32 a6[4];
} saddr;
union {
__be32 a4;
__be32 a6[4];
} daddr;
struct upspec upspec;
u8 family;
u8 action;
u8 dir : 2;
u32 reqid;
u32 prio;
};
struct mlx5e_ipsec_pol_entry {
struct secpolicy *sp;
struct mlx5e_ipsec *ipsec;
struct mlx5e_ipsec_rule ipsec_rule;
struct mlx5_accel_pol_xfrm_attrs attrs;
};
/* This function doesn't really belong here, but let's put it here for now */
void mlx5_object_change_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
int mlx5e_ipsec_init(struct mlx5e_priv *priv);
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec);
void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec);
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev);
static inline struct mlx5_core_dev *
mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
{
return 0;
return sa_entry->ipsec->mdev;
}
static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
static inline struct mlx5_core_dev *
mlx5e_ipsec_pol2dev(struct mlx5e_ipsec_pol_entry *pol_entry)
{
return pol_entry->ipsec->mdev;
}
#endif
void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs,
u8 dir);
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry);
struct ipsec_accel_out_tag;
void mlx5e_accel_ipsec_handle_tx_wqe(struct mbuf *mb, struct mlx5e_tx_wqe *wqe,
struct ipsec_accel_out_tag *tag);
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
static inline int mlx5e_accel_ipsec_get_metadata(unsigned int id)
{
return MLX5_ETH_WQE_FT_META_IPSEC << 23 | id;
}
static inline void
mlx5e_accel_ipsec_handle_tx(struct mbuf *mb, struct mlx5e_tx_wqe *wqe)
{
struct ipsec_accel_out_tag *tag;
tag = (struct ipsec_accel_out_tag *)m_tag_find(mb,
PACKET_TAG_IPSEC_ACCEL_OUT, NULL);
if (tag != NULL)
mlx5e_accel_ipsec_handle_tx_wqe(mb, wqe, tag);
}
void mlx5e_accel_ipsec_fs_rx_tables_destroy(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_rx_tables_create(struct mlx5e_priv *priv);
void mlx5e_accel_ipsec_fs_rx_catchall_rules_destroy(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_rx_catchall_rules(struct mlx5e_priv *priv);
int mlx5_accel_ipsec_rx_tag_add(if_t ifp, struct mbuf *mb);
int mlx5e_accel_ipsec_handle_rx_cqe(struct mbuf *mb, struct mlx5_cqe64 *cqe);
static inline int mlx5e_accel_ipsec_flow(struct mlx5_cqe64 *cqe)
{
return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
}
static inline void mlx5e_accel_ipsec_handle_rx(struct mbuf *mb, struct mlx5_cqe64 *cqe)
{
u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
if (!MLX5_IPSEC_METADATA_MARKER(ipsec_meta_data)) {
struct m_tag *mtag;
mtag = m_tag_find(mb, PACKET_TAG_IPSEC_ACCEL_IN, NULL);
if (mtag != NULL)
m_tag_delete(mb, mtag);
return;
}
mlx5e_accel_ipsec_handle_rx_cqe(mb, cqe);
}
#endif /* __MLX5_ACCEL_IPSEC_H__ */


@ -0,0 +1,747 @@
/*-
* Copyright (c) 2023 NVIDIA corporation & affiliates.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include "opt_ipsec.h"
#include <sys/types.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/pfkeyv2.h>
#include <netipsec/key_var.h>
#include <netipsec/keydb.h>
#include <netipsec/ipsec.h>
#include <netipsec/xform.h>
#include <netipsec/ipsec_offload.h>
#include <dev/mlx5/fs.h>
#include <dev/mlx5/mlx5_en/en.h>
#include <dev/mlx5/mlx5_accel/ipsec.h>
#define MLX5_IPSEC_RESCHED msecs_to_jiffies(1000)
static int mlx5e_if_sa_deinstall(struct ifnet *ifp, u_int dev_spi, void *priv);
static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(void *x)
{
return (struct mlx5e_ipsec_sa_entry *)x;
}
static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(void *x)
{
return (struct mlx5e_ipsec_pol_entry *)x;
}
static void
mlx5e_ipsec_handle_counters_onedir(struct mlx5e_ipsec_sa_entry *sa_entry,
u64 *packets, u64 *bytes)
{
struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
mlx5_fc_query(mdev, ipsec_rule->fc, packets, bytes);
}
static struct mlx5e_ipsec_sa_entry *
mlx5e_ipsec_other_sa_entry(struct mlx5e_ipsec_priv_bothdir *pb,
struct mlx5e_ipsec_sa_entry *sa_entry)
{
return (pb->priv_in == sa_entry ? pb->priv_out : pb->priv_in);
}
static void
mlx5e_ipsec_handle_counters(struct work_struct *_work)
{
struct mlx5e_ipsec_dwork *dwork =
container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
struct mlx5e_ipsec_sa_entry *sa_entry = dwork->sa_entry;
struct mlx5e_ipsec_sa_entry *other_sa_entry;
u64 bytes, bytes1, packets1, packets;
if (sa_entry->attrs.drop)
return;
other_sa_entry = mlx5e_ipsec_other_sa_entry(dwork->pb, sa_entry);
if (other_sa_entry == NULL || other_sa_entry->attrs.drop)
return;
mlx5e_ipsec_handle_counters_onedir(sa_entry, &packets, &bytes);
mlx5e_ipsec_handle_counters_onedir(other_sa_entry, &packets1, &bytes1);
packets += packets1;
bytes += bytes1;
#ifdef IPSEC_OFFLOAD
ipsec_accel_drv_sa_lifetime_update(sa_entry->savp, sa_entry->ifp,
sa_entry->kspi, bytes, packets);
#endif
queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork,
MLX5_IPSEC_RESCHED);
}
static int
mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5e_ipsec_priv_bothdir *pb)
{
struct mlx5e_ipsec_dwork *dwork;
dwork = kzalloc(sizeof(*dwork), GFP_KERNEL);
if (!dwork)
return (ENOMEM);
dwork->sa_entry = sa_entry;
dwork->pb = pb;
INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_counters);
sa_entry->dwork = dwork;
return 0;
}
static int mlx5_xform_ah_authsize(const struct auth_hash *esph)
{
int alen;
if (esph == NULL)
return 0;
switch (esph->type) {
case CRYPTO_SHA2_256_HMAC:
case CRYPTO_SHA2_384_HMAC:
case CRYPTO_SHA2_512_HMAC:
alen = esph->hashsize / 2; /* RFC4868 2.3 */
break;
case CRYPTO_POLY1305:
case CRYPTO_AES_NIST_GMAC:
alen = esph->hashsize;
break;
default:
alen = AH_HMAC_HASHLEN;
break;
}
return alen;
}
void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs,
u8 dir)
{
struct secasvar *savp = sa_entry->savp;
const struct auth_hash *esph = savp->tdb_authalgxform;
struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
struct secasindex *saidx = &savp->sah->saidx;
struct seckey *key_encap = savp->key_enc;
int key_len;
memset(attrs, 0, sizeof(*attrs));
/* subtract off the salt, RFC4106, 8.1 and RFC3686, 5.1 */
key_len = _KEYLEN(key_encap) - SAV_ISCTRORGCM(savp) * 4 - SAV_ISCHACHA(savp) * 4;
memcpy(aes_gcm->aes_key, key_encap->key_data, key_len);
aes_gcm->key_len = key_len;
/* salt and seq_iv */
aes_gcm->seq_iv = 0;
memcpy(&aes_gcm->salt, key_encap->key_data + key_len,
sizeof(aes_gcm->salt));
switch (savp->alg_enc) {
case SADB_X_EALG_AESGCM8:
attrs->authsize = 8 / 4; /* in dwords */
break;
case SADB_X_EALG_AESGCM12:
attrs->authsize = 12 / 4; /* in dwords */
break;
case SADB_X_EALG_AESGCM16:
attrs->authsize = 16 / 4; /* in dwords */
break;
default: break;
}
/* icv len */
aes_gcm->icv_len = mlx5_xform_ah_authsize(esph); //TBD: check if value makes sense
attrs->dir = dir;
/* spi - host order */
attrs->spi = ntohl(savp->spi);
attrs->family = saidx->dst.sa.sa_family;
attrs->reqid = saidx->reqid;
if (saidx->src.sa.sa_family == AF_INET) {
attrs->saddr.a4 = saidx->src.sin.sin_addr.s_addr;
attrs->daddr.a4 = saidx->dst.sin.sin_addr.s_addr;
} else {
memcpy(&attrs->saddr.a6, &saidx->src.sin6.sin6_addr, 16);
memcpy(&attrs->daddr.a6, &saidx->dst.sin6.sin6_addr, 16);
}
if (savp->natt) {
attrs->encap = true;
attrs->sport = savp->natt->sport;
attrs->dport = savp->natt->dport;
}
if (savp->flags & SADB_X_SAFLAGS_ESN) {
/* We support replay window with ESN only */
attrs->replay_esn.trigger = true;
if (sa_entry->esn_state.esn_msb)
attrs->replay_esn.esn = sa_entry->esn_state.esn;
else
/* According to RFC4303, section "3.3.3. Sequence Number Generation",
* the first packet sent using a given SA will contain a sequence
* number of 1.
*/
attrs->replay_esn.esn = max_t(u32, sa_entry->esn_state.esn, 1);
attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
if (savp->replay) {
switch (savp->replay->wsize) {
case 4:
attrs->replay_esn.replay_window = MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
break;
case 8:
attrs->replay_esn.replay_window = MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
break;
case 16:
attrs->replay_esn.replay_window = MLX5_IPSEC_ASO_REPLAY_WIN_128BIT;
break;
case 32:
attrs->replay_esn.replay_window = MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;
break;
default:
/* Do nothing */
break;
}
}
}
}
static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
struct secasvar *savp)
{
struct secasindex *saidx = &savp->sah->saidx;
struct seckey *key_encp = savp->key_enc;
int keylen;
if (!(mlx5_ipsec_device_caps(mdev) &
MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
mlx5_core_err(mdev, "FULL offload is not supported\n");
return (EINVAL);
}
if (savp->alg_enc == SADB_EALG_NONE) {
mlx5_core_err(mdev, "Cannot offload authenticated xfrm states\n");
return (EINVAL);
}
if (savp->alg_enc != SADB_X_EALG_AESGCM16) {
mlx5_core_err(mdev, "Only IPSec aes-gcm-16 encryption protocol may be offloaded\n");
return (EINVAL);
}
if (savp->tdb_compalgxform) {
mlx5_core_err(mdev, "Cannot offload compressed xfrm states\n");
return (EINVAL);
}
if (savp->alg_auth != SADB_X_AALG_AES128GMAC && savp->alg_auth != SADB_X_AALG_AES256GMAC) {
mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bits\n");
return (EINVAL);
}
if ((saidx->dst.sa.sa_family != AF_INET && saidx->dst.sa.sa_family != AF_INET6) ||
(saidx->src.sa.sa_family != AF_INET && saidx->src.sa.sa_family != AF_INET6)) {
mlx5_core_err(mdev, "Only IPv4/6 xfrm states may be offloaded\n");
return (EINVAL);
}
if (saidx->proto != IPPROTO_ESP) {
mlx5_core_err(mdev, "Only ESP xfrm state may be offloaded\n");
return (EINVAL);
}
/* subtract off the salt, RFC4106, 8.1 and RFC3686, 5.1 */
keylen = _KEYLEN(key_encp) - SAV_ISCTRORGCM(savp) * 4 - SAV_ISCHACHA(savp) * 4;
if (keylen != 128/8 && keylen != 256 / 8) {
mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
return (EINVAL);
}
if (saidx->mode != IPSEC_MODE_TRANSPORT) {
mlx5_core_err(mdev, "Only transport xfrm states may be offloaded in full offlaod mode\n");
return (EINVAL);
}
if (savp->natt) {
if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESPINUDP)) {
mlx5_core_err(mdev, "Encapsulation is not supported\n");
return (EINVAL);
}
}
if (savp->replay && savp->replay->wsize != 0 && savp->replay->wsize != 4 &&
savp->replay->wsize != 8 && savp->replay->wsize != 16 && savp->replay->wsize != 32) {
mlx5_core_err(mdev, "Unsupported replay window size %d\n", savp->replay->wsize);
return (EINVAL);
}
if ((savp->flags & SADB_X_SAFLAGS_ESN) != 0) {
if ((mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESN) == 0) {
mlx5_core_err(mdev, "ESN is not supported\n");
return (EINVAL);
}
} else if (savp->replay != NULL && savp->replay->wsize != 0) {
mlx5_core_warn(mdev,
"non-ESN but replay-protect SA offload is not supported\n");
return (EINVAL);
}
return 0;
}
static int
mlx5e_if_sa_newkey_onedir(struct ifnet *ifp, void *sav, int dir,
u_int drv_spi, struct mlx5e_ipsec_sa_entry **privp,
struct mlx5e_ipsec_priv_bothdir *pb)
{
struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
struct mlx5e_priv *priv = if_getsoftc(ifp);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_ipsec *ipsec = priv->ipsec;
int err;
if (priv->gone != 0 || ipsec == NULL)
return (EOPNOTSUPP);
err = mlx5e_xfrm_validate_state(mdev, sav);
if (err)
return err;
sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
if (sa_entry == NULL)
return (ENOMEM);
sa_entry->kspi = drv_spi;
sa_entry->savp = sav;
sa_entry->ifp = ifp;
sa_entry->ipsec = ipsec;
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs, dir);
err = mlx5e_ipsec_create_dwork(sa_entry, pb);
if (err)
goto err_xfrm;
/* create hw context */
err = mlx5_ipsec_create_sa_ctx(sa_entry);
if (err)
goto err_sa_ctx;
err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
if (err)
goto err_fs;
*privp = sa_entry;
if (sa_entry->dwork)
queue_delayed_work(ipsec->wq, &sa_entry->dwork->dwork, MLX5_IPSEC_RESCHED);
err = xa_insert(&mdev->ipsec_sadb, sa_entry->ipsec_obj_id, sa_entry, GFP_KERNEL);
if (err)
goto err_xa;
return 0;
err_xa:
if (sa_entry->dwork)
cancel_delayed_work_sync(&sa_entry->dwork->dwork);
mlx5e_accel_ipsec_fs_del_rule(sa_entry);
err_fs:
mlx5_ipsec_free_sa_ctx(sa_entry);
err_sa_ctx:
kfree(sa_entry->dwork);
err_xfrm:
kfree(sa_entry);
mlx5_en_err(ifp, "Device failed to offload this state");
return err;
}
static int
mlx5e_if_sa_newkey(struct ifnet *ifp, void *sav, u_int dev_spi, void **privp)
{
struct mlx5e_ipsec_priv_bothdir *pb;
int error;
pb = malloc(sizeof(struct mlx5e_ipsec_priv_bothdir), M_DEVBUF,
M_WAITOK | M_ZERO);
error = mlx5e_if_sa_newkey_onedir(ifp, sav, IPSEC_DIR_INBOUND,
dev_spi, &pb->priv_in, pb);
if (error != 0) {
free(pb, M_DEVBUF);
return (error);
}
error = mlx5e_if_sa_newkey_onedir(ifp, sav, IPSEC_DIR_OUTBOUND,
dev_spi, &pb->priv_out, pb);
if (error == 0) {
*privp = pb;
} else {
mlx5e_if_sa_deinstall(ifp, dev_spi, pb->priv_in);
free(pb, M_DEVBUF);
}
return (error);
}
static void
mlx5e_if_sa_deinstall_onekey(struct ifnet *ifp, u_int dev_spi, void *priv)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(priv);
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
struct mlx5e_ipsec_sa_entry *old;
old = xa_erase(&mdev->ipsec_sadb, sa_entry->ipsec_obj_id);
WARN_ON(old != sa_entry);
mlx5e_accel_ipsec_fs_del_rule(sa_entry);
mlx5_ipsec_free_sa_ctx(sa_entry);
kfree(sa_entry->dwork);
kfree(sa_entry);
}
static int
mlx5e_if_sa_deinstall(struct ifnet *ifp, u_int dev_spi, void *priv)
{
struct mlx5e_ipsec_priv_bothdir pb, *pbp;
pbp = priv;
pb = *(struct mlx5e_ipsec_priv_bothdir *)priv;
pbp->priv_in = pbp->priv_out = NULL;
if (pb.priv_in->dwork != NULL)
cancel_delayed_work_sync(&pb.priv_in->dwork->dwork);
if (pb.priv_out->dwork != NULL)
cancel_delayed_work_sync(&pb.priv_out->dwork->dwork);
mlx5e_if_sa_deinstall_onekey(ifp, dev_spi, pb.priv_in);
mlx5e_if_sa_deinstall_onekey(ifp, dev_spi, pb.priv_out);
free(pbp, M_DEVBUF);
return (0);
}
static void
mlx5e_if_sa_cnt_one(struct ifnet *ifp, void *sa, uint32_t drv_spi,
void *priv, u64 *bytes, u64 *packets)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(priv);
struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
mlx5_fc_query(mdev, ipsec_rule->fc, packets, bytes);
}
static int
mlx5e_if_sa_cnt(struct ifnet *ifp, void *sa, uint32_t drv_spi,
void *priv, struct seclifetime *lt)
{
struct mlx5e_ipsec_priv_bothdir *pb;
u64 packets_in, packets_out;
u64 bytes_in, bytes_out;
pb = priv;
mlx5e_if_sa_cnt_one(ifp, sa, drv_spi, pb->priv_in,
&bytes_in, &packets_in);
mlx5e_if_sa_cnt_one(ifp, sa, drv_spi, pb->priv_out,
&bytes_out, &packets_out);
/* TODO: remove this casting once Kostia changes allocation type to be u64 */
lt->bytes = bytes_in + bytes_out;
lt->allocations = (uint32_t)(packets_in + packets_out);
return (0);
}
static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
struct secpolicy *sp, struct inpcb *inp)
{
struct secpolicyindex *spidx = &sp->spidx;
if (!(mlx5_ipsec_device_caps(mdev) &
MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
mlx5_core_err(mdev, "FULL offload is not supported\n");
return (EINVAL);
}
if (sp->tcount > 1) {
mlx5_core_err(mdev, "Can offload exactly one template, "
"not %d\n", sp->tcount);
return (EINVAL);
}
if (sp->policy == IPSEC_POLICY_BYPASS &&
!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO)) {
mlx5_core_err(mdev, "Device does not support policy priority\n");
return (EINVAL);
}
if (sp->tcount > 0 && inp != NULL) {
mlx5_core_err(mdev, "Not valid input data\n");
return (EINVAL);
}
if (spidx->dir != IPSEC_DIR_INBOUND && spidx->dir != IPSEC_DIR_OUTBOUND) {
mlx5_core_err(mdev, "Wrong policy direction\n");
return (EINVAL);
}
if (sp->tcount > 0 && sp->req[0]->saidx.mode != IPSEC_MODE_TRANSPORT) {
mlx5_core_err(mdev, "Device supports transport mode only");
return (EINVAL);
}
if (sp->policy != IPSEC_POLICY_DISCARD &&
sp->policy != IPSEC_POLICY_IPSEC && sp->policy != IPSEC_POLICY_BYPASS) {
mlx5_core_err(mdev, "Offloaded policy must be specific on its action\n");
return (EINVAL);
}
if (sp->policy == IPSEC_POLICY_BYPASS && !inp) {
mlx5_core_err(mdev, "Missing port information for IKE bypass\n");
return (EINVAL);
}
if (inp != NULL) {
INP_RLOCK(inp);
if (inp->inp_socket == NULL || inp->inp_socket->so_proto->
pr_protocol != IPPROTO_UDP) {
mlx5_core_err(mdev, "Unsupported IKE bypass protocol %d\n",
inp->inp_socket == NULL ? -1 :
inp->inp_socket->so_proto->pr_protocol);
INP_RUNLOCK(inp);
return (EINVAL);
}
INP_RUNLOCK(inp);
}
/* TODO fill relevant bits */
return 0;
}
static void mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
struct mlx5_accel_pol_xfrm_attrs *attrs,
struct inpcb *inp)
{
struct secpolicy *sp = pol_entry->sp;
struct secpolicyindex *spidx = &sp->spidx;
memset(attrs, 0, sizeof(*attrs));
if (!inp) {
if (spidx->src.sa.sa_family == AF_INET) {
attrs->saddr.a4 = spidx->src.sin.sin_addr.s_addr;
attrs->daddr.a4 = spidx->dst.sin.sin_addr.s_addr;
} else if (spidx->src.sa.sa_family == AF_INET6) {
memcpy(&attrs->saddr.a6, &spidx->src.sin6.sin6_addr, 16);
memcpy(&attrs->daddr.a6, &spidx->dst.sin6.sin6_addr, 16);
} else {
KASSERT(0, ("unsupported family %d", spidx->src.sa.sa_family));
}
attrs->family = spidx->src.sa.sa_family;
attrs->prio = 0;
attrs->action = sp->policy;
attrs->reqid = sp->req[0]->saidx.reqid;
} else {
INP_RLOCK(inp);
if ((inp->inp_vflag & INP_IPV4) != 0) {
attrs->saddr.a4 = inp->inp_laddr.s_addr;
attrs->daddr.a4 = inp->inp_faddr.s_addr;
attrs->family = AF_INET;
} else if ((inp->inp_vflag & INP_IPV6) != 0) {
memcpy(&attrs->saddr.a6, &inp->in6p_laddr, 16);
memcpy(&attrs->daddr.a6, &inp->in6p_laddr, 16);
attrs->family = AF_INET6;
} else {
KASSERT(0, ("unsupported family %d", inp->inp_vflag));
}
attrs->upspec.dport = inp->inp_fport;
attrs->upspec.sport = inp->inp_lport;
attrs->upspec.proto = inp->inp_ip_p;
INP_RUNLOCK(inp);
/* Give highest priority for PCB policies */
attrs->prio = 1;
attrs->action = IPSEC_POLICY_IPSEC;
}
attrs->dir = spidx->dir;
}
static int mlx5e_if_spd_install(struct ifnet *ifp, void *sp, void *inp1,
void **ifdatap)
{
struct mlx5e_ipsec_pol_entry *pol_entry;
struct mlx5e_priv *priv;
int err;
priv = if_getsoftc(ifp);
if (priv->gone || !priv->ipsec)
return (EOPNOTSUPP);
err = mlx5e_xfrm_validate_policy(priv->mdev, sp, inp1);
if (err)
return err;
pol_entry = kzalloc(sizeof(*pol_entry), GFP_KERNEL);
if (!pol_entry)
return (ENOMEM);
pol_entry->sp = sp;
pol_entry->ipsec = priv->ipsec;
mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs, inp1);
err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
if (err)
goto err_pol;
*ifdatap = pol_entry;
return 0;
err_pol:
kfree(pol_entry);
mlx5_en_err(ifp, "Device failed to offload this policy");
return err;
}
static int mlx5e_if_spd_deinstall(struct ifnet *ifp, void *sp, void *ifdata)
{
struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(ifdata);
mlx5e_accel_ipsec_fs_del_pol(pol_entry);
kfree(pol_entry);
return 0;
}
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
{
struct mlx5e_ipsec *pipsec = priv->ipsec;
if (!pipsec)
return;
mlx5e_accel_ipsec_fs_cleanup(pipsec);
destroy_workqueue(pipsec->wq);
mlx5e_ipsec_aso_cleanup(pipsec);
kfree(pipsec);
priv->ipsec = NULL;
}
static int
mlx5e_if_ipsec_hwassist(if_t ifnet, void *sav __unused,
uint32_t drv_spi __unused, void *priv __unused)
{
return (if_gethwassist(ifnet) & (CSUM_TSO | CSUM_TCP | CSUM_UDP |
CSUM_IP | CSUM_IP6_TSO | CSUM_IP6_TCP | CSUM_IP6_UDP));
}
static const struct if_ipsec_accel_methods mlx5e_ipsec_funcs = {
.if_sa_newkey = mlx5e_if_sa_newkey,
.if_sa_deinstall = mlx5e_if_sa_deinstall,
.if_spdadd = mlx5e_if_spd_install,
.if_spddel = mlx5e_if_spd_deinstall,
.if_sa_cnt = mlx5e_if_sa_cnt,
.if_hwassist = mlx5e_if_ipsec_hwassist,
};
int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_ipsec *pipsec;
if_t ifp = priv->ifp;
int ret;
mlx5_core_info(mdev, "ipsec "
"offload %d log_max_dek %d gen_obj_types %d "
"ipsec_encrypt %d ipsec_decrypt %d "
"esp_aes_gcm_128_encrypt %d esp_aes_gcm_128_decrypt %d "
"ipsec_full_offload %d "
"reformat_add_esp_trasport %d reformat_del_esp_trasport %d "
"decap %d "
"ignore_flow_level_tx %d ignore_flow_level_rx %d "
"reformat_natt_tx %d reformat_natt_rx %d "
"ipsec_esn %d\n",
MLX5_CAP_GEN(mdev, ipsec_offload) != 0,
MLX5_CAP_GEN(mdev, log_max_dek) != 0,
(MLX5_CAP_GEN_64(mdev, general_obj_types) &
MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC) != 0,
MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) != 0,
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt) != 0,
MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) != 0,
MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt) != 0,
MLX5_CAP_IPSEC(mdev, ipsec_full_offload) != 0,
MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_esp_trasport) != 0,
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_del_esp_trasport) != 0,
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap) != 0,
MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) != 0,
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level) != 0,
MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
reformat_add_esp_transport_over_udp) != 0,
MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
reformat_del_esp_transport_over_udp) != 0,
MLX5_CAP_IPSEC(mdev, ipsec_esn) != 0);
if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
mlx5_core_dbg(mdev, "Not an IPSec offload device\n");
return 0;
}
xa_init_flags(&mdev->ipsec_sadb, XA_FLAGS_ALLOC);
pipsec = kzalloc(sizeof(*pipsec), GFP_KERNEL);
if (pipsec == NULL)
return (ENOMEM);
pipsec->mdev = mdev;
pipsec->pdn = priv->pdn;
pipsec->mkey = priv->mr.key;
ret = mlx5e_ipsec_aso_init(pipsec);
if (ret)
goto err_ipsec_aso;
pipsec->wq = alloc_workqueue("mlx5e_ipsec", WQ_UNBOUND, 0);
if (pipsec->wq == NULL) {
ret = ENOMEM;
goto err_ipsec_wq;
}
ret = mlx5e_accel_ipsec_fs_init(pipsec);
if (ret)
goto err_ipsec_alloc;
if_setipsec_accel_methods(ifp, &mlx5e_ipsec_funcs);
priv->ipsec = pipsec;
mlx5_core_dbg(mdev, "IPSec attached to netdevice\n");
return 0;
err_ipsec_alloc:
destroy_workqueue(pipsec->wq);
err_ipsec_wq:
mlx5e_ipsec_aso_cleanup(pipsec);
err_ipsec_aso:
kfree(pipsec);
mlx5_core_err(priv->mdev, "IPSec initialization failed, %d\n", ret);
return ret;
}

File diff suppressed because it is too large.


@ -0,0 +1,486 @@
/*-
* Copyright (c) 2023 NVIDIA corporation & affiliates.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/types.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <net/pfkeyv2.h>
#include <netipsec/ipsec.h>
#include <dev/mlx5/mlx5_en/en.h>
#include <dev/mlx5/crypto.h>
#include <dev/mlx5/mlx5_accel/ipsec.h>
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
u32 caps = 0;
if (!MLX5_CAP_GEN(mdev, ipsec_offload))
return 0;
if (!MLX5_CAP_GEN(mdev, log_max_dek))
return 0;
if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
return 0;
if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
return 0;
if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) ||
!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
return 0;
if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload)) {
if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
reformat_add_esp_trasport) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
reformat_del_esp_trasport) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level))
caps |= MLX5_IPSEC_CAP_PRIO;
if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_esp_transport_over_udp) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_del_esp_transport_over_udp))
caps |= MLX5_IPSEC_CAP_ESPINUDP;
}
if (!caps)
return 0;
if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
caps |= MLX5_IPSEC_CAP_ESN;
return caps;
}
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
void *aso_ctx;
aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
/* ASO context */
MLX5_SET(ipsec_obj, obj, ipsec_aso_access_pd, pdn);
MLX5_SET(ipsec_obj, obj, full_offload, 1);
MLX5_SET(ipsec_aso, aso_ctx, valid, 1);
/* MLX5_IPSEC_ASO_REG_C_4_5 is a type C register that is used
* in flow steering to perform matching against. Please be
* aware that this register was chosen arbitrarily and can't
* be used in other places as long as IPsec packet offload is
* active.
*/
MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
if (attrs->replay_esn.trigger) {
MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);
if (attrs->dir == IPSEC_DIR_INBOUND) {
MLX5_SET(ipsec_aso, aso_ctx, window_sz,
attrs->replay_esn.replay_window);
if (attrs->replay_esn.replay_window != 0)
MLX5_SET(ipsec_aso, aso_ctx, mode,
MLX5_IPSEC_ASO_REPLAY_PROTECTION);
else
MLX5_SET(ipsec_aso, aso_ctx, mode,
MLX5_IPSEC_ASO_MODE);
}
MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
attrs->replay_esn.esn);
}
switch (attrs->dir) {
case IPSEC_DIR_OUTBOUND:
if (attrs->replay_esn.replay_window != 0)
MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
else
MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_MODE);
break;
default:
break;
}
}
static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
void *obj, *salt_p, *salt_iv_p;
int err;
obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);
/* salt and seq_iv */
salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));
MLX5_SET(ipsec_obj, obj, icv_length, MLX5_IPSEC_OBJECT_ICV_LEN_16B);
salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
/* esn */
if (attrs->replay_esn.trigger) {
MLX5_SET(ipsec_obj, obj, esn_en, 1);
MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);
}
/* enc./dec. key */
MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);
/* general object fields set */
MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
MLX5_CMD_OP_CREATE_GENERAL_OBJ);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_GENERAL_OBJECT_TYPES_IPSEC);
mlx5e_ipsec_packet_setup(obj, sa_entry->ipsec->pdn, attrs);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
sa_entry->ipsec_obj_id =
MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
return err;
}
static void mlx5_destroy_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
MLX5_CMD_OP_DESTROY_GENERAL_OBJ);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_GENERAL_OBJECT_TYPES_IPSEC);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct aes_gcm_keymat *aes_gcm = &sa_entry->attrs.aes_gcm;
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
int err;
/* key */
err = mlx5_encryption_key_create(mdev, sa_entry->ipsec->pdn,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC,
aes_gcm->aes_key,
aes_gcm->key_len,
&sa_entry->enc_key_id);
if (err) {
mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
return err;
}
err = mlx5_create_ipsec_obj(sa_entry);
if (err) {
mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
goto err_enc_key;
}
return 0;
err_enc_key:
mlx5_encryption_key_destroy(mdev, sa_entry->enc_key_id);
return err;
}
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
mlx5_destroy_ipsec_obj(sa_entry);
mlx5_encryption_key_destroy(mdev, sa_entry->enc_key_id);
}
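/*
 * Round-trip sketch, illustrative only: the two routines above are meant to
 * be paired.  Creation builds the DEK first and then the IPSEC object that
 * references it; teardown reverses that order internally.  The wrapper name
 * is invented for illustration.
 */
static int
example_sa_ctx_roundtrip(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	int err;

	err = mlx5_ipsec_create_sa_ctx(sa_entry);
	if (err != 0)
		return (err);
	/* Steering rules referencing sa_entry->ipsec_obj_id would go here. */
	mlx5_ipsec_free_sa_ctx(sa_entry);
	return (0);
}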
static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
struct mlx5_wqe_aso_ctrl_seg *data)
{
if (!data)
return;
ctrl->data_mask_mode = data->data_mask_mode;
ctrl->condition_1_0_operand = data->condition_1_0_operand;
ctrl->condition_1_0_offset = data->condition_1_0_offset;
ctrl->data_offset_condition_operand = data->data_offset_condition_operand;
ctrl->condition_0_data = data->condition_0_data;
ctrl->condition_0_mask = data->condition_0_mask;
ctrl->condition_1_data = data->condition_1_data;
ctrl->condition_1_mask = data->condition_1_mask;
ctrl->bitwise_data = data->bitwise_data;
ctrl->data_mask = data->data_mask;
}
static int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_wqe_aso_ctrl_seg *data)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_aso *aso = ipsec->aso;
struct mlx5_wqe_aso_ctrl_seg *ctrl;
struct mlx5_aso_wqe *wqe;
unsigned long expires;
u8 ds_cnt;
int ret;
spin_lock_bh(&aso->lock);
memset(aso->ctx, 0, sizeof(aso->ctx));
wqe = mlx5_aso_get_wqe(aso->aso);
ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
MLX5_ACCESS_ASO_OPC_MOD_IPSEC);
ctrl = &wqe->aso_ctrl;
ctrl->va_l = cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
ctrl->l_key = cpu_to_be32(ipsec->mkey);
mlx5e_ipsec_aso_copy(ctrl, data);
mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
expires = jiffies + msecs_to_jiffies(10);
do {
ret = mlx5_aso_poll_cq(aso->aso, false);
if (ret)
/* We are in atomic context */
udelay(10);
} while (ret && time_is_after_jiffies(expires));
spin_unlock_bh(&aso->lock);
return ret;
}
#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
u64 modify_field_select = 0;
u64 general_obj_types;
void *obj;
int err;
general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
return -EINVAL;
/* general object fields set */
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJ);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (err) {
mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
sa_entry->ipsec_obj_id, err);
return err;
}
obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);
/* esn */
if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
return -EOPNOTSUPP;
obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
MLX5_SET64(ipsec_obj, obj, modify_field_select,
MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);
/* general object fields set */
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJ);
return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
static void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
int err;
err = mlx5_modify_ipsec_obj(sa_entry, attrs);
if (err)
return;
memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
}
static void mlx5e_ipsec_aso_update(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_wqe_aso_ctrl_seg *data)
{
data->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
data->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE | MLX5_ASO_ALWAYS_TRUE << 4;
mlx5e_ipsec_aso_query(sa_entry, data);
}
#define MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET 0
static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
u32 mode_param)
{
struct mlx5_accel_esp_xfrm_attrs attrs = {};
struct mlx5_wqe_aso_ctrl_seg data = {};
if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
sa_entry->esn_state.esn_msb++;
sa_entry->esn_state.overlap = 0;
} else {
sa_entry->esn_state.overlap = 1;
}
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs, sa_entry->attrs.dir);
mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);
data.data_offset_condition_operand = MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
data.bitwise_data = cpu_to_be64(BIT_ULL(54));
data.data_mask = data.bitwise_data;
mlx5e_ipsec_aso_update(sa_entry, &data);
}
static void mlx5e_ipsec_handle_event(struct work_struct *_work)
{
struct mlx5e_ipsec_work *work =
container_of(_work, struct mlx5e_ipsec_work, work);
struct mlx5e_ipsec_sa_entry *sa_entry = work->data;
struct mlx5_accel_esp_xfrm_attrs *attrs;
struct mlx5e_ipsec_aso *aso;
int ret;
aso = sa_entry->ipsec->aso;
attrs = &sa_entry->attrs;
/* TODO: Kostia, this event should be locked/protected
 * from concurrent SA delete.
 */
ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
if (ret)
goto unlock;
if (attrs->replay_esn.trigger &&
!MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);
mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
}
unlock:
kfree(work);
}
void mlx5_object_change_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{
struct mlx5e_ipsec_sa_entry *sa_entry;
struct mlx5_eqe_obj_change *object;
struct mlx5e_ipsec_work *work;
u16 type;
object = &eqe->data.obj_change;
type = be16_to_cpu(object->obj_type);
if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
return;
sa_entry = xa_load(&dev->ipsec_sadb, be32_to_cpu(object->obj_id));
if (!sa_entry)
return;
work = kmalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
return;
INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
work->data = sa_entry;
queue_work(sa_entry->ipsec->wq, &work->work);
}
int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
{
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5e_ipsec_aso *aso;
struct device *pdev;
int err;
aso = kzalloc(sizeof(*ipsec->aso), GFP_KERNEL);
if (!aso)
return -ENOMEM;
pdev = &mdev->pdev->dev;
aso->dma_addr = dma_map_single(pdev, aso->ctx, sizeof(aso->ctx), DMA_BIDIRECTIONAL);
err = dma_mapping_error(pdev, aso->dma_addr);
if (err)
goto err_dma;
aso->aso = mlx5_aso_create(mdev, ipsec->pdn);
if (IS_ERR(aso->aso)) {
err = PTR_ERR(aso->aso);
goto err_aso_create;
}
spin_lock_init(&aso->lock);
ipsec->aso = aso;
return 0;
err_aso_create:
dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx), DMA_BIDIRECTIONAL);
err_dma:
kfree(aso);
return err;
}
void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
{
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5e_ipsec_aso *aso;
struct device *pdev;
aso = ipsec->aso;
pdev = &mdev->pdev->dev;
mlx5_aso_destroy(aso->aso);
dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx), DMA_BIDIRECTIONAL);
kfree(aso);
}

View File

@ -0,0 +1,76 @@
/*-
* Copyright (c) 2023 NVIDIA corporation & affiliates.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netipsec/keydb.h>
#include <netipsec/ipsec_offload.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/mlx5_en/en.h>
#include <dev/mlx5/mlx5_accel/ipsec.h>
#define MLX5_IPSEC_METADATA_HANDLE(ipsec_metadata) (ipsec_metadata & 0xFFFFFF)
int mlx5_accel_ipsec_rx_tag_add(if_t ifp, struct mbuf *mb)
{
struct mlx5e_priv *priv;
struct ipsec_accel_in_tag *tag;
struct m_tag *mtag;
priv = if_getsoftc(ifp);
if (priv->ipsec == NULL)
return (0);
mtag = m_tag_get(PACKET_TAG_IPSEC_ACCEL_IN, sizeof(*tag), M_NOWAIT);
if (mtag == NULL)
return -ENOMEM;
m_tag_prepend(mb, mtag);
return 0;
}
int mlx5e_accel_ipsec_handle_rx_cqe(struct mbuf *mb, struct mlx5_cqe64 *cqe)
{
struct ipsec_accel_in_tag *tag;
u32 drv_spi;
drv_spi = MLX5_IPSEC_METADATA_HANDLE(be32_to_cpu(cqe->ft_metadata));
tag = (struct ipsec_accel_in_tag *) m_tag_find(mb, PACKET_TAG_IPSEC_ACCEL_IN, NULL);
WARN_ON(tag == NULL);
if (tag)
tag->drv_spi = drv_spi;
return 0;
}
void
mlx5e_accel_ipsec_handle_tx_wqe(struct mbuf *mb, struct mlx5e_tx_wqe *wqe,
struct ipsec_accel_out_tag *tag)
{
wqe->eth.flow_table_metadata = cpu_to_be32(
mlx5e_accel_ipsec_get_metadata(tag->drv_spi));
}

View File

@ -29,6 +29,8 @@
#include <linux/if_ether.h>
#include <dev/mlx5/device.h>
#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_flow_table)
#define MLX5_MAX_UC_PER_VPORT(dev) \
(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))
@ -83,15 +85,15 @@ struct l2addr_node {
struct vport_ingress {
struct mlx5_flow_table *acl;
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_rule *drop_rule;
struct mlx5_flow_handle *drop_rule;
};
struct vport_egress {
struct mlx5_flow_table *acl;
struct mlx5_flow_group *allowed_vlans_grp;
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_rule *allowed_vlan;
struct mlx5_flow_rule *drop_rule;
struct mlx5_flow_handle *allowed_vlan;
struct mlx5_flow_handle *drop_rule;
};
struct mlx5_vport {

View File

@ -0,0 +1,71 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020 Mellanox Technologies. */
#ifndef __ML5_ESW_CHAINS_H__
#define __ML5_ESW_CHAINS_H__
#include <dev/mlx5/fs.h>
struct mlx5_fs_chains;
enum mlx5_chains_flags {
MLX5_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED = BIT(1),
MLX5_CHAINS_FT_TUNNEL_SUPPORTED = BIT(2),
};
struct mlx5_chains_attr {
enum mlx5_flow_namespace_type ns;
int fs_base_prio;
int fs_base_level;
u32 flags;
u32 max_grp_num;
struct mlx5_flow_table *default_ft;
};
bool
mlx5_chains_prios_supported(struct mlx5_fs_chains *chains);
bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains);
bool
mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains);
u32
mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains);
u32
mlx5_chains_get_chain_range(struct mlx5_fs_chains *chains);
u32
mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains);
struct mlx5_flow_table *
mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
u32 level);
void
mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
u32 level);
struct mlx5_flow_table *
mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains);
struct mlx5_flow_table *
mlx5_chains_create_global_table(struct mlx5_fs_chains *chains);
void
mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
struct mlx5_flow_table *ft);
int
mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain,
u32 *chain_mapping);
int
mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains,
u32 chain_mapping);
struct mlx5_fs_chains *
mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr);
void mlx5_chains_destroy(struct mlx5_fs_chains *chains);
void
mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
struct mlx5_flow_table *ft);
void
mlx5_chains_print_info(struct mlx5_fs_chains *chains);
#endif /* __ML5_ESW_CHAINS_H__ */
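/*
 * Usage sketch for the chains API declared above, illustrative only.  The
 * wrapper name, the namespace choice and the chain/prio/level numbers are
 * arbitrary assumptions, and error handling assumes the usual ERR_PTR
 * convention.
 */
static int
example_chains_roundtrip(struct mlx5_core_dev *dev)
{
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	struct mlx5_flow_table *ft;

	attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;	/* assumed namespace */
	attr.max_grp_num = 2;

	chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(chains))
		return (PTR_ERR(chains));

	ft = mlx5_chains_get_table(chains, 0, 1, 0);	/* chain 0, prio 1, level 0 */
	if (!IS_ERR(ft))
		mlx5_chains_put_table(chains, 0, 1, 0);

	mlx5_chains_destroy(chains);
	return (0);
}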

View File

@ -0,0 +1,120 @@
/*
* Copyright (c) 2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _MLX5_FS_CMD_
#define _MLX5_FS_CMD_
#include "fs_core.h"
struct mlx5_flow_cmds {
int (*create_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table_attr *ft_attr,
struct mlx5_flow_table *next_ft);
int (*destroy_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft);
int (*modify_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft);
int (*create_flow_group)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
struct mlx5_flow_group *fg);
int (*destroy_flow_group)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg);
int (*create_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte);
int (*update_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte);
int (*delete_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte);
int (*update_root_ft)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 underlay_qpn,
bool disconnect);
int (*packet_reformat_alloc)(struct mlx5_flow_root_namespace *ns,
struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type namespace,
struct mlx5_pkt_reformat *pkt_reformat);
void (*packet_reformat_dealloc)(struct mlx5_flow_root_namespace *ns,
struct mlx5_pkt_reformat *pkt_reformat);
int (*modify_header_alloc)(struct mlx5_flow_root_namespace *ns,
u8 namespace, u8 num_actions,
void *modify_actions,
struct mlx5_modify_hdr *modify_hdr);
void (*modify_header_dealloc)(struct mlx5_flow_root_namespace *ns,
struct mlx5_modify_hdr *modify_hdr);
int (*set_peer)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns);
int (*create_ns)(struct mlx5_flow_root_namespace *ns);
int (*destroy_ns)(struct mlx5_flow_root_namespace *ns);
u32 (*get_capabilities)(struct mlx5_flow_root_namespace *ns,
enum fs_flow_table_type ft_type);
};
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
u32 *id);
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id);
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
u64 *packets, u64 *bytes);
int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len);
int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
u32 *out);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
#endif

View File

@ -1,14 +1,11 @@
/*-
* Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
/*
* Copyright (c) 2015, Mellanox Technologies. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
@ -26,306 +23,327 @@
#ifndef _MLX5_FS_CORE_
#define _MLX5_FS_CORE_
#include <asm/atomic.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <dev/mlx5/fs.h>
enum fs_type {
#define FDB_TC_MAX_CHAIN 3
#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1)
/* The index of the last real chain (FT) + 1 as chain zero is valid as well */
#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1)
#define FDB_TC_MAX_PRIO 16
#define FDB_TC_LEVELS_PER_PRIO 2
struct mlx5_flow_definer {
enum mlx5_flow_namespace_type ns_type;
u32 id;
};
struct mlx5_modify_hdr {
enum mlx5_flow_namespace_type ns_type;
union {
u32 id;
};
};
struct mlx5_pkt_reformat {
enum mlx5_flow_namespace_type ns_type;
int reformat_type; /* from mlx5_ifc */
union {
u32 id;
};
};
/* FS_TYPE_PRIO_CHAINS is a PRIO that will have namespaces only,
* and those are in parallel to one another when going over them to connect
* a new flow table. Meaning the last flow table in a TYPE_PRIO prio in one
* parallel namespace will not automatically connect to the first flow table
* found in any prio in any next namespace, but skip the entire containing
* TYPE_PRIO_CHAINS prio.
*
* This is used to implement tc chains, each chain of prios is a different
* namespace inside a containing TYPE_PRIO_CHAINS prio.
*/
enum fs_node_type {
FS_TYPE_NAMESPACE,
FS_TYPE_PRIO,
FS_TYPE_PRIO_CHAINS,
FS_TYPE_FLOW_TABLE,
FS_TYPE_FLOW_GROUP,
FS_TYPE_FLOW_ENTRY,
FS_TYPE_FLOW_DEST
};
enum fs_ft_type {
/* Compatibility alias: legacy fs_ft_type maps to fs_flow_table_type. */
#define fs_ft_type fs_flow_table_type
enum fs_flow_table_type {
FS_FT_NIC_RX = 0x0,
FS_FT_NIC_TX = 0x1,
FS_FT_ESW_EGRESS_ACL = 0x2,
FS_FT_ESW_INGRESS_ACL = 0x3,
FS_FT_FDB = 0X4,
FS_FT_SNIFFER_RX = 0x5,
FS_FT_SNIFFER_TX = 0x6
FS_FT_SNIFFER_RX = 0X5,
FS_FT_SNIFFER_TX = 0X6,
FS_FT_RDMA_RX = 0X7,
FS_FT_RDMA_TX = 0X8,
FS_FT_PORT_SEL = 0X9,
FS_FT_MAX_TYPE = FS_FT_PORT_SEL,
};
enum fs_flow_table_op_mod {
FS_FT_OP_MOD_NORMAL,
FS_FT_OP_MOD_LAG_DEMUX,
};
enum fs_fte_status {
FS_FTE_STATUS_EXISTING = 1UL << 0,
};
/* Should always be the first variable in the struct */
struct fs_base {
struct list_head list;
struct fs_base *parent;
enum fs_type type;
struct kref refcount;
enum mlx5_flow_steering_mode {
MLX5_FLOW_STEERING_MODE_DMFS,
MLX5_FLOW_STEERING_MODE_SMFS
};
enum mlx5_flow_steering_capabilty {
MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX = 1UL << 0,
MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX = 1UL << 1,
MLX5_FLOW_STEERING_CAP_MATCH_RANGES = 1UL << 2,
};
struct mlx5_flow_steering {
struct mlx5_core_dev *dev;
enum mlx5_flow_steering_mode mode;
struct kmem_cache *fgs_cache;
struct kmem_cache *ftes_cache;
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
struct mlx5_flow_namespace **fdb_sub_ns;
struct mlx5_flow_root_namespace **esw_egress_root_ns;
struct mlx5_flow_root_namespace **esw_ingress_root_ns;
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_tx_root_ns;
struct mlx5_flow_root_namespace *egress_root_ns;
struct mlx5_flow_root_namespace *port_sel_root_ns;
int esw_egress_acl_vports;
int esw_ingress_acl_vports;
};
struct fs_node {
struct list_head list;
struct list_head children;
enum fs_node_type type;
struct fs_node *parent;
struct fs_node *root;
/* lock the node for writing and traversing */
struct mutex lock;
struct completion complete;
atomic_t users_refcount;
const char *name;
struct rw_semaphore lock;
refcount_t refcount;
bool active;
void (*del_hw_func)(struct fs_node *);
void (*del_sw_func)(struct fs_node *);
atomic_t version;
};
struct mlx5_flow_rule {
struct fs_base base;
struct fs_node node;
struct mlx5_flow_table *ft;
struct mlx5_flow_destination dest_attr;
struct list_head clients_data;
/* protect the clients list */
struct mutex clients_lock;
/* next_ft should be accessed under chain_lock and only if the
 * destination type is FWD_NEXT_FT.
 */
struct list_head next_ft;
u32 sw_action;
};
struct fs_fte {
struct fs_base base;
u32 val[MLX5_ST_SZ_DW(fte_match_param)];
uint32_t dests_size;
struct list_head dests;
uint32_t index; /* index in ft */
struct mlx5_flow_act flow_act;
u32 sw_action; /* enum mlx5_rule_fwd_action */
enum fs_fte_status status;
};
struct fs_star_rule {
struct mlx5_flow_group *fg;
struct fs_fte *fte;
struct mlx5_flow_handle {
int num_rules;
struct mlx5_flow_rule *rule[];
};
/* Type of children is mlx5_flow_group */
struct mlx5_flow_table {
struct fs_base base;
/* sorted list by start_index */
struct list_head fgs;
struct {
bool active;
unsigned int max_types;
unsigned int group_size;
unsigned int num_types;
unsigned int max_fte;
} autogroup;
struct fs_node node;
u32 id;
u16 vport;
unsigned int max_fte;
unsigned int level;
uint32_t id;
u16 vport;
enum fs_ft_type type;
struct fs_star_rule star_rule;
unsigned int shared_refcount;
enum fs_flow_table_type type;
enum fs_flow_table_op_mod op_mod;
struct {
bool active;
unsigned int required_groups;
unsigned int group_size;
unsigned int num_groups;
unsigned int max_fte;
} autogroup;
/* Protect fwd_rules */
struct mutex lock;
/* FWD rules that point to this flow table */
struct list_head fwd_rules;
u32 flags;
struct xarray fgs_xa;
enum mlx5_flow_table_miss_action def_miss_action;
struct mlx5_flow_namespace *ns;
};
enum fs_prio_flags {
MLX5_CORE_FS_PRIO_SHARED = 1
struct mlx5_ft_underlay_qp {
struct list_head list;
u32 qpn;
};
#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_e00
/* Calculate the fte_match_param length without the reserved length.
 * Make sure the reserved field is the last one.
 */
#define MLX5_ST_SZ_DW_MATCH_PARAM \
((MLX5_BYTE_OFF(fte_match_param, MLX5_FTE_MATCH_PARAM_RESERVED) / sizeof(u32)) + \
BUILD_BUG_ON_ZERO(MLX5_ST_SZ_BYTES(fte_match_param) != \
MLX5_FLD_SZ_BYTES(fte_match_param, \
MLX5_FTE_MATCH_PARAM_RESERVED) +\
MLX5_BYTE_OFF(fte_match_param, \
MLX5_FTE_MATCH_PARAM_RESERVED)))
/* Type of children is mlx5_flow_rule */
struct fs_fte {
struct fs_node node;
u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
u32 dests_size;
u32 fwd_dests;
u32 index;
struct mlx5_flow_context flow_context;
struct mlx5_flow_act action;
enum fs_fte_status status;
struct mlx5_fc *counter;
int modify_mask;
};
/* Type of children is mlx5_flow_table/namespace */
struct fs_prio {
struct fs_base base;
struct list_head objs; /* each object is a namespace or ft */
unsigned int max_ft;
unsigned int num_ft;
unsigned int max_ns;
struct fs_node node;
unsigned int num_levels;
unsigned int start_level;
unsigned int prio;
/* When creating a shared flow table, this lock should be taken */
struct mutex shared_lock;
u8 flags;
unsigned int num_ft;
};
/* Type of children is fs_prio */
struct mlx5_flow_namespace {
/* parent == NULL => root ns */
struct fs_base base;
/* sorted by priority number */
struct list_head prios; /* list of fs_prios */
struct list_head list_notifiers;
struct rw_semaphore notifiers_rw_sem;
struct rw_semaphore dests_rw_sem;
struct fs_node node;
enum mlx5_flow_table_miss_action def_miss_action;
};
struct mlx5_flow_group_mask {
u8 match_criteria_enable;
u32 match_criteria[MLX5_ST_SZ_DW_MATCH_PARAM];
};
/* Type of children is fs_fte */
struct mlx5_flow_group {
struct fs_node node;
struct mlx5_flow_group_mask mask;
u32 start_index;
u32 max_ftes;
struct ida fte_allocator;
u32 id;
struct xarray ftes_xa;
};
struct mlx5_flow_root_namespace {
struct mlx5_flow_namespace ns;
struct mlx5_flow_table *ft_level_0;
enum fs_ft_type table_type;
enum mlx5_flow_steering_mode mode;
enum fs_flow_table_type table_type;
struct mlx5_core_dev *dev;
struct mlx5_flow_table *root_ft;
/* When chaining flow-tables, this lock should be taken */
struct mutex fs_chain_lock;
/* Should be held when chaining flow tables */
struct mutex chain_lock;
struct list_head underlay_qpns;
const struct mlx5_flow_cmds *cmds;
};
struct mlx5_flow_group {
struct fs_base base;
struct list_head ftes;
struct mlx5_core_fs_mask mask;
uint32_t start_index;
uint32_t max_ftes;
uint32_t num_ftes;
uint32_t id;
};
struct mlx5_flow_handler {
struct list_head list;
rule_event_fn add_dst_cb;
rule_event_fn del_dst_cb;
void *client_context;
struct mlx5_flow_namespace *ns;
};
struct fs_client_priv_data {
struct mlx5_flow_handler *fs_handler;
struct list_head list;
void *client_dst_data;
};
struct mlx5_modify_hdr {
enum mlx5_flow_namespace_type ns_type;
u32 id;
};
struct mlx5_pkt_reformat {
enum mlx5_flow_namespace_type ns_type;
int reformat_type; /* from mlx5_ifc */
u32 id;
};
void _fs_remove_node(struct kref *kref);
#define fs_get_obj(v, _base) {v = container_of((_base), typeof(*v), base); }
#define fs_get_parent(v, child) {v = (child)->base.parent ? \
container_of((child)->base.parent, \
typeof(*v), base) : NULL; }
#define fs_list_for_each_entry(pos, cond, root) \
list_for_each_entry(pos, root, base.list) \
if (!(cond)) {} else
#define fs_list_for_each_entry_continue(pos, cond, root) \
list_for_each_entry_continue(pos, root, base.list) \
if (!(cond)) {} else
#define fs_list_for_each_entry_reverse(pos, cond, root) \
list_for_each_entry_reverse(pos, root, base.list) \
if (!(cond)) {} else
#define fs_list_for_each_entry_continue_reverse(pos, cond, root) \
list_for_each_entry_continue_reverse(pos, root, base.list) \
if (!(cond)) {} else
#define fs_for_each_ft(pos, prio) \
fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_FLOW_TABLE, \
&(prio)->objs)
#define fs_for_each_ft_reverse(pos, prio) \
fs_list_for_each_entry_reverse(pos, \
(pos)->base.type == FS_TYPE_FLOW_TABLE, \
&(prio)->objs)
#define fs_for_each_ns(pos, prio) \
fs_list_for_each_entry(pos, \
(pos)->base.type == FS_TYPE_NAMESPACE, \
&(prio)->objs)
#define fs_for_each_ns_or_ft_reverse(pos, prio) \
list_for_each_entry_reverse(pos, &(prio)->objs, list) \
if (!((pos)->type == FS_TYPE_NAMESPACE || \
(pos)->type == FS_TYPE_FLOW_TABLE)) {} else
#define fs_for_each_ns_or_ft(pos, prio) \
list_for_each_entry(pos, &(prio)->objs, list) \
if (!((pos)->type == FS_TYPE_NAMESPACE || \
(pos)->type == FS_TYPE_FLOW_TABLE)) {} else
#define fs_for_each_ns_or_ft_continue_reverse(pos, prio) \
list_for_each_entry_continue_reverse(pos, &(prio)->objs, list) \
if (!((pos)->type == FS_TYPE_NAMESPACE || \
(pos)->type == FS_TYPE_FLOW_TABLE)) {} else
#define fs_for_each_ns_or_ft_continue(pos, prio) \
list_for_each_entry_continue(pos, &(prio)->objs, list) \
if (!((pos)->type == FS_TYPE_NAMESPACE || \
(pos)->type == FS_TYPE_FLOW_TABLE)) {} else
#define fs_for_each_prio(pos, ns) \
fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_PRIO, \
&(ns)->prios)
#define fs_for_each_prio_reverse(pos, ns) \
fs_list_for_each_entry_reverse(pos, (pos)->base.type == FS_TYPE_PRIO, \
&(ns)->prios)
#define fs_for_each_prio_continue(pos, ns) \
fs_list_for_each_entry_continue(pos, (pos)->base.type == FS_TYPE_PRIO, \
&(ns)->prios)
#define fs_for_each_prio_continue_reverse(pos, ns) \
fs_list_for_each_entry_continue_reverse(pos, \
(pos)->base.type == FS_TYPE_PRIO, \
&(ns)->prios)
#define fs_for_each_fg(pos, ft) \
fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_FLOW_GROUP, \
&(ft)->fgs)
#define fs_for_each_fte(pos, fg) \
fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_FLOW_ENTRY, \
&(fg)->ftes)
#define fs_for_each_dst(pos, fte) \
fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_FLOW_DEST, \
&(fte)->dests)
int mlx5_cmd_fs_create_ft(struct mlx5_core_dev *dev,
u16 vport, enum fs_ft_type type, unsigned int level,
unsigned int log_size, const char *name, unsigned int *table_id);
int mlx5_cmd_fs_destroy_ft(struct mlx5_core_dev *dev,
u16 vport,
enum fs_ft_type type, unsigned int table_id);
int mlx5_cmd_fs_create_fg(struct mlx5_core_dev *dev,
u32 *in,
u16 vport,
enum fs_ft_type type, unsigned int table_id,
unsigned int *group_id);
int mlx5_cmd_fs_destroy_fg(struct mlx5_core_dev *dev,
u16 vport,
enum fs_ft_type type, unsigned int table_id,
unsigned int group_id);
int mlx5_cmd_fs_set_fte(struct mlx5_core_dev *dev,
u16 vport,
enum fs_fte_status *fte_status,
u32 *match_val,
enum fs_ft_type type, unsigned int table_id,
unsigned int index, unsigned int group_id,
struct mlx5_flow_act *flow_act,
u32 sw_action, int dest_size,
struct list_head *dests); /* mlx5_flow_desination */
int mlx5_cmd_fs_delete_fte(struct mlx5_core_dev *dev,
u16 vport,
enum fs_fte_status *fte_status,
enum fs_ft_type type, unsigned int table_id,
unsigned int index);
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
enum fs_ft_type type,
unsigned int id);
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
unsigned long interval);
int mlx5_cmd_modify_header_alloc(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type namespace,
u8 num_actions,
void *modify_actions,
struct mlx5_modify_hdr *modify_hdr);
void mlx5_cmd_modify_header_dealloc(struct mlx5_core_dev *dev,
struct mlx5_modify_hdr *modify_hdr);
int mlx5_cmd_packet_reformat_alloc(struct mlx5_core_dev *dev,
struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type namespace,
struct mlx5_pkt_reformat *pkt_reformat);
void mlx5_cmd_packet_reformat_dealloc(struct mlx5_core_dev *dev,
struct mlx5_pkt_reformat *pkt_reformat);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
struct delayed_work *dwork,
unsigned long delay);
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
unsigned long interval);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns);
int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
enum mlx5_flow_steering_mode mode);
int mlx5_fs_core_alloc(struct mlx5_core_dev *dev);
void mlx5_fs_core_free(struct mlx5_core_dev *dev);
int mlx5_fs_core_init(struct mlx5_core_dev *dev);
void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev);
int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports);
void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev);
u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type);
struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
#define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); }
#define fs_list_for_each_entry(pos, root) \
list_for_each_entry(pos, root, node.list)
#define fs_list_for_each_entry_safe(pos, tmp, root) \
list_for_each_entry_safe(pos, tmp, root, node.list)
#define fs_for_each_ns_or_ft_reverse(pos, prio) \
list_for_each_entry_reverse(pos, &(prio)->node.children, list)
#define fs_for_each_ns_or_ft(pos, prio) \
list_for_each_entry(pos, (&(prio)->node.children), list)
#define fs_for_each_prio(pos, ns) \
fs_list_for_each_entry(pos, &(ns)->node.children)
#define fs_for_each_ns(pos, prio) \
fs_list_for_each_entry(pos, &(prio)->node.children)
#define fs_for_each_ft(pos, prio) \
fs_list_for_each_entry(pos, &(prio)->node.children)
#define fs_for_each_ft_safe(pos, tmp, prio) \
fs_list_for_each_entry_safe(pos, tmp, &(prio)->node.children)
#define fs_for_each_fg(pos, ft) \
fs_list_for_each_entry(pos, &(ft)->node.children)
#define fs_for_each_fte(pos, fg) \
fs_list_for_each_entry(pos, &(fg)->node.children)
#define fs_for_each_dst(pos, fte) \
fs_list_for_each_entry(pos, &(fte)->node.children)
#define MLX5_CAP_FLOWTABLE_TYPE(mdev, cap, type) ( \
(type == FS_FT_NIC_RX) ? MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) : \
(type == FS_FT_NIC_TX) ? MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) : \
(type == FS_FT_ESW_EGRESS_ACL) ? MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) : \
(type == FS_FT_ESW_INGRESS_ACL) ? MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) : \
(type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
(type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \
(type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \
(type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \
(type == FS_FT_RDMA_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) : \
(type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \
(BUILD_BUG_ON_ZERO(FS_FT_PORT_SEL != FS_FT_MAX_TYPE))\
)
#endif

View File

@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021 Mellanox Technologies. */
#ifndef __MLX5_FS_FT_POOL_H__
#define __MLX5_FS_FT_POOL_H__
#include <linux/module.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_core/fs_core.h>
#include <linux/compiler.h>
#define POOL_NEXT_SIZE 0
int mlx5_ft_pool_init(struct mlx5_core_dev *dev);
void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev);
int
mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type table_type,
int desired_size);
void
mlx5_ft_pool_put_sz(struct mlx5_core_dev *dev, int sz);
#endif /* __MLX5_FS_FT_POOL_H__ */

View File

@ -27,15 +27,15 @@
#define __MLX5E_ACCEL_FS_TCP_H__
struct inpcb;
struct mlx5_flow_rule;
struct mlx5_flow_handle;
struct mlx5e_priv;
int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *);
void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *);
struct mlx5_flow_rule *
struct mlx5_flow_handle *
mlx5e_accel_fs_add_inpcb(struct mlx5e_priv *,
struct inpcb *, uint32_t tirn, uint32_t flow_tag, uint16_t vlan_id);
#define MLX5E_ACCEL_FS_ADD_INPCB_NO_VLAN 0xFFFF
void mlx5e_accel_fs_del_inpcb(struct mlx5_flow_rule *);
void mlx5e_accel_fs_del_inpcb(struct mlx5_flow_handle *);
#endif /* __MLX5E_ACCEL_FS_TCP_H__ */

View File

@ -614,6 +614,9 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJ);
MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJ);
MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJ);
MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
default: return "unknown command opcode";
}
}

View File

@ -163,4 +163,14 @@ enum {
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
static inline int mlx5_init_fs(struct mlx5_core_dev *dev)
{
return 0;
}
static inline int mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
return 0;
}
#endif /* __MLX5_CORE_H__ */

View File

@ -0,0 +1,94 @@
/*-
* Copyright (c) 2019-2021, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include "opt_rss.h"
#include "opt_ratelimit.h"
#include <linux/kernel.h>
#include <linux/module.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/crypto.h>
int mlx5_encryption_key_create(struct mlx5_core_dev *mdev, u32 pdn, u32 key_type,
const void *p_key, u32 key_len, u32 *p_obj_id)
{
u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
u32 out[MLX5_ST_SZ_DW(create_encryption_key_out)] = {};
u64 general_obj_types;
int err;
general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJ_TYPES_ENCRYPTION_KEY))
return -EINVAL;
switch (key_len) {
case 128 / 8:
memcpy(MLX5_ADDR_OF(create_encryption_key_in, in,
encryption_key_object.key[4]), p_key, 128 / 8);
MLX5_SET(create_encryption_key_in, in, encryption_key_object.pd, pdn);
MLX5_SET(create_encryption_key_in, in, encryption_key_object.key_size,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128);
MLX5_SET(create_encryption_key_in, in, encryption_key_object.key_type,
key_type);
break;
case 256 / 8:
memcpy(MLX5_ADDR_OF(create_encryption_key_in, in,
encryption_key_object.key[0]), p_key, 256 / 8);
MLX5_SET(create_encryption_key_in, in, encryption_key_object.pd, pdn);
MLX5_SET(create_encryption_key_in, in, encryption_key_object.key_size,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256);
MLX5_SET(create_encryption_key_in, in, encryption_key_object.key_type,
key_type);
break;
default:
return -EINVAL;
}
MLX5_SET(create_encryption_key_in, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJ);
MLX5_SET(create_encryption_key_in, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (err == 0)
*p_obj_id = MLX5_GET(create_encryption_key_out, out, obj_id);
/* avoid leaking key on the stack */
explicit_bzero(in, sizeof(in));
return err;
}
int mlx5_encryption_key_destroy(struct mlx5_core_dev *mdev, u32 oid)
{
u32 in[MLX5_ST_SZ_DW(destroy_encryption_key_in)] = {};
u32 out[MLX5_ST_SZ_DW(destroy_encryption_key_out)] = {};
MLX5_SET(destroy_encryption_key_in, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJ);
MLX5_SET(destroy_encryption_key_in, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
MLX5_SET(destroy_encryption_key_in, in, obj_id, oid);
return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
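/*
 * Round-trip sketch, illustrative only: create and destroy a 128-bit IPsec
 * DEK using the two commands above.  The wrapper name and the all-zero key
 * material are placeholders; only the create/destroy calls and the
 * MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC key type are taken
 * from this change.
 */
static int
example_dek_roundtrip(struct mlx5_core_dev *mdev, u32 pdn)
{
	static const u8 key[128 / 8] = { 0 };	/* placeholder key material */
	u32 obj_id;
	int err;

	err = mlx5_encryption_key_create(mdev, pdn,
	    MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC,
	    key, sizeof(key), &obj_id);
	if (err != 0)
		return (err);
	return (mlx5_encryption_key_destroy(mdev, obj_id));
}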

View File

@ -33,6 +33,7 @@
#include <dev/mlx5/mlx5_fpga/core.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_core/eswitch.h>
#include <dev/mlx5/mlx5_accel/ipsec.h>
#ifdef RSS
#include <net/rss_config.h>
@ -165,6 +166,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT";
case MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT:
return "MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT";
case MLX5_EVENT_TYPE_OBJECT_CHANGE:
return "MLX5_EVENT_TYPE_OBJECT_CHANGE";
default:
return "Unrecognized event";
}
@ -370,6 +373,10 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
mlx5_temp_warning_event(dev, eqe);
break;
case MLX5_EVENT_TYPE_OBJECT_CHANGE:
mlx5_object_change_event(dev, eqe);
break;
default:
mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
eqe->type, eq->eqn);
@ -571,6 +578,10 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT);
}
if (mlx5_ipsec_device_caps(dev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
async_event_mask |=
(1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD);
if (err) {

View File

@ -64,7 +64,7 @@ struct esw_uc_addr {
/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
struct l2addr_node node;
struct mlx5_flow_rule *uplink_rule; /* Forward to uplink rule */
struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
u32 refcnt;
};
@ -73,7 +73,7 @@ struct vport_addr {
struct l2addr_node node;
u8 action;
u32 vport;
struct mlx5_flow_rule *flow_rule; /* SRIOV only */
struct mlx5_flow_handle *flow_rule; /* SRIOV only */
};
enum {
@ -215,59 +215,54 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
}
/* E-Switch FDB */
static struct mlx5_flow_rule *
static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
{
int match_header = MLX5_MATCH_OUTER_HEADERS;
struct mlx5_flow_destination dest;
struct mlx5_flow_rule *flow_rule = NULL;
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_act flow_act = {};
u32 *match_v;
u32 *match_c;
struct mlx5_flow_spec *spec;
u8 *dmac_v;
u8 *dmac_c;
match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
if (!match_v || !match_c) {
printf("mlx5_core: WARN: ""FDB: Failed to alloc match parameters\n");
spec = kzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
printf("mlx5_core: WARN: ""FDB: Failed to alloc flow spec\n");
goto out;
}
dmac_v = MLX5_ADDR_OF(fte_match_param, match_v,
dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dmac_47_16);
dmac_c = MLX5_ADDR_OF(fte_match_param, match_c,
dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers.dmac_47_16);
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
ether_addr_copy(dmac_v, mac);
/* Match criteria mask */
memset(dmac_c, 0xff, 6);
dest.type = MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT;
dest.vport_num = vport;
dest.vport.num = vport;
esw_debug(esw->dev,
"\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
dmac_v, dmac_c, vport);
flow_act.action = MLX5_FLOW_RULE_FWD_ACTION_DEST;
flow_rule =
mlx5_add_flow_rule(esw->fdb_table.fdb,
match_header,
match_c,
match_v,
MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
&flow_act, &dest, 1);
if (IS_ERR_OR_NULL(flow_rule)) {
printf("mlx5_core: WARN: ""FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
flow_rule = NULL;
}
out:
kfree(match_v);
kfree(match_c);
kfree(spec);
return flow_rule;
}
static int esw_create_fdb_table(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb;
@ -295,7 +290,9 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw)
/* (-2) Since MaorG said so .. */
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)) - 2;
fdb = mlx5_create_flow_table(root_ns, 0, "FDB", table_size);
ft_attr.prio = 0;
ft_attr.max_fte = table_size;
fdb = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR_OR_NULL(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create FDB Table err %d\n", err);
@ -397,7 +394,7 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
mlx5_mpfs_del_mac(esw->dev, esw_uc->table_index);
mlx5_del_flow_rule(&vaddr->flow_rule);
mlx5_del_flow_rules(&vaddr->flow_rule);
l2addr_hash_del(esw_uc);
return 0;
@ -456,12 +453,12 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
vport, mac, vaddr->flow_rule, esw_mc->refcnt,
esw_mc->uplink_rule);
mlx5_del_flow_rule(&vaddr->flow_rule);
mlx5_del_flow_rules(&vaddr->flow_rule);
if (--esw_mc->refcnt)
return 0;
mlx5_del_flow_rule(&esw_mc->uplink_rule);
mlx5_del_flow_rules(&esw_mc->uplink_rule);
l2addr_hash_del(esw_mc);
return 0;
@ -602,13 +599,13 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_group *vlan_grp = NULL;
struct mlx5_flow_group *drop_grp = NULL;
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *acl;
void *match_criteria;
char table_name[32];
u32 *flow_group_in;
int table_size = 2;
int err = 0;
@ -629,8 +626,8 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
if (!flow_group_in)
return;
snprintf(table_name, 32, "egress_%d", vport->vport);
acl = mlx5_create_vport_flow_table(root_ns, vport->vport, 0, table_name, table_size);
ft_attr.max_fte = table_size;
acl = mlx5_create_vport_flow_table(root_ns, &ft_attr, vport->vport);
if (IS_ERR_OR_NULL(acl)) {
err = PTR_ERR(acl);
esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
@ -678,8 +675,8 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
mlx5_del_flow_rule(&vport->egress.allowed_vlan);
mlx5_del_flow_rule(&vport->egress.drop_rule);
mlx5_del_flow_rules(&vport->egress.allowed_vlan);
mlx5_del_flow_rules(&vport->egress.drop_rule);
}
static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
@ -703,12 +700,12 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *acl;
struct mlx5_flow_group *g;
void *match_criteria;
char table_name[32];
u32 *flow_group_in;
int table_size = 1;
int err = 0;
@ -729,8 +726,8 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
if (!flow_group_in)
return;
snprintf(table_name, 32, "ingress_%d", vport->vport);
acl = mlx5_create_vport_flow_table(root_ns, vport->vport, 0, table_name, table_size);
ft_attr.max_fte = table_size;
acl = mlx5_create_vport_flow_table(root_ns, &ft_attr, vport->vport);
if (IS_ERR_OR_NULL(acl)) {
err = PTR_ERR(acl);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
@ -763,7 +760,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
mlx5_del_flow_rule(&vport->ingress.drop_rule);
mlx5_del_flow_rules(&vport->ingress.drop_rule);
}
static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
@ -785,9 +782,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_destination dest;
u32 *match_v;
u32 *match_c;
struct mlx5_flow_spec *spec;
int err = 0;
if (IS_ERR_OR_NULL(vport->ingress.acl)) {
@ -806,35 +801,28 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
"vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->vlan, vport->qos);
match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
if (!match_v || !match_c) {
spec = kzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
err = -ENOMEM;
esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
vport->vport, err);
goto out;
}
MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.cvlan_tag);
dest.type = MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT;
dest.vport_num = vport->vport;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
flow_act.action = MLX5_FLOW_RULE_FWD_ACTION_DROP;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
vport->ingress.drop_rule =
mlx5_add_flow_rule(vport->ingress.acl,
MLX5_MATCH_OUTER_HEADERS,
match_c,
match_v,
MLX5_FLOW_RULE_FWD_ACTION_DROP,
&flow_act, &dest);
mlx5_add_flow_rules(vport->ingress.acl, spec,
&flow_act, NULL, 0);
if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
printf("mlx5_core: WARN: ""vport[%d] configure ingress rules, err(%d)\n", vport->vport, err);
vport->ingress.drop_rule = NULL;
}
out:
kfree(match_v);
kfree(match_c);
kfree(spec);
return err;
}
@ -842,9 +830,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_destination dest;
u32 *match_v;
u32 *match_c;
struct mlx5_flow_spec *spec;
int err = 0;
if (IS_ERR_OR_NULL(vport->egress.acl)) {
@ -862,9 +848,8 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->vlan, vport->qos);
match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
if (!match_v || !match_c) {
spec = kzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
err = -ENOMEM;
esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
vport->vport, err);
@ -872,21 +857,17 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
}
/* Allowed vlan rule */
MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid);
MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->vlan);
dest.type = MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT;
dest.vport_num = vport->vport;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
flow_act.action = MLX5_FLOW_RULE_FWD_ACTION_ALLOW;
vport->egress.allowed_vlan =
mlx5_add_flow_rule(vport->egress.acl,
MLX5_MATCH_OUTER_HEADERS,
match_c,
match_v,
MLX5_FLOW_RULE_FWD_ACTION_ALLOW,
&flow_act, &dest);
mlx5_add_flow_rules(vport->egress.acl, spec,
&flow_act, NULL, 0);
if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
err = PTR_ERR(vport->egress.allowed_vlan);
printf("mlx5_core: WARN: ""vport[%d] configure egress allowed vlan rule failed, err(%d)\n", vport->vport, err);
@ -894,24 +875,17 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
goto out;
}
/* Drop others rule (star rule) */
memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
flow_act.action = MLX5_FLOW_RULE_FWD_ACTION_DROP;
vport->egress.drop_rule =
mlx5_add_flow_rule(vport->egress.acl,
0,
match_c,
match_v,
MLX5_FLOW_RULE_FWD_ACTION_DROP,
&flow_act, &dest);
mlx5_add_flow_rules(vport->egress.acl, NULL,
&flow_act, NULL, 0);
if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
printf("mlx5_core: WARN: ""vport[%d] configure egress drop rule failed, err(%d)\n", vport->vport, err);
vport->egress.drop_rule = NULL;
}
out:
kfree(match_v);
kfree(match_c);
kfree(spec);
return err;
}

View File

@ -1,102 +0,0 @@
/*-
* Copyright (c) 2022 NVIDIA corporation & affiliates.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <dev/mlx5/driver.h>
#include <dev/mlx5/device.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/mlx5_core/mlx5_fc_cmd.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
u32 *id)
{
u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {};
int err;
MLX5_SET(alloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);
err = mlx5_cmd_exec_inout(dev, alloc_flow_counter, in, out);
if (!err)
*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
return err;
}
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
{
return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
}
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {};
MLX5_SET(dealloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
return mlx5_cmd_exec_in(dev, dealloc_flow_counter, in);
}
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
u64 *packets, u64 *bytes)
{
u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
MLX5_ST_SZ_BYTES(traffic_counter)] = {};
u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
void *stats;
int err = 0;
MLX5_SET(query_flow_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_FLOW_COUNTER);
MLX5_SET(query_flow_counter_in, in, op_mod, 0);
MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
*packets = MLX5_GET64(traffic_counter, stats, packets);
*bytes = MLX5_GET64(traffic_counter, stats, octets);
return 0;
}
int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
u32 *out)
{
int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
MLX5_SET(query_flow_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_FLOW_COUNTER);
MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
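The flow-counter command helpers deleted here presumably live on behind the fs_cmd.h include that replaces mlx5_fc_cmd.h further down; the prototypes above are unchanged in this excerpt. A minimal usage sketch against those prototypes only — allocate one counter, read it once, release it; in the driver the counter id would normally be attached to a rule via the flow action before it accumulates any traffic:
/* Illustrative sketch, not part of this commit. */
static int
example_read_one_counter(struct mlx5_core_dev *dev)
{
	u64 packets, bytes;
	u32 id;
	int err;

	err = mlx5_cmd_fc_alloc(dev, &id);
	if (err)
		return (err);
	err = mlx5_cmd_fc_query(dev, id, &packets, &bytes);
	if (err == 0)
		mlx5_core_dbg(dev, "counter %u: %llu pkts, %llu octets\n", id,
		    (unsigned long long)packets, (unsigned long long)bytes);
	mlx5_cmd_fc_free(dev, id);
	return (err);
}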


@ -1,54 +0,0 @@
/*
* Copyright (c) 2023, NVIDIA Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _MLX5_FC_CMD_
#define _MLX5_FC_CMD_
#include "fs_core.h"
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
u32 *id);
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id);
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
u64 *packets, u64 *bytes);
int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
u32 *out);
static inline int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
{
return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
}
#endif
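The out-length helper above (also removed here) sizes the output buffer for mlx5_cmd_fc_bulk_query(). A hedged sketch of how a bulk read could be assembled, assuming flow_statistics is laid out as an array of traffic_counter entries, as the single-counter query suggests:
/* Illustrative sketch; the flow_statistics[i] indexing is an assumption. */
static int
example_bulk_read(struct mlx5_core_dev *dev, u32 base_id, int bulk_len)
{
	int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
	u64 packets, octets;
	void *stats;
	u32 *out;
	int i, err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (out == NULL)
		return (-ENOMEM);
	err = mlx5_cmd_fc_bulk_query(dev, base_id, bulk_len, out);
	for (i = 0; err == 0 && i < bulk_len; i++) {
		stats = MLX5_ADDR_OF(query_flow_counter_out, out,
		    flow_statistics[i]);
		packets = MLX5_GET64(traffic_counter, stats, packets);
		octets = MLX5_GET64(traffic_counter, stats, octets);
		mlx5_core_dbg(dev, "counter %u: %llu/%llu\n", base_id + i,
		    (unsigned long long)packets, (unsigned long long)octets);
	}
	kvfree(out);
	return (err);
}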


@ -0,0 +1,664 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2020 Mellanox Technologies.
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/fs.h>
#include "mlx5_core.h"
#include "fs_chains.h"
#include "fs_ft_pool.h"
#include "fs_core.h"
#define chains_lock(chains) ((chains)->lock)
#define chains_xa(chains) ((chains)->chains_xa)
#define prios_xa(chains) ((chains)->prios_xa)
#define chains_default_ft(chains) ((chains)->chains_default_ft)
#define chains_end_ft(chains) ((chains)->chains_end_ft)
#define FT_TBL_SZ (64 * 1024)
struct mlx5_fs_chains {
struct mlx5_core_dev *dev;
struct xarray chains_xa;
struct xarray prios_xa;
/* Protects the chains_xa and prios_xa above */
struct mutex lock;
struct mlx5_flow_table *chains_default_ft;
struct mlx5_flow_table *chains_end_ft;
enum mlx5_flow_namespace_type ns;
u32 group_num;
u32 flags;
int fs_base_prio;
int fs_base_level;
};
struct fs_chain {
u32 chain;
int ref;
int id;
uint32_t xa_idx;
struct mlx5_fs_chains *chains;
struct list_head prios_list;
struct mlx5_flow_handle *restore_rule;
struct mlx5_modify_hdr *miss_modify_hdr;
};
struct prio_key {
u32 chain;
u32 prio;
u32 level;
};
struct prio {
struct list_head list;
struct prio_key key;
uint32_t xa_idx;
int ref;
struct fs_chain *chain;
struct mlx5_flow_table *ft;
struct mlx5_flow_table *next_ft;
struct mlx5_flow_group *miss_group;
struct mlx5_flow_handle *miss_rule;
};
/*
static const struct rhashtable_params chain_params = {
.head_offset = offsetof(struct fs_chain, node),
.key_offset = offsetof(struct fs_chain, chain),
.key_len = sizeof_field(struct fs_chain, chain),
.automatic_shrinking = true,
};
static const struct rhashtable_params prio_params = {
.head_offset = offsetof(struct prio, node),
.key_offset = offsetof(struct prio, key),
.key_len = sizeof_field(struct prio, key),
.automatic_shrinking = true,
};
*/
bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
{
return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
}
bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
{
return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
}
bool mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains)
{
return mlx5_chains_prios_supported(chains) &&
mlx5_chains_ignore_flow_level_supported(chains);
}
u32 mlx5_chains_get_chain_range(struct mlx5_fs_chains *chains)
{
if (!mlx5_chains_prios_supported(chains))
return 1;
if (mlx5_chains_ignore_flow_level_supported(chains))
return UINT_MAX - 1;
/* We should get here only for eswitch case */
return FDB_TC_MAX_CHAIN;
}
u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
{
return mlx5_chains_get_chain_range(chains) + 1;
}
u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
{
if (mlx5_chains_ignore_flow_level_supported(chains))
return UINT_MAX;
if (!chains->dev->priv.eswitch)
return 1;
/* We should get here only for eswitch case */
return FDB_TC_MAX_PRIO;
}
static unsigned int mlx5_chains_get_level_range(struct mlx5_fs_chains *chains)
{
if (mlx5_chains_ignore_flow_level_supported(chains))
return UINT_MAX;
/* Same value for FDB and NIC RX tables */
return FDB_TC_LEVELS_PER_PRIO;
}
void
mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
struct mlx5_flow_table *ft)
{
chains_end_ft(chains) = ft;
}
static struct mlx5_flow_table *
mlx5_chains_create_table(struct mlx5_fs_chains *chains,
u32 chain, u32 prio, u32 level)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *ft;
int sz;
if (chains->flags & MLX5_CHAINS_FT_TUNNEL_SUPPORTED)
ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
ft_attr.max_fte = sz;
/* We use chains_default_ft(chains) as the table's next_ft till
* ignore_flow_level is allowed on FT creation and not just for FTEs.
* Instead caller should add an explicit miss rule if needed.
*/
ft_attr.next_ft = chains_default_ft(chains);
/* The root table(chain 0, prio 1, level 0) is required to be
* connected to the previous fs_core managed prio.
* We always create it, as a managed table, in order to align with
* fs_core logic.
*/
if (!mlx5_chains_ignore_flow_level_supported(chains) ||
(chain == 0 && prio == 1 && level == 0)) {
ft_attr.level = chains->fs_base_level;
ft_attr.prio = chains->fs_base_prio;
ns = (chains->ns == MLX5_FLOW_NAMESPACE_FDB) ?
mlx5_get_fdb_sub_ns(chains->dev, chain) :
mlx5_get_flow_namespace(chains->dev, chains->ns);
} else {
ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
ft_attr.prio = chains->fs_base_prio;
/* Firmware doesn't allow us to create another level 0 table,
* so we create all unmanaged tables as level 1 (base + 1).
*
* To connect them, we use explicit miss rules with
* ignore_flow_level. Caller is responsible to create
* these rules (if needed).
*/
ft_attr.level = chains->fs_base_level + 1;
ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
}
ft_attr.autogroup.num_reserved_entries = 2;
ft_attr.autogroup.max_num_groups = chains->group_num;
ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ft)) {
mlx5_core_warn(chains->dev, "Failed to create chains table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
(int)PTR_ERR(ft), chain, prio, level, sz);
return ft;
}
return ft;
}
static struct fs_chain *
mlx5_chains_create_chain(struct mlx5_fs_chains *chains, u32 chain)
{
struct fs_chain *chain_s = NULL;
int err;
chain_s = kvzalloc(sizeof(*chain_s), GFP_KERNEL);
if (!chain_s)
return ERR_PTR(-ENOMEM);
chain_s->chains = chains;
chain_s->chain = chain;
INIT_LIST_HEAD(&chain_s->prios_list);
err = xa_alloc(&chains_xa(chains), &chain_s->xa_idx, chain_s,
xa_limit_32b, GFP_KERNEL);
if (err)
goto err_insert;
return chain_s;
err_insert:
kvfree(chain_s);
return ERR_PTR(err);
}
static void
mlx5_chains_destroy_chain(struct fs_chain *chain)
{
struct mlx5_fs_chains *chains = chain->chains;
xa_erase(&chains_xa(chains), chain->xa_idx);
kvfree(chain);
}
static struct fs_chain *
mlx5_chains_get_chain(struct mlx5_fs_chains *chains, u32 chain)
{
struct fs_chain *chain_s = NULL;
unsigned long idx;
xa_for_each(&chains_xa(chains), idx, chain_s) {
if (chain_s->chain == chain)
break;
}
if (!chain_s) {
chain_s = mlx5_chains_create_chain(chains, chain);
if (IS_ERR(chain_s))
return chain_s;
}
chain_s->ref++;
return chain_s;
}
static struct mlx5_flow_handle *
mlx5_chains_add_miss_rule(struct fs_chain *chain,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act act = {};
act.flags = FLOW_ACT_NO_APPEND;
if (mlx5_chains_ignore_flow_level_supported(chain->chains))
act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = next_ft;
return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1);
}
static int
mlx5_chains_update_prio_prevs(struct prio *prio,
struct mlx5_flow_table *next_ft)
{
struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
struct fs_chain *chain = prio->chain;
struct prio *pos;
int n = 0, err;
if (prio->key.level)
return 0;
/* Iterate in reverse order until reaching the level 0 rule of
* the previous priority, adding all the miss rules first, so we can
* revert them if any of them fails.
*/
pos = prio;
list_for_each_entry_continue_reverse(pos,
&chain->prios_list,
list) {
miss_rules[n] = mlx5_chains_add_miss_rule(chain,
pos->ft,
next_ft);
if (IS_ERR(miss_rules[n])) {
err = PTR_ERR(miss_rules[n]);
goto err_prev_rule;
}
n++;
if (!pos->key.level)
break;
}
/* Success, delete old miss rules, and update the pointers. */
n = 0;
pos = prio;
list_for_each_entry_continue_reverse(pos,
&chain->prios_list,
list) {
mlx5_del_flow_rules(&pos->miss_rule);
pos->miss_rule = miss_rules[n];
pos->next_ft = next_ft;
n++;
if (!pos->key.level)
break;
}
return 0;
err_prev_rule:
while (--n >= 0)
mlx5_del_flow_rules(&miss_rules[n]);
return err;
}
static void
mlx5_chains_put_chain(struct fs_chain *chain)
{
if (--chain->ref == 0)
mlx5_chains_destroy_chain(chain);
}
static struct prio *
mlx5_chains_create_prio(struct mlx5_fs_chains *chains,
u32 chain, u32 prio, u32 level)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_handle *miss_rule;
struct mlx5_flow_group *miss_group;
struct mlx5_flow_table *next_ft;
struct mlx5_flow_table *ft;
struct fs_chain *chain_s;
struct list_head *pos;
struct prio *prio_s;
u32 *flow_group_in;
int err;
chain_s = mlx5_chains_get_chain(chains, chain);
if (IS_ERR(chain_s))
return ERR_CAST(chain_s);
prio_s = kvzalloc(sizeof(*prio_s), GFP_KERNEL);
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!prio_s || !flow_group_in) {
err = -ENOMEM;
goto err_alloc;
}
/* Chain's prio list is sorted by prio and level.
* And all levels of some prio point to the next prio's level 0.
* Example list (prio, level):
* (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0)
* In hardware, we will have the following pointers:
* (3,0) -> (5,0) -> (7,0) -> Slow path
* (3,1) -> (5,0)
* (5,1) -> (7,0)
* (6,1) -> (7,0)
*/
/* Default miss for each chain: */
next_ft = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
chains_default_ft(chains) :
chains_end_ft(chains);
list_for_each(pos, &chain_s->prios_list) {
struct prio *p = list_entry(pos, struct prio, list);
/* exit on first pos that is larger */
if (prio < p->key.prio || (prio == p->key.prio &&
level < p->key.level)) {
/* Get next level 0 table */
next_ft = p->key.level == 0 ? p->ft : p->next_ft;
break;
}
}
ft = mlx5_chains_create_table(chains, chain, prio, level);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_create;
}
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
ft->max_fte - 2);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
ft->max_fte - 1);
miss_group = mlx5_create_flow_group(ft, flow_group_in);
if (IS_ERR(miss_group)) {
err = PTR_ERR(miss_group);
goto err_group;
}
/* Add miss rule to next_ft */
miss_rule = mlx5_chains_add_miss_rule(chain_s, ft, next_ft);
if (IS_ERR(miss_rule)) {
err = PTR_ERR(miss_rule);
goto err_miss_rule;
}
prio_s->miss_group = miss_group;
prio_s->miss_rule = miss_rule;
prio_s->next_ft = next_ft;
prio_s->chain = chain_s;
prio_s->key.chain = chain;
prio_s->key.prio = prio;
prio_s->key.level = level;
prio_s->ft = ft;
err = xa_alloc(&prios_xa(chains), &prio_s->xa_idx, prio_s,
xa_limit_32b, GFP_KERNEL);
if (err)
goto err_insert;
list_add(&prio_s->list, pos->prev);
/* Table is ready, connect it */
err = mlx5_chains_update_prio_prevs(prio_s, ft);
if (err)
goto err_update;
kvfree(flow_group_in);
return prio_s;
err_update:
list_del(&prio_s->list);
xa_erase(&prios_xa(chains), prio_s->xa_idx);
err_insert:
mlx5_del_flow_rules(&miss_rule);
err_miss_rule:
mlx5_destroy_flow_group(miss_group);
err_group:
mlx5_destroy_flow_table(ft);
err_create:
err_alloc:
kvfree(prio_s);
kvfree(flow_group_in);
mlx5_chains_put_chain(chain_s);
return ERR_PTR(err);
}
static void
mlx5_chains_destroy_prio(struct mlx5_fs_chains *chains,
struct prio *prio)
{
struct fs_chain *chain = prio->chain;
WARN_ON(mlx5_chains_update_prio_prevs(prio,
prio->next_ft));
list_del(&prio->list);
xa_erase(&prios_xa(chains), prio->xa_idx);
mlx5_del_flow_rules(&prio->miss_rule);
mlx5_destroy_flow_group(prio->miss_group);
mlx5_destroy_flow_table(prio->ft);
mlx5_chains_put_chain(chain);
kvfree(prio);
}
struct mlx5_flow_table *
mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
u32 level)
{
struct mlx5_flow_table *prev_fts;
struct prio *prio_s;
unsigned long idx;
int l = 0;
if ((chain > mlx5_chains_get_chain_range(chains) &&
chain != mlx5_chains_get_nf_ft_chain(chains)) ||
prio > mlx5_chains_get_prio_range(chains) ||
level > mlx5_chains_get_level_range(chains))
return ERR_PTR(-EOPNOTSUPP);
/* create earlier levels for correct fs_core lookup when
* connecting tables.
*/
for (l = 0; l < level; l++) {
prev_fts = mlx5_chains_get_table(chains, chain, prio, l);
if (IS_ERR(prev_fts)) {
prio_s = ERR_CAST(prev_fts);
goto err_get_prevs;
}
}
mutex_lock(&chains_lock(chains));
xa_for_each(&prios_xa(chains), idx, prio_s) {
if (chain == prio_s->key.chain &&
prio == prio_s->key.prio &&
level == prio_s->key.level)
break;
}
if (!prio_s) {
prio_s = mlx5_chains_create_prio(chains, chain,
prio, level);
if (IS_ERR(prio_s))
goto err_create_prio;
}
++prio_s->ref;
mutex_unlock(&chains_lock(chains));
return prio_s->ft;
err_create_prio:
mutex_unlock(&chains_lock(chains));
err_get_prevs:
while (--l >= 0)
mlx5_chains_put_table(chains, chain, prio, l);
return ERR_CAST(prio_s);
}
void
mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
u32 level)
{
struct prio *prio_s;
unsigned long idx;
mutex_lock(&chains_lock(chains));
xa_for_each(&prios_xa(chains), idx, prio_s) {
if (chain == prio_s->key.chain &&
prio == prio_s->key.prio &&
level == prio_s->key.level)
break;
}
if (!prio_s)
goto err_get_prio;
if (--prio_s->ref == 0)
mlx5_chains_destroy_prio(chains, prio_s);
mutex_unlock(&chains_lock(chains));
while (level-- > 0)
mlx5_chains_put_table(chains, chain, prio, level);
return;
err_get_prio:
mutex_unlock(&chains_lock(chains));
WARN_ONCE(1,
"Couldn't find table: (chain: %d prio: %d level: %d)",
chain, prio, level);
}
struct mlx5_flow_table *
mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains)
{
return chains_end_ft(chains);
}
struct mlx5_flow_table *
mlx5_chains_create_global_table(struct mlx5_fs_chains *chains)
{
u32 chain, prio, level;
int err;
if (!mlx5_chains_ignore_flow_level_supported(chains)) {
err = -EOPNOTSUPP;
mlx5_core_warn(chains->dev,
"Couldn't create global flow table, ignore_flow_level not supported.");
goto err_ignore;
}
chain = mlx5_chains_get_chain_range(chains),
prio = mlx5_chains_get_prio_range(chains);
level = mlx5_chains_get_level_range(chains);
return mlx5_chains_create_table(chains, chain, prio, level);
err_ignore:
return ERR_PTR(err);
}
void
mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
struct mlx5_flow_table *ft)
{
mlx5_destroy_flow_table(ft);
}
static struct mlx5_fs_chains *
mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
{
struct mlx5_fs_chains *chains;
chains = kzalloc(sizeof(*chains), GFP_KERNEL);
if (!chains)
return ERR_PTR(-ENOMEM);
chains->dev = dev;
chains->flags = attr->flags;
chains->ns = attr->ns;
chains->group_num = attr->max_grp_num;
chains->fs_base_prio = attr->fs_base_prio;
chains->fs_base_level = attr->fs_base_level;
chains_default_ft(chains) = chains_end_ft(chains) = attr->default_ft;
xa_init(&chains_xa(chains));
xa_init(&prios_xa(chains));
mutex_init(&chains_lock(chains));
return chains;
}
static void
mlx5_chains_cleanup(struct mlx5_fs_chains *chains)
{
mutex_destroy(&chains_lock(chains));
xa_destroy(&prios_xa(chains));
xa_destroy(&chains_xa(chains));
kfree(chains);
}
struct mlx5_fs_chains *
mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
{
struct mlx5_fs_chains *chains;
chains = mlx5_chains_init(dev, attr);
return chains;
}
void
mlx5_chains_destroy(struct mlx5_fs_chains *chains)
{
mlx5_chains_cleanup(chains);
}
void
mlx5_chains_print_info(struct mlx5_fs_chains *chains)
{
mlx5_core_dbg(chains->dev, "Flow table chains groups(%d)\n", chains->group_num);
}
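For readers new to this API, a minimal caller-side sketch (illustrative, not part of the commit): build a chains object over the kernel namespace, take a reference on the table backing chain 0 / prio 1 / level 0, then tear everything down. The attr field names follow mlx5_chains_init() above; the group count is an arbitrary example value.
static int
example_chains_use(struct mlx5_core_dev *dev)
{
	struct mlx5_chains_attr attr = {
		.ns = MLX5_FLOW_NAMESPACE_KERNEL,
		.max_grp_num = 4,	/* illustrative */
	};
	struct mlx5_fs_chains *chains;
	struct mlx5_flow_table *ft;

	chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(chains))
		return (PTR_ERR(chains));
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		mlx5_chains_destroy(chains);
		return (PTR_ERR(ft));
	}
	/* ... add rules to ft ... */
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_destroy(chains);
	return (0);
}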

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -31,7 +31,7 @@
#include <linux/rbtree.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_core/fs_core.h>
#include <dev/mlx5/mlx5_core/mlx5_fc_cmd.h>
#include <dev/mlx5/mlx5_core/fs_cmd.h>
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
#define MLX5_FC_BULK_QUERY_ALLOC_PERIOD msecs_to_jiffies(180 * 1000)


@ -0,0 +1,85 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies. */
#include "fs_ft_pool.h"
/* Firmware currently supports 4 pools of 4 sizes (FT_POOLS) and a virtual
 * memory region of 16M (MLX5_FT_SIZE); this region is duplicated for each
 * flow table pool. We can allocate up to 16M of each pool, and we keep
 * track of how much we used via mlx5_ft_pool_get_avail_sz.
 * Firmware doesn't report any of this for now.
 * FT_POOLS is expected to be sorted from large to small and to match the
 * firmware pools.
 */
#define FT_SIZE (16 * 1024 * 1024)
static const unsigned int FT_POOLS[] = { 4 * 1024 * 1024,
1 * 1024 * 1024,
64 * 1024,
128,
1 /* size for termination tables */ };
struct mlx5_ft_pool {
int ft_left[ARRAY_SIZE(FT_POOLS)];
};
int mlx5_ft_pool_init(struct mlx5_core_dev *dev)
{
struct mlx5_ft_pool *ft_pool;
int i;
ft_pool = kzalloc(sizeof(*ft_pool), GFP_KERNEL);
if (!ft_pool)
return -ENOMEM;
for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--)
ft_pool->ft_left[i] = FT_SIZE / FT_POOLS[i];
dev->priv.ft_pool = ft_pool;
return 0;
}
void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev)
{
kfree(dev->priv.ft_pool);
}
int
mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type table_type,
int desired_size)
{
u32 max_ft_size = 1 << MLX5_CAP_FLOWTABLE_TYPE(dev, log_max_ft_size, table_type);
int i, found_i = -1;
for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
if (dev->priv.ft_pool->ft_left[i] && FT_POOLS[i] >= desired_size &&
FT_POOLS[i] <= max_ft_size) {
found_i = i;
if (desired_size != POOL_NEXT_SIZE)
break;
}
}
if (found_i != -1) {
--dev->priv.ft_pool->ft_left[found_i];
return FT_POOLS[found_i];
}
return 0;
}
void
mlx5_ft_pool_put_sz(struct mlx5_core_dev *dev, int sz)
{
int i;
if (!sz)
return;
for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
if (sz == FT_POOLS[i]) {
++dev->priv.ft_pool->ft_left[i];
return;
}
}
WARN_ONCE(1, "Couldn't find size %d in flow table size pool", sz);
}
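The selection loop in mlx5_ft_pool_get_avail_sz() walks FT_POOLS from the smallest entry to the largest and stops at the first pool big enough for the request, or keeps going to the largest pool under the device limit when POOL_NEXT_SIZE is passed. A stand-alone, user-space model of that choice; POOL_NEXT_SIZE is assumed to be 0 as in the header, and the per-pool ft_left accounting is omitted:
#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))
#define POOL_NEXT_SIZE	0	/* assumed value of the sentinel */

static const unsigned int FT_POOLS[] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
					 64 * 1024, 128, 1 };

/* Mirror of the kernel loop above, minus the ft_left[] availability check. */
static unsigned int
pick_pool_size(unsigned int desired, unsigned int max_ft_size)
{
	int i, found_i = -1;

	for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
		if (FT_POOLS[i] >= desired && FT_POOLS[i] <= max_ft_size) {
			found_i = i;
			if (desired != POOL_NEXT_SIZE)
				break;
		}
	}
	return (found_i != -1 ? FT_POOLS[found_i] : 0);
}

int
main(void)
{
	/* A 2000-entry request lands in the 64K pool; POOL_NEXT_SIZE takes
	 * the largest pool that still fits under max_ft_size. */
	printf("%u %u\n", pick_pool_size(2000, 4u << 20),
	    pick_pool_size(POOL_NEXT_SIZE, 4u << 20));
	return (0);
}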


@ -81,12 +81,12 @@ accel_fs_tcp_set_ipv6_flow(struct mlx5_flow_spec *spec, struct inpcb *inp)
#endif
void
mlx5e_accel_fs_del_inpcb(struct mlx5_flow_rule *rule)
mlx5e_accel_fs_del_inpcb(struct mlx5_flow_handle *rule)
{
mlx5_del_flow_rule(&rule);
mlx5_del_flow_rules(&rule);
}
struct mlx5_flow_rule *
struct mlx5_flow_handle *
mlx5e_accel_fs_add_inpcb(struct mlx5e_priv *priv,
struct inpcb *inp, uint32_t tirn, uint32_t flow_tag,
uint16_t vlan_id)
@ -96,18 +96,17 @@ mlx5e_accel_fs_add_inpcb(struct mlx5e_priv *priv,
#if defined(INET) || defined(INET6)
struct mlx5e_accel_fs_tcp *fs_tcp = &priv->fts.accel_tcp;
#endif
struct mlx5_flow_rule *flow;
struct mlx5_flow_handle *flow;
struct mlx5_flow_spec *spec;
struct mlx5_flow_act flow_act = {
.actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
.flow_tag = flow_tag,
};
struct mlx5_flow_act flow_act = {};
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return (ERR_PTR(-ENOMEM));
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
spec->flow_context.flow_tag = flow_tag;
INP_RLOCK(inp);
/* Set VLAN ID to match, if any. */
@ -160,13 +159,9 @@ mlx5e_accel_fs_add_inpcb(struct mlx5e_priv *priv,
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest.tir_num = tirn;
flow_act.action = MLX5_FLOW_RULE_FWD_ACTION_DEST;
flow = mlx5_add_flow_rule(ft->t, spec->match_criteria_enable,
spec->match_criteria,
spec->match_value,
MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act,
&dest);
flow = mlx5_add_flow_rules(ft->t, spec, &flow_act, &dest, 1);
out:
kvfree(spec);
return (flow);
@ -175,18 +170,18 @@ mlx5e_accel_fs_add_inpcb(struct mlx5e_priv *priv,
static int
accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv, int type)
{
static u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
static u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
static struct mlx5_flow_spec spec = {};
struct mlx5_flow_destination dest = {};
struct mlx5e_accel_fs_tcp *fs_tcp;
struct mlx5_flow_rule *rule;
struct mlx5_flow_handle *rule;
struct mlx5_flow_act flow_act = {
.actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
.action = MLX5_FLOW_RULE_FWD_ACTION_DEST,
};
fs_tcp = &priv->fts.accel_tcp;
spec.flow_context.flags = FLOW_CONTEXT_HAS_TAG;
spec.flow_context.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
/*
@ -197,10 +192,11 @@ accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv, int type)
* of flow tables.
*/
dest.ft = (type == MLX5E_ACCEL_FS_TCP_NUM_TYPES - 1) ?
priv->fts.vlan.t : fs_tcp->tables[type + 1].t;
((priv->fts.ipsec_ft) ? priv->fts.ipsec_ft : priv->fts.vlan.t) :
fs_tcp->tables[type + 1].t;
rule = mlx5_add_flow_rule(fs_tcp->tables[type].t, 0, match_criteria, match_value,
MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
rule = mlx5_add_flow_rules(fs_tcp->tables[type].t, &spec, &flow_act,
&dest, 1);
if (IS_ERR(rule))
return (PTR_ERR(rule));
@ -317,11 +313,13 @@ static int
accel_fs_tcp_create_table(struct mlx5e_priv *priv, int type)
{
struct mlx5e_flow_table *ft = &priv->fts.accel_tcp.tables[type];
struct mlx5_flow_table_attr ft_attr = {};
int err;
ft->num_groups = 0;
ft->t = mlx5_create_flow_table(priv->fts.accel_tcp.ns, 0, "tcp",
MLX5E_ACCEL_FS_TCP_TABLE_SIZE);
ft_attr.max_fte = MLX5E_ACCEL_FS_TCP_TABLE_SIZE;
ft_attr.level = type;
ft->t = mlx5_create_flow_table(priv->fts.accel_tcp.ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@ -365,7 +363,7 @@ mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
return;
for (i = 0; i < MLX5E_ACCEL_FS_TCP_NUM_TYPES; i++) {
mlx5_del_flow_rule(&priv->fts.accel_tcp.default_rules[i]);
mlx5_del_flow_rules(&priv->fts.accel_tcp.default_rules[i]);
accel_fs_tcp_destroy_table(priv, i);
}
}
@ -402,7 +400,7 @@ mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
err_destroy_rules:
while (i--)
mlx5_del_flow_rule(&priv->fts.accel_tcp.default_rules[i]);
mlx5_del_flow_rules(&priv->fts.accel_tcp.default_rules[i]);
i = MLX5E_ACCEL_FS_TCP_NUM_TYPES;
err_destroy_tables:

File diff suppressed because it is too large


@ -240,6 +240,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
if (MLX5_CAP_GEN(dev, ipsec_offload)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_IPSEC);
if (err)
return err;
}
err = mlx5_core_query_special_contexts(dev);
if (err)
return err;


@ -26,6 +26,7 @@
#include "opt_rss.h"
#include "opt_ratelimit.h"
#include "opt_ipsec.h"
#include <linux/kmod.h>
#include <linux/module.h>
@ -67,6 +68,9 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1);
MODULE_DEPEND(mlx5, mlxfw, 1, 1, 1);
MODULE_DEPEND(mlx5, firmware, 1, 1, 1);
#ifdef IPSEC_OFFLOAD
MODULE_DEPEND(mlx5, ipsec, 1, 1, 1);
#endif
MODULE_VERSION(mlx5, 1);
SYSCTL_NODE(_hw, OID_AUTO, mlx5, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
@ -1209,7 +1213,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_stop_eqs;
}
err = mlx5_init_fs(dev);
err = mlx5_fs_core_init(dev);
if (err) {
mlx5_core_err(dev, "flow steering init %d\n", err);
goto err_free_comp_eqs;
@ -1327,7 +1331,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_diag_cnt_cleanup(dev);
mlx5_fpga_device_stop(dev);
mlx5_mpfs_destroy(dev);
mlx5_cleanup_fs(dev);
mlx5_fs_core_cleanup(dev);
mlx5_wait_for_reclaim_vfs_pages(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
@ -1694,10 +1698,17 @@ static int init_one(struct pci_dev *pdev,
mlx5_pagealloc_init(dev);
pr_info("%s - MARK BLOCH WAS HERE\n", __func__);
err = mlx5_fs_core_alloc(dev);
if (err) {
mlx5_core_err(dev, "Failed to alloc flow steering\n");
goto clean_health;
}
err = mlx5_load_one(dev, priv, true);
if (err) {
mlx5_core_err(dev, "mlx5_load_one failed %d\n", err);
goto clean_health;
goto clean_fs;
}
mlx5_fwdump_prep(dev);
@ -1743,6 +1754,8 @@ static int init_one(struct pci_dev *pdev,
pci_save_state(pdev);
return 0;
clean_fs:
mlx5_fs_core_free(dev);
clean_health:
mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
@ -1774,6 +1787,7 @@ static void remove_one(struct pci_dev *pdev)
(long long)(dev->priv.fw_pages * MLX5_ADAPTER_PAGE_SIZE));
}
mlx5_fs_core_free(dev);
mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
mlx5_fwdump_clean(dev);


@ -33,66 +33,6 @@
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_core/transobj.h>
int mlx5_encryption_key_create(struct mlx5_core_dev *mdev, u32 pdn,
const void *p_key, u32 key_len, u32 *p_obj_id)
{
u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
u32 out[MLX5_ST_SZ_DW(create_encryption_key_out)] = {};
u64 general_obj_types;
int err;
general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJ_TYPES_ENCRYPTION_KEY))
return -EINVAL;
switch (key_len) {
case 128 / 8:
memcpy(MLX5_ADDR_OF(create_encryption_key_in, in,
encryption_key_object.key[4]), p_key, 128 / 8);
MLX5_SET(create_encryption_key_in, in, encryption_key_object.pd, pdn);
MLX5_SET(create_encryption_key_in, in, encryption_key_object.key_size,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128);
MLX5_SET(create_encryption_key_in, in, encryption_key_object.key_type,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK);
break;
case 256 / 8:
memcpy(MLX5_ADDR_OF(create_encryption_key_in, in,
encryption_key_object.key[0]), p_key, 256 / 8);
MLX5_SET(create_encryption_key_in, in, encryption_key_object.pd, pdn);
MLX5_SET(create_encryption_key_in, in, encryption_key_object.key_size,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256);
MLX5_SET(create_encryption_key_in, in, encryption_key_object.key_type,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK);
break;
default:
return -EINVAL;
}
MLX5_SET(create_encryption_key_in, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJ);
MLX5_SET(create_encryption_key_in, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (err == 0)
*p_obj_id = MLX5_GET(create_encryption_key_out, out, obj_id);
/* avoid leaking key on the stack */
memset(in, 0, sizeof(in));
return err;
}
int mlx5_encryption_key_destroy(struct mlx5_core_dev *mdev, u32 oid)
{
u32 in[MLX5_ST_SZ_DW(destroy_encryption_key_in)] = {};
u32 out[MLX5_ST_SZ_DW(destroy_encryption_key_out)] = {};
MLX5_SET(destroy_encryption_key_in, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJ);
MLX5_SET(destroy_encryption_key_in, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
MLX5_SET(destroy_encryption_key_in, in, obj_id, oid);
return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
int mlx5_tls_open_tis(struct mlx5_core_dev *mdev, int tc, int tdn, int pdn, u32 *p_tisn)
{
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};


@ -27,6 +27,7 @@
#define __MLX5_WQ_H__
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/cq.h>
struct mlx5_wq_param {
int linear;
@ -136,6 +137,22 @@ static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
*wq->db = cpu_to_be32(wq->cc & 0xffffff);
}
static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq)
{
u32 ci = mlx5_cqwq_get_ci(wq);
struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
u8 cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
u8 sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
if (cqe_ownership_bit != sw_ownership_val)
return NULL;
/* ensure cqe content is read after cqe ownership bit */
atomic_thread_fence_acq();
return cqe;
}
static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
{
return wq->cur_sz == wq->sz_m1;
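The new mlx5_cqwq_get_cqe() above accepts a CQE only when its owner bit matches the low bit of the software wrap count, so each pass over the ring flips the expected value and stale entries from the previous lap are rejected without any extra bookkeeping. A stand-alone model of that expectation, assuming the usual layout where the consumer index is cc & (size - 1) and the wrap count is cc >> log_size:
#include <stdint.h>
#include <stdio.h>

#define LOG_CQ_SIZE	3			/* 8-entry ring, illustrative */
#define CQ_SIZE		(1u << LOG_CQ_SIZE)

/* Expected owner bit for a given consumer counter: wrap-count parity. */
static uint8_t
sw_owner_bit(uint32_t cc)
{
	return ((cc >> LOG_CQ_SIZE) & 1);
}

int
main(void)
{
	uint32_t cc;

	for (cc = 0; cc < 2 * CQ_SIZE; cc++)
		printf("cc=%2u ci=%u expected owner=%u\n",
		    cc, cc & (CQ_SIZE - 1), sw_owner_bit(cc));
	return (0);
}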


@ -70,6 +70,7 @@
#include <dev/mlx5/mlx5_core/wq.h>
#include <dev/mlx5/mlx5_core/transobj.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_accel/ipsec.h>
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
@ -956,7 +957,7 @@ struct mlx5_flow_rule;
struct mlx5e_eth_addr_info {
u8 addr [ETH_ALEN + 2];
/* flow table rule per traffic type */
struct mlx5_flow_rule *ft_rule[MLX5E_NUM_TT];
struct mlx5_flow_handle *ft_rule[MLX5E_NUM_TT];
};
#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
@ -992,10 +993,10 @@ enum {
struct mlx5e_vlan_db {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct mlx5_flow_rule *active_vlans_ft_rule[VLAN_N_VID];
struct mlx5_flow_rule *untagged_ft_rule;
struct mlx5_flow_rule *any_cvlan_ft_rule;
struct mlx5_flow_rule *any_svlan_ft_rule;
struct mlx5_flow_handle *active_vlans_ft_rule[VLAN_N_VID];
struct mlx5_flow_handle *untagged_ft_rule;
struct mlx5_flow_handle *any_cvlan_ft_rule;
struct mlx5_flow_handle *any_svlan_ft_rule;
bool filter_disabled;
};
@ -1004,7 +1005,7 @@ struct mlx5e_vxlan_db_el {
u_int proto;
u_int port;
bool installed;
struct mlx5_flow_rule *vxlan_ft_rule;
struct mlx5_flow_handle *vxlan_ft_rule;
TAILQ_ENTRY(mlx5e_vxlan_db_el) link;
};
@ -1027,19 +1028,20 @@ enum accel_fs_tcp_type {
struct mlx5e_accel_fs_tcp {
struct mlx5_flow_namespace *ns;
struct mlx5e_flow_table tables[MLX5E_ACCEL_FS_TCP_NUM_TYPES];
struct mlx5_flow_rule *default_rules[MLX5E_ACCEL_FS_TCP_NUM_TYPES];
struct mlx5_flow_handle *default_rules[MLX5E_ACCEL_FS_TCP_NUM_TYPES];
};
struct mlx5e_flow_tables {
struct mlx5_flow_namespace *ns;
struct mlx5e_flow_table vlan;
struct mlx5e_flow_table vxlan;
struct mlx5_flow_rule *vxlan_catchall_ft_rule;
struct mlx5_flow_handle *vxlan_catchall_ft_rule;
struct mlx5e_flow_table main;
struct mlx5e_flow_table main_vxlan;
struct mlx5_flow_rule *main_vxlan_rule[MLX5E_NUM_TT];
struct mlx5_flow_handle *main_vxlan_rule[MLX5E_NUM_TT];
struct mlx5e_flow_table inner_rss;
struct mlx5e_accel_fs_tcp accel_tcp;
struct mlx5_flow_table *ipsec_ft;
};
struct mlx5e_xmit_args {
@ -1067,6 +1069,7 @@ struct mlx5e_dcbx {
u32 xoff;
};
struct mlx5e_ipsec;
struct mlx5e_priv {
struct mlx5_core_dev *mdev; /* must be first */
@ -1145,6 +1148,7 @@ struct mlx5e_priv {
bool sw_is_port_buf_owner;
struct pfil_head *pfil;
struct mlx5e_ipsec *ipsec;
struct mlx5e_channel channel[];
};


@ -61,7 +61,7 @@ struct mlx5e_tls_rx_tag {
uint32_t tirn; /* HW TIR context number */
uint32_t dek_index; /* HW TLS context number */
struct mlx5e_tls_rx *tls_rx; /* parent pointer */
struct mlx5_flow_rule *flow_rule;
struct mlx5_flow_handle *flow_rule;
struct mtx mtx;
struct completion progress_complete;
uint32_t state; /* see MLX5E_TLS_RX_ST_XXX */


@ -143,17 +143,17 @@ static void
mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
struct mlx5e_eth_addr_info *ai)
{
mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_TCP]);
mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_TCP]);
mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_UDP]);
mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_UDP]);
mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6]);
mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4]);
mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_ANY]);
mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV6_TCP]);
mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV4_TCP]);
mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV6_UDP]);
mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV4_UDP]);
mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV6]);
mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV4]);
mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_ANY]);
}
static int
@ -248,24 +248,30 @@ mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
static int
mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
struct mlx5e_eth_addr_info *ai, int type,
u32 *mc, u32 *mv)
struct mlx5_flow_spec *spec)
{
struct mlx5_flow_destination dest = {};
u8 mc_enable = 0;
struct mlx5_flow_rule **rule_p;
struct mlx5_flow_handle **rule_p;
struct mlx5_flow_table *ft = priv->fts.main.t;
u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers.dmac_47_16);
u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dmac_47_16);
u32 *tirn = priv->tirn;
u32 tt_vec;
int err = 0;
struct mlx5_flow_act flow_act = {
.actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
.flow_tag = MLX5_FS_ETH_FLOW_TAG,
.action = MLX5_FLOW_RULE_FWD_ACTION_DEST,
};
u8 *mc;
u8 *mv;
mv = (u8 *)spec->match_value;
mc = (u8 *)spec->match_criteria;
spec->flow_context.flow_tag = MLX5_FS_ETH_FLOW_TAG;
spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
switch (type) {
@ -289,12 +295,11 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
tt_vec = mlx5e_get_tt_vec(ai, type);
spec->match_criteria_enable = mc_enable;
if (tt_vec & BIT(MLX5E_TT_ANY)) {
rule_p = &ai->ft_rule[MLX5E_TT_ANY];
dest.tir_num = tirn[MLX5E_TT_ANY];
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@ -302,14 +307,13 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
mc_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
spec->match_criteria_enable = mc_enable;
if (tt_vec & BIT(MLX5E_TT_IPV4)) {
rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
dest.tir_num = tirn[MLX5E_TT_IPV4];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IP);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@ -319,9 +323,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
dest.tir_num = tirn[MLX5E_TT_IPV6];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IPV6);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@ -334,9 +336,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IP);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@ -346,9 +346,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IPV6);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@ -360,9 +358,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IP);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@ -372,9 +368,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IPV6);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@ -386,9 +380,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IP);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@ -398,9 +390,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IPV6);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@ -412,9 +402,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IP);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@ -424,9 +412,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IPV6);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@ -445,23 +431,19 @@ static int
mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
struct mlx5e_eth_addr_info *ai, int type)
{
u32 *match_criteria;
u32 *match_value;
struct mlx5_flow_spec *spec;
int err = 0;
match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!match_value || !match_criteria) {
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec) {
mlx5_en_err(priv->ifp, "alloc failed\n");
err = -ENOMEM;
goto add_eth_addr_rule_out;
}
err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, match_criteria,
match_value);
err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, spec);
add_eth_addr_rule_out:
kvfree(match_criteria);
kvfree(match_value);
kvfree(spec);
return (err);
}
@ -469,51 +451,56 @@ mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
static void
mlx5e_del_main_vxlan_rules(struct mlx5e_priv *priv)
{
mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH]);
mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH]);
mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP]);
mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP]);
mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP]);
mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP]);
mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6]);
mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4]);
mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_ANY]);
mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH]);
mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH]);
mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP]);
mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP]);
mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP]);
mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP]);
mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6]);
mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4]);
mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_ANY]);
}
static int
mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec)
{
struct mlx5_flow_destination dest = {};
u8 mc_enable = 0;
struct mlx5_flow_rule **rule_p;
struct mlx5_flow_handle **rule_p;
struct mlx5_flow_table *ft = priv->fts.main_vxlan.t;
u32 *tirn = priv->tirn_inner_vxlan;
struct mlx5_flow_act flow_act = {
.actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
.flow_tag = MLX5_FS_ETH_FLOW_TAG,
.action = MLX5_FLOW_RULE_FWD_ACTION_DEST,
};
int err = 0;
u8 *mc;
u8 *mv;
spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
spec->flow_context.flow_tag = MLX5_FS_ETH_FLOW_TAG;
mc = (u8 *)spec->match_criteria;
mv = (u8 *)spec->match_value;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
mc_enable = MLX5_MATCH_INNER_HEADERS;
spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4];
dest.tir_num = tirn[MLX5E_TT_IPV4];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6];
dest.tir_num = tirn[MLX5E_TT_IPV6];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@ -523,16 +510,14 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP];
dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP];
dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@ -541,16 +526,14 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP];
dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP];
dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@ -559,16 +542,14 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH];
dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH];
dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@ -577,8 +558,7 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP];
dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@ -586,18 +566,16 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype,
ETHERTYPE_IPV6);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
mc_enable = 0;
spec->match_criteria_enable = 0;
memset(mv, 0, MLX5_ST_SZ_BYTES(fte_match_param));
memset(mc, 0, MLX5_ST_SZ_BYTES(fte_match_param));
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_ANY];
dest.tir_num = tirn[MLX5E_TT_ANY];
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@ -614,22 +592,19 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
static int
mlx5e_add_main_vxlan_rules(struct mlx5e_priv *priv)
{
u32 *match_criteria;
u32 *match_value;
struct mlx5_flow_spec *spec;
int err = 0;
match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (match_value == NULL || match_criteria == NULL) {
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec) {
mlx5_en_err(priv->ifp, "alloc failed\n");
err = -ENOMEM;
goto add_main_vxlan_rules_out;
}
err = mlx5e_add_main_vxlan_rules_sub(priv, match_criteria, match_value);
err = mlx5e_add_main_vxlan_rules_sub(priv, spec);
add_main_vxlan_rules_out:
kvfree(match_criteria);
kvfree(match_value);
kvfree(spec);
return (err);
}
@ -687,22 +662,27 @@ enum mlx5e_vlan_rule_type {
static int
mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
enum mlx5e_vlan_rule_type rule_type, u16 vid,
u32 *mc, u32 *mv)
struct mlx5_flow_spec *spec)
{
struct mlx5_flow_table *ft = priv->fts.vlan.t;
struct mlx5_flow_destination dest = {};
u8 mc_enable = 0;
struct mlx5_flow_rule **rule_p;
struct mlx5_flow_handle **rule_p;
int err = 0;
struct mlx5_flow_act flow_act = {
.actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
.flow_tag = MLX5_FS_ETH_FLOW_TAG,
.action = MLX5_FLOW_RULE_FWD_ACTION_DEST,
};
u8 *mv;
u8 *mc;
mv = (u8 *)spec->match_value;
mc = (u8 *)spec->match_criteria;
spec->flow_context.flow_tag = MLX5_FS_ETH_FLOW_TAG;
spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = priv->fts.vxlan.t;
mc_enable = MLX5_MATCH_OUTER_HEADERS;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
@ -729,11 +709,7 @@ mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
break;
}
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act,
&dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
@ -747,24 +723,20 @@ static int
mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
u32 *match_criteria;
u32 *match_value;
struct mlx5_flow_spec *spec;
int err = 0;
match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!match_value || !match_criteria) {
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec) {
mlx5_en_err(priv->ifp, "alloc failed\n");
err = -ENOMEM;
goto add_vlan_rule_out;
}
err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, match_criteria,
match_value);
err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, spec);
add_vlan_rule_out:
kvfree(match_criteria);
kvfree(match_value);
kvfree(spec);
return (err);
}
@ -775,16 +747,16 @@ mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
{
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
mlx5_del_flow_rule(&priv->vlan.untagged_ft_rule);
mlx5_del_flow_rules(&priv->vlan.untagged_ft_rule);
break;
case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
mlx5_del_flow_rule(&priv->vlan.any_cvlan_ft_rule);
mlx5_del_flow_rules(&priv->vlan.any_cvlan_ft_rule);
break;
case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
mlx5_del_flow_rule(&priv->vlan.any_svlan_ft_rule);
mlx5_del_flow_rules(&priv->vlan.any_svlan_ft_rule);
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
mlx5_del_flow_rule(&priv->vlan.active_vlans_ft_rule[vid]);
mlx5_del_flow_rules(&priv->vlan.active_vlans_ft_rule[vid]);
mlx5e_vport_context_update_vlans(priv);
break;
default:
@ -1518,11 +1490,16 @@ mlx5e_create_main_flow_table(struct mlx5e_priv *priv, bool inner_vxlan)
{
struct mlx5e_flow_table *ft = inner_vxlan ? &priv->fts.main_vxlan :
&priv->fts.main;
struct mlx5_flow_table_attr ft_attr = {};
int err;
ft->num_groups = 0;
ft->t = mlx5_create_flow_table(priv->fts.ns, 0,
inner_vxlan ? "vxlan_main" : "main", MLX5E_MAIN_TABLE_SIZE);
ft_attr.max_fte = MLX5E_MAIN_TABLE_SIZE;
if (priv->ipsec)
ft_attr.level = inner_vxlan ? 10 : 12;
else
ft_attr.level = inner_vxlan ? 2 : 4;
ft->t = mlx5_create_flow_table(priv->fts.ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
@ -1643,11 +1620,13 @@ static int
mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
struct mlx5e_flow_table *ft = &priv->fts.vlan;
struct mlx5_flow_table_attr ft_attr = {};
int err;
ft->num_groups = 0;
ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vlan",
MLX5E_VLAN_TABLE_SIZE);
ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
ft_attr.level = (priv->ipsec) ? 8 : 0;
ft->t = mlx5_create_flow_table(priv->fts.ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
@ -1683,23 +1662,29 @@ mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
}
static int
mlx5e_add_vxlan_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv,
mlx5e_add_vxlan_rule_sub(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
struct mlx5e_vxlan_db_el *el)
{
struct mlx5_flow_table *ft = priv->fts.vxlan.t;
struct mlx5_flow_destination dest = {};
u8 mc_enable;
struct mlx5_flow_rule **rule_p;
struct mlx5_flow_handle **rule_p;
int err = 0;
struct mlx5_flow_act flow_act = {
.actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
.flow_tag = MLX5_FS_ETH_FLOW_TAG,
.action = MLX5_FLOW_RULE_FWD_ACTION_DEST,
};
u8 *mc;
u8 *mv;
mv = (u8 *)spec->match_value;
mc = (u8 *)spec->match_criteria;
spec->flow_context.flow_tag = MLX5_FS_ETH_FLOW_TAG;
spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = priv->fts.main_vxlan.t;
mc_enable = MLX5_MATCH_OUTER_HEADERS;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
rule_p = &el->vxlan_ft_rule;
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
MLX5_SET(fte_match_param, mv, outer_headers.ethertype, el->proto);
@ -1708,8 +1693,7 @@ mlx5e_add_vxlan_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv,
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
MLX5_SET(fte_match_param, mv, outer_headers.udp_dport, el->port);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
@ -1764,23 +1748,20 @@ static int
mlx5e_add_vxlan_rule_from_db(struct mlx5e_priv *priv,
struct mlx5e_vxlan_db_el *el)
{
u32 *match_criteria;
u32 *match_value;
struct mlx5_flow_spec *spec;
int err;
match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (match_value == NULL || match_criteria == NULL) {
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec) {
mlx5_en_err(priv->ifp, "alloc failed\n");
err = -ENOMEM;
goto add_vxlan_rule_out;
}
err = mlx5e_add_vxlan_rule_sub(priv, match_criteria, match_value, el);
err = mlx5e_add_vxlan_rule_sub(priv, spec, el);
add_vxlan_rule_out:
kvfree(match_criteria);
kvfree(match_value);
kvfree(spec);
return (err);
}
@ -1818,24 +1799,25 @@ mlx5e_add_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
}
static int
mlx5e_add_vxlan_catchall_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
mlx5e_add_vxlan_catchall_rule_sub(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec)
{
struct mlx5_flow_table *ft = priv->fts.vxlan.t;
struct mlx5_flow_destination dest = {};
u8 mc_enable = 0;
struct mlx5_flow_rule **rule_p;
struct mlx5_flow_handle **rule_p;
int err = 0;
struct mlx5_flow_act flow_act = {
.actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
.flow_tag = MLX5_FS_ETH_FLOW_TAG,
.action = MLX5_FLOW_RULE_FWD_ACTION_DEST,
};
spec->flow_context.flow_tag = MLX5_FS_ETH_FLOW_TAG;
spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = priv->fts.main.t;
rule_p = &priv->fts.vxlan_catchall_ft_rule;
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
@ -1850,24 +1832,20 @@ mlx5e_add_vxlan_catchall_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
static int
mlx5e_add_vxlan_catchall_rule(struct mlx5e_priv *priv)
{
u32 *match_criteria;
u32 *match_value;
struct mlx5_flow_spec *spec;
int err;
match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (match_value == NULL || match_criteria == NULL) {
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec) {
mlx5_en_err(priv->ifp, "alloc failed\n");
err = -ENOMEM;
goto add_vxlan_rule_out;
}
err = mlx5e_add_vxlan_catchall_rule_sub(priv, match_criteria,
match_value);
err = mlx5e_add_vxlan_catchall_rule_sub(priv, spec);
add_vxlan_rule_out:
kvfree(match_criteria);
kvfree(match_value);
kvfree(spec);
return (err);
}
@ -1911,7 +1889,7 @@ mlx5e_del_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
}
if (el->installed)
mlx5_del_flow_rule(&el->vxlan_ft_rule);
mlx5_del_flow_rules(&el->vxlan_ft_rule);
TAILQ_REMOVE(&priv->vxlan.head, el, link);
kvfree(el);
return (0);
@ -1925,7 +1903,7 @@ mlx5e_del_all_vxlan_rules(struct mlx5e_priv *priv)
TAILQ_FOREACH(el, &priv->vxlan.head, link) {
if (!el->installed)
continue;
mlx5_del_flow_rule(&el->vxlan_ft_rule);
mlx5_del_flow_rules(&el->vxlan_ft_rule);
el->installed = false;
}
}
@ -1933,7 +1911,7 @@ mlx5e_del_all_vxlan_rules(struct mlx5e_priv *priv)
static void
mlx5e_del_vxlan_catchall_rule(struct mlx5e_priv *priv)
{
mlx5_del_flow_rule(&priv->fts.vxlan_catchall_ft_rule);
mlx5_del_flow_rules(&priv->fts.vxlan_catchall_ft_rule);
}
void
@ -2030,11 +2008,13 @@ static int
mlx5e_create_vxlan_flow_table(struct mlx5e_priv *priv)
{
struct mlx5e_flow_table *ft = &priv->fts.vxlan;
struct mlx5_flow_table_attr ft_attr = {};
int err;
ft->num_groups = 0;
ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vxlan",
MLX5E_VXLAN_TABLE_SIZE);
ft_attr.max_fte = MLX5E_VXLAN_TABLE_SIZE;
ft_attr.level = (priv->ipsec) ? 9 : 1;
ft->t = mlx5_create_flow_table(priv->fts.ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
@ -2144,11 +2124,13 @@ static int
mlx5e_create_inner_rss_flow_table(struct mlx5e_priv *priv)
{
struct mlx5e_flow_table *ft = &priv->fts.inner_rss;
struct mlx5_flow_table_attr ft_attr = {};
int err;
ft->num_groups = 0;
ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "inner_rss",
MLX5E_INNER_RSS_TABLE_SIZE);
ft_attr.max_fte = MLX5E_INNER_RSS_TABLE_SIZE;
ft_attr.level = (priv->ipsec) ? 11 : 3;
ft->t = mlx5_create_flow_table(priv->fts.ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
@ -2198,9 +2180,13 @@ mlx5e_open_flow_tables(struct mlx5e_priv *priv)
priv->fts.ns = mlx5_get_flow_namespace(
priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
err = mlx5e_accel_ipsec_fs_rx_tables_create(priv);
if (err)
return err;
err = mlx5e_create_vlan_flow_table(priv);
if (err)
return (err);
goto err_destroy_ipsec_flow_table;
err = mlx5e_create_vxlan_flow_table(priv);
if (err)
@ -2222,13 +2208,19 @@ mlx5e_open_flow_tables(struct mlx5e_priv *priv)
if (err)
goto err_destroy_main_flow_table_false;
err = mlx5e_accel_ipsec_fs_rx_catchall_rules(priv);
if (err)
goto err_destroy_vxlan_catchall_rule;
err = mlx5e_accel_fs_tcp_create(priv);
if (err)
goto err_del_vxlan_catchall_rule;
goto err_destroy_ipsec_catchall_rules;
return (0);
err_del_vxlan_catchall_rule:
err_destroy_ipsec_catchall_rules:
mlx5e_accel_ipsec_fs_rx_catchall_rules_destroy(priv);
err_destroy_vxlan_catchall_rule:
mlx5e_del_vxlan_catchall_rule(priv);
err_destroy_main_flow_table_false:
mlx5e_destroy_main_flow_table(priv);
@ -2240,6 +2232,8 @@ mlx5e_open_flow_tables(struct mlx5e_priv *priv)
mlx5e_destroy_vxlan_flow_table(priv);
err_destroy_vlan_flow_table:
mlx5e_destroy_vlan_flow_table(priv);
err_destroy_ipsec_flow_table:
mlx5e_accel_ipsec_fs_rx_tables_destroy(priv);
return (err);
}
@ -2248,12 +2242,14 @@ void
mlx5e_close_flow_tables(struct mlx5e_priv *priv)
{
mlx5e_accel_fs_tcp_destroy(priv);
mlx5e_accel_ipsec_fs_rx_catchall_rules_destroy(priv);
mlx5e_del_vxlan_catchall_rule(priv);
mlx5e_destroy_main_flow_table(priv);
mlx5e_destroy_inner_rss_flow_table(priv);
mlx5e_destroy_main_vxlan_flow_table(priv);
mlx5e_destroy_vxlan_flow_table(priv);
mlx5e_destroy_vlan_flow_table(priv);
mlx5e_accel_ipsec_fs_rx_tables_destroy(priv);
}
int

View File

@ -31,6 +31,7 @@
#include <dev/mlx5/mlx5_en/en.h>
#include <dev/mlx5/tls.h>
#include <dev/mlx5/crypto.h>
#include <linux/delay.h>
#include <sys/ktls.h>
@ -237,6 +238,7 @@ mlx5e_tls_work(struct work_struct *work)
/* try to allocate a DEK context ID */
err = mlx5_encryption_key_create(priv->mdev, priv->pdn,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS,
MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, key.key_data),
MLX5_GET(sw_tls_cntx, ptag->crypto_params, key.key_len),
&ptag->dek_index);
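
The key-creation helper now takes an explicit key purpose as its third argument. A one-line sketch of the corresponding IPsec use, with placeholder variable names ("sa_key", "sa_key_len", "dek_index" are illustrative, not identifiers from this commit):

static int
example_create_ipsec_dek(struct mlx5_core_dev *mdev, u32 pdn,
    const void *sa_key, u32 sa_key_len, u32 *dek_index)
{
	/* Same helper as the TLS path above, but with the IPSEC key purpose. */
	return (mlx5_encryption_key_create(mdev, pdn,
	    MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC,
	    sa_key, sa_key_len, dek_index));
}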

View File

@ -29,6 +29,7 @@
#include <dev/mlx5/mlx5_en/en.h>
#include <dev/mlx5/crypto.h>
#include <dev/mlx5/tls.h>
#include <dev/mlx5/fs.h>
@ -551,6 +552,7 @@ mlx5e_tls_rx_work(struct work_struct *work)
/* try to allocate a DEK context ID */
err = mlx5_encryption_key_create(priv->mdev, priv->pdn,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS,
MLX5_ADDR_OF(sw_tls_rx_cntx, ptag->crypto_params, key.key_data),
MLX5_GET(sw_tls_rx_cntx, ptag->crypto_params, key.key_len),
&ptag->dek_index);
@ -659,7 +661,7 @@ mlx5e_tls_rx_snd_tag_alloc(if_t ifp,
struct mlx5e_iq *iq;
struct mlx5e_priv *priv;
struct mlx5e_tls_rx_tag *ptag;
struct mlx5_flow_rule *flow_rule;
struct mlx5_flow_handle *flow_rule;
const struct tls_session_params *en;
uint32_t value;
int error;

View File

@ -24,6 +24,7 @@
* SUCH DAMAGE.
*/
#include "opt_ipsec.h"
#include "opt_kern_tls.h"
#include "opt_rss.h"
#include "opt_ratelimit.h"
@ -35,6 +36,8 @@
#include <machine/atomic.h>
#include <net/debugnet.h>
#include <netipsec/keydb.h>
#include <netipsec/ipsec_offload.h>
static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs);
static if_snd_tag_query_t mlx5e_ul_snd_tag_query;
@ -3640,6 +3643,18 @@ mlx5e_ioctl(if_t ifp, u_long command, caddr_t data)
if_togglecapenable2(ifp, IFCAP2_BIT(IFCAP2_RXTLS4));
if ((mask & IFCAP2_BIT(IFCAP2_RXTLS6)) != 0)
if_togglecapenable2(ifp, IFCAP2_BIT(IFCAP2_RXTLS6));
#ifdef IPSEC_OFFLOAD
if ((mask & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) != 0) {
bool was_enabled = (if_getcapenable2(ifp) &
IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) != 0;
mlx5e_close_locked(ifp);
if (was_enabled)
ipsec_accel_on_ifdown(priv->ifp);
if_togglecapenable2(ifp,
IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD));
mlx5e_open_locked(ifp);
}
#endif
out:
PRIV_UNLOCK(priv);
break;
@ -4521,6 +4536,11 @@ mlx5e_create_ifp(struct mlx5_core_dev *mdev)
if_setcapabilitiesbit(ifp, IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO, 0);
if_setcapabilities2bit(ifp, IFCAP2_BIT(IFCAP2_RXTLS4) |
IFCAP2_BIT(IFCAP2_RXTLS6), 0);
if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
if_setcapabilities2bit(ifp, IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD),
0);
if_setsndtagallocfn(ifp, mlx5e_snd_tag_alloc);
#ifdef RATELIMIT
if_setratelimitqueryfn(ifp, mlx5e_ratelimit_query);
@ -4620,10 +4640,18 @@ mlx5e_create_ifp(struct mlx5_core_dev *mdev)
goto err_rl_init;
}
if ((if_getcapenable2(ifp) & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) != 0) {
err = mlx5e_ipsec_init(priv);
if (err) {
if_printf(ifp, "%s: mlx5e_ipsec_init failed\n", __func__);
goto err_tls_init;
}
}
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
if (err) {
if_printf(ifp, "%s: mlx5e_open_drop_rq failed (%d)\n", __func__, err);
goto err_tls_init;
goto err_ipsec_init;
}
err = mlx5e_open_rqts(priv);
@ -4799,6 +4827,9 @@ mlx5e_create_ifp(struct mlx5_core_dev *mdev)
err_open_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
err_ipsec_init:
mlx5e_ipsec_cleanup(priv);
err_tls_init:
mlx5e_tls_cleanup(priv);
@ -4905,10 +4936,14 @@ mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
ether_ifdetach(ifp);
mlx5e_tls_rx_cleanup(priv);
#ifdef IPSEC_OFFLOAD
ipsec_accel_on_ifdown(priv->ifp);
#endif
mlx5e_close_flow_tables(priv);
mlx5e_close_tirs(priv);
mlx5e_close_rqts(priv);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_ipsec_cleanup(priv);
mlx5e_tls_cleanup(priv);
mlx5e_rl_cleanup(priv);
@ -5023,6 +5058,7 @@ mlx5e_cleanup(void)
module_init_order(mlx5e_init, SI_ORDER_SIXTH);
module_exit_order(mlx5e_cleanup, SI_ORDER_SIXTH);
MODULE_DEPEND(mlx5en, ipsec, 1, 1, 1);
MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1);
MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1);
MODULE_VERSION(mlx5en, 1);

View File

@ -28,6 +28,7 @@
#include <dev/mlx5/mlx5_en/en.h>
#include <machine/in_cksum.h>
#include <dev/mlx5/mlx5_accel/ipsec.h>
static inline int
mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
@ -69,6 +70,9 @@ mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
/* get IP header aligned */
m_adj(mb, MLX5E_NET_IP_ALIGN);
err = mlx5_accel_ipsec_rx_tag_add(rq->ifp, mb);
if (err)
goto err_free_mbuf;
err = -bus_dmamap_load_mbuf_sg(rq->dma_tag, rq->mbuf[ix].dma_map,
mb, segs, &nsegs, BUS_DMA_NOWAIT);
if (err != 0)
@ -418,6 +422,8 @@ mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe,
default:
break;
}
mlx5e_accel_ipsec_handle_rx(mb, cqe);
}
static inline void
@ -563,7 +569,9 @@ mlx5e_poll_rx_cq(struct mlx5e_rq *rq, int budget)
("Filter returned %d!\n", rv));
}
}
if ((MHLEN - MLX5E_NET_IP_ALIGN) >= byte_cnt &&
if (!mlx5e_accel_ipsec_flow(cqe) /* tag is already assigned
to rq->mbuf */ &&
MHLEN - MLX5E_NET_IP_ALIGN >= byte_cnt &&
(mb = m_gethdr(M_NOWAIT, MT_DATA)) != NULL) {
/* set maximum mbuf length */
mb->m_len = MHLEN - MLX5E_NET_IP_ALIGN;

View File

@ -30,6 +30,7 @@
#include <dev/mlx5/mlx5_en/en.h>
#include <machine/atomic.h>
#include <dev/mlx5/mlx5_accel/ipsec.h>
static inline bool
mlx5e_do_send_cqe_inline(struct mlx5e_sq *sq)
@ -744,6 +745,8 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
/* get pointer to mbuf */
mb = *mbp;
mlx5e_accel_ipsec_handle_tx(mb, wqe);
/* Send a copy of the frame to the BPF listener, if any */
if (ifp != NULL)
ETHER_BPF_MTAP(ifp, mb);

View File

@ -170,7 +170,7 @@ struct mlx5_ib_flow_handler {
struct list_head list;
struct ib_flow ibflow;
struct mlx5_ib_flow_prio *prio;
struct mlx5_flow_rule *rule;
struct mlx5_flow_handle *rule;
};
struct mlx5_ib_flow_db {

View File

@ -201,7 +201,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
case MLX5_CQE_RESP_WR_IMM:
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
wc->wc_flags = IB_WC_WITH_IMM;
wc->ex.imm_data = cqe->imm_inval_pkey;
wc->ex.imm_data = cqe->immediate;
break;
case MLX5_CQE_RESP_SEND:
wc->opcode = IB_WC_RECV;
@ -213,12 +213,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
case MLX5_CQE_RESP_SEND_IMM:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_IMM;
wc->ex.imm_data = cqe->imm_inval_pkey;
wc->ex.imm_data = cqe->immediate;
break;
case MLX5_CQE_RESP_SEND_INV:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_INVALIDATE;
wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey);
break;
}
wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
@ -226,7 +226,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
wc->wc_flags |= g ? IB_WC_GRH : 0;
if (unlikely(is_qp1(qp->ibqp.qp_type))) {
u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;
u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff;
ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
&wc->pkey_index);

View File

@ -2072,13 +2072,13 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
mutex_lock(&dev->flow_db.lock);
list_for_each_entry_safe(iter, tmp, &handler->list, list) {
mlx5_del_flow_rule(&iter->rule);
mlx5_del_flow_rules(&iter->rule);
put_flow_table(dev, iter->prio, true);
list_del(&iter->list);
kfree(iter);
}
mlx5_del_flow_rule(&handler->rule);
mlx5_del_flow_rules(&handler->rule);
put_flow_table(dev, handler->prio, true);
mutex_unlock(&dev->flow_db.lock);
@ -2107,6 +2107,7 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
enum flow_table_type ft_type)
{
bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns = NULL;
struct mlx5_ib_flow_prio *prio;
struct mlx5_flow_table *ft;
@ -2155,10 +2156,11 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
ft = prio->flow_table;
if (!ft) {
ft = mlx5_create_auto_grouped_flow_table(ns, priority, "bypass",
num_entries,
num_groups,
0);
ft_attr.prio = priority;
ft_attr.max_fte = num_entries;
ft_attr.autogroup.max_num_groups = num_groups;
ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (!IS_ERR(ft)) {
prio->refcount = 0;
@ -2181,10 +2183,8 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
struct mlx5_flow_spec *spec;
const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
unsigned int spec_index;
struct mlx5_flow_act flow_act = {
.actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
};
struct mlx5_flow_act flow_act = {};
u32 action;
int err = 0;
@ -2198,6 +2198,9 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
goto free;
}
spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
spec->flow_context.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
INIT_LIST_HEAD(&handler->list);
for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
@ -2211,12 +2214,8 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
action = dst ? MLX5_FLOW_RULE_FWD_ACTION_DEST : 0;
handler->rule = mlx5_add_flow_rule(ft, spec->match_criteria_enable,
spec->match_criteria,
spec->match_value,
action,
&flow_act,
dst);
flow_act.action = action;
handler->rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1);
if (IS_ERR(handler->rule)) {
err = PTR_ERR(handler->rule);
@ -2247,7 +2246,7 @@ static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *de
handler_dst = create_flow_rule(dev, ft_prio,
flow_attr, dst);
if (IS_ERR(handler_dst)) {
mlx5_del_flow_rule(&handler->rule);
mlx5_del_flow_rules(&handler->rule);
ft_prio->refcount--;
kfree(handler);
handler = handler_dst;
@ -2310,7 +2309,7 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de
&leftovers_specs[LEFTOVERS_UC].flow_attr,
dst);
if (IS_ERR(handler_ucast)) {
mlx5_del_flow_rule(&handler->rule);
mlx5_del_flow_rules(&handler->rule);
ft_prio->refcount--;
kfree(handler);
handler = handler_ucast;
@ -2353,7 +2352,7 @@ static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
return handler_rx;
err_tx:
mlx5_del_flow_rule(&handler_rx->rule);
mlx5_del_flow_rules(&handler_rx->rule);
ft_rx->refcount--;
kfree(handler_rx);
err:

View File

@ -63,7 +63,7 @@ enum {
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,
MLX5_EVENT_TYPE_CODING_GENERAL_OBJ_EVENT = 0x27,
MLX5_EVENT_TYPE_OBJECT_CHANGE = 0x27,
};
enum {
@ -322,8 +322,13 @@ enum {
MLX5_ICMD_CMDS_OPCODE_ICMD_OPCODE_INIT_OCSD = 0xf004
};
enum {
MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = 1ULL << 0x13,
};
enum {
MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc,
MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13,
};
enum {
@ -336,7 +341,8 @@ enum {
};
enum {
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK = 0x1,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS = 0x1,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC = 0x2,
};
struct mlx5_ifc_flow_table_fields_supported_bits {
@ -463,39 +469,70 @@ struct mlx5_ifc_eth_discard_cntrs_grp_bits {
u8 reserved_at_340[0x440];
};
struct mlx5_ifc_flow_table_prop_layout_bits {
u8 ft_support[0x1];
u8 flow_tag[0x1];
u8 reserved_at_1[0x1];
u8 flow_counter[0x1];
u8 flow_modify_en[0x1];
u8 modify_root[0x1];
u8 identified_miss_table[0x1];
u8 identified_miss_table_mode[0x1];
u8 flow_table_modify[0x1];
u8 encap[0x1];
u8 reformat[0x1];
u8 decap[0x1];
u8 reset_root_to_default[0x1];
u8 reserved_at_a[0x16];
u8 reserved_at_20[0x2];
u8 reserved_at_9[0x1];
u8 pop_vlan[0x1];
u8 push_vlan[0x1];
u8 reserved_at_c[0x1];
u8 pop_vlan_2[0x1];
u8 push_vlan_2[0x1];
u8 reformat_and_vlan_action[0x1];
u8 reserved_at_10[0x1];
u8 sw_owner[0x1];
u8 reformat_l3_tunnel_to_l2[0x1];
u8 reformat_l2_to_l3_tunnel[0x1];
u8 reformat_and_modify_action[0x1];
u8 ignore_flow_level[0x1];
u8 reserved_at_16[0x1];
u8 table_miss_action_domain[0x1];
u8 termination_table[0x1];
u8 reformat_and_fwd_to_table[0x1];
u8 reserved_at_1a[0x2];
u8 ipsec_encrypt[0x1];
u8 ipsec_decrypt[0x1];
u8 sw_owner_v2[0x1];
u8 reserved_at_1f[0x1];
u8 termination_table_raw_traffic[0x1];
u8 reserved_at_21[0x1];
u8 log_max_ft_size[0x6];
u8 log_max_modify_header_context[0x8];
u8 max_modify_header_actions[0x8];
u8 max_ft_level[0x8];
u8 reserved_at_40[0x20];
u8 reserved_at_60[0x18];
u8 reformat_add_esp_trasport[0x1];
u8 reformat_l2_to_l3_esp_tunnel[0x1];
u8 reformat_add_esp_transport_over_udp[0x1];
u8 reformat_del_esp_trasport[0x1];
u8 reformat_l3_esp_tunnel_to_l2[0x1];
u8 reformat_del_esp_transport_over_udp[0x1];
u8 execute_aso[0x1];
u8 reserved_at_47[0x19];
u8 reserved_at_60[0x2];
u8 reformat_insert[0x1];
u8 reformat_remove[0x1];
u8 macsec_encrypt[0x1];
u8 macsec_decrypt[0x1];
u8 reserved_at_66[0x2];
u8 reformat_add_macsec[0x1];
u8 reformat_remove_macsec[0x1];
u8 reserved_at_6a[0xe];
u8 log_max_ft_num[0x8];
u8 reserved_at_80[0x10];
u8 log_max_flow_counter[0x8];
u8 log_max_destination[0x8];
u8 reserved_at_a0[0x18];
u8 log_max_flow[0x8];
u8 reserved_at_c0[0x40];
struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support;
@ -523,15 +560,18 @@ enum {
MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 0x1,
MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 0x2,
MLX5_FLOW_CONTEXT_DEST_TYPE_QP = 0x3,
MLX5_FLOW_CONTEXT_DEST_TYPE_TABLE_TYPE = 0xA,
};
struct mlx5_ifc_dest_format_struct_bits {
u8 destination_type[0x8];
u8 destination_id[0x18];
u8 destination_type[0x8];
u8 destination_id[0x18];
u8 reserved_0[0x8];
u8 destination_table_type[0x8];
u8 reserved_at_1[0x10];
u8 destination_eswitch_owner_vhca_id_valid[0x1];
u8 packet_reformat[0x1];
u8 reserved_at_22[0x6];
u8 destination_table_type[0x8];
u8 destination_eswitch_owner_vhca_id[0x10];
};
struct mlx5_ifc_ipv4_layout_bits {
@ -585,11 +625,25 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
};
struct mlx5_ifc_nvgre_key_bits {
u8 hi[0x18];
u8 lo[0x8];
};
union mlx5_ifc_gre_key_bits {
struct mlx5_ifc_nvgre_key_bits nvgre;
u8 key[0x20];
};
struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_0[0x8];
u8 gre_c_present[0x1];
u8 reserved_at_1[0x1];
u8 gre_k_present[0x1];
u8 gre_s_present[0x1];
u8 source_vhca_port[0x4];
u8 source_sqn[0x18];
u8 reserved_1[0x10];
u8 source_eswitch_owner_vhca_id[0x10];
u8 source_port[0x10];
u8 outer_second_prio[0x3];
@ -599,35 +653,163 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 inner_second_cfi[0x1];
u8 inner_second_vid[0xc];
u8 outer_second_vlan_tag[0x1];
u8 inner_second_vlan_tag[0x1];
u8 reserved_2[0xe];
u8 outer_second_cvlan_tag[0x1];
u8 inner_second_cvlan_tag[0x1];
u8 outer_second_svlan_tag[0x1];
u8 inner_second_svlan_tag[0x1];
u8 reserved_at_64[0xc];
u8 gre_protocol[0x10];
u8 gre_key_h[0x18];
u8 gre_key_l[0x8];
union mlx5_ifc_gre_key_bits gre_key;
u8 vxlan_vni[0x18];
u8 reserved_3[0x8];
u8 bth_opcode[0x8];
u8 geneve_vni[0x18];
u8 reserved4[0x7];
u8 reserved_at_d8[0x6];
u8 geneve_tlv_option_0_exist[0x1];
u8 geneve_oam[0x1];
u8 reserved_5[0xc];
u8 reserved_at_e0[0xc];
u8 outer_ipv6_flow_label[0x14];
u8 reserved_6[0xc];
u8 reserved_at_100[0xc];
u8 inner_ipv6_flow_label[0x14];
u8 reserved_7[0xa];
u8 reserved_at_120[0xa];
u8 geneve_opt_len[0x6];
u8 geneve_protocol_type[0x10];
u8 reserved_8[0x8];
u8 reserved_at_140[0x8];
u8 bth_dst_qp[0x18];
u8 inner_esp_spi[0x20];
u8 outer_esp_spi[0x20];
u8 reserved_at_1a0[0x60];
};
u8 reserved_9[0xa0];
struct mlx5_ifc_fte_match_mpls_bits {
u8 mpls_label[0x14];
u8 mpls_exp[0x3];
u8 mpls_s_bos[0x1];
u8 mpls_ttl[0x8];
};
struct mlx5_ifc_fte_match_set_misc2_bits {
struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls;
struct mlx5_ifc_fte_match_mpls_bits inner_first_mpls;
struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_gre;
struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp;
u8 metadata_reg_c_7[0x20];
u8 metadata_reg_c_6[0x20];
u8 metadata_reg_c_5[0x20];
u8 metadata_reg_c_4[0x20];
u8 metadata_reg_c_3[0x20];
u8 metadata_reg_c_2[0x20];
u8 metadata_reg_c_1[0x20];
u8 metadata_reg_c_0[0x20];
u8 metadata_reg_a[0x20];
u8 reserved_at_1a0[0x8];
u8 macsec_syndrome[0x8];
u8 ipsec_syndrome[0x8];
u8 reserved_at_1b8[0x8];
u8 reserved_at_1c0[0x40];
};
struct mlx5_ifc_fte_match_set_misc3_bits {
u8 inner_tcp_seq_num[0x20];
u8 outer_tcp_seq_num[0x20];
u8 inner_tcp_ack_num[0x20];
u8 outer_tcp_ack_num[0x20];
u8 reserved_at_80[0x8];
u8 outer_vxlan_gpe_vni[0x18];
u8 outer_vxlan_gpe_next_protocol[0x8];
u8 outer_vxlan_gpe_flags[0x8];
u8 reserved_at_b0[0x10];
u8 icmp_header_data[0x20];
u8 icmpv6_header_data[0x20];
u8 icmp_type[0x8];
u8 icmp_code[0x8];
u8 icmpv6_type[0x8];
u8 icmpv6_code[0x8];
u8 geneve_tlv_option_0_data[0x20];
u8 gtpu_teid[0x20];
u8 gtpu_msg_type[0x8];
u8 gtpu_msg_flags[0x8];
u8 reserved_at_170[0x10];
u8 gtpu_dw_2[0x20];
u8 gtpu_first_ext_dw_0[0x20];
u8 gtpu_dw_0[0x20];
u8 reserved_at_1e0[0x20];
};
struct mlx5_ifc_fte_match_set_misc4_bits {
u8 prog_sample_field_value_0[0x20];
u8 prog_sample_field_id_0[0x20];
u8 prog_sample_field_value_1[0x20];
u8 prog_sample_field_id_1[0x20];
u8 prog_sample_field_value_2[0x20];
u8 prog_sample_field_id_2[0x20];
u8 prog_sample_field_value_3[0x20];
u8 prog_sample_field_id_3[0x20];
u8 reserved_at_100[0x100];
};
struct mlx5_ifc_fte_match_set_misc5_bits {
u8 macsec_tag_0[0x20];
u8 macsec_tag_1[0x20];
u8 macsec_tag_2[0x20];
u8 macsec_tag_3[0x20];
u8 tunnel_header_0[0x20];
u8 tunnel_header_1[0x20];
u8 tunnel_header_2[0x20];
u8 tunnel_header_3[0x20];
u8 reserved_at_100[0x100];
};
struct mlx5_ifc_cmd_pas_bits {
@ -863,6 +1045,20 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
u8 reserved_1[0x7200];
};
struct mlx5_ifc_port_selection_cap_bits {
u8 reserved_at_0[0x10];
u8 port_select_flow_table[0x1];
u8 reserved_at_11[0x1];
u8 port_select_flow_table_bypass[0x1];
u8 reserved_at_13[0xd];
u8 reserved_at_20[0x1e0];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_port_selection;
u8 reserved_at_400[0x7c00];
};
struct mlx5_ifc_pddr_module_info_bits {
u8 cable_technology[0x8];
u8 cable_breakout[0x8];
@ -1154,7 +1350,15 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_0[0x20];
u8 hca_cap_2[0x1];
u8 reserved_at_21[0x1f];
u8 create_lag_when_not_master_up[0x1];
u8 dtor[0x1];
u8 event_on_vhca_state_teardown_request[0x1];
u8 event_on_vhca_state_in_use[0x1];
u8 event_on_vhca_state_active[0x1];
u8 event_on_vhca_state_allocated[0x1];
u8 event_on_vhca_state_invalid[0x1];
u8 reserved_at_28[0x8];
u8 vhca_id[0x10];
u8 reserved_at_40[0x40];
@ -1404,7 +1608,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_460[0x3];
u8 log_max_uctx[0x5];
u8 reserved_at_468[0x3];
u8 reserved_at_468[0x2];
u8 ipsec_offload[0x1];
u8 log_max_umem[0x5];
u8 max_num_eqs[0x10];
@ -1488,11 +1693,27 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_260[0x5a0];
};
enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE = 0xA,
enum mlx5_ifc_flow_destination_type {
MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT = 0x0,
MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
MLX5_IFC_FLOW_DESTINATION_TYPE_TIR = 0x2,
MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6,
MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK = 0x8,
MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE = 0xA,
};
enum mlx5_flow_table_miss_action {
MLX5_FLOW_TABLE_MISS_ACTION_DEF,
MLX5_FLOW_TABLE_MISS_ACTION_FWD,
MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
};
struct mlx5_ifc_extended_dest_format_bits {
struct mlx5_ifc_dest_format_struct_bits destination_entry;
u8 packet_reformat_id[0x20];
u8 reserved_at_60[0x20];
};
union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
@ -1502,13 +1723,21 @@ union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
};
struct mlx5_ifc_fte_match_param_bits {
struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
u8 reserved_0[0xa00];
struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2;
struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3;
struct mlx5_ifc_fte_match_set_misc4_bits misc_parameters_4;
struct mlx5_ifc_fte_match_set_misc5_bits misc_parameters_5;
u8 reserved_at_e00[0x200];
};
enum {
@ -2310,43 +2539,85 @@ struct mlx5_ifc_rdbc_bits {
u8 atomic_resp[32][0x8];
};
struct mlx5_ifc_vlan_bits {
u8 ethtype[0x10];
u8 prio[0x3];
u8 cfi[0x1];
u8 vid[0xc];
};
enum {
MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1,
MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT = 0x10,
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
MLX5_FLOW_METER_COLOR_RED = 0x0,
MLX5_FLOW_METER_COLOR_YELLOW = 0x1,
MLX5_FLOW_METER_COLOR_GREEN = 0x2,
MLX5_FLOW_METER_COLOR_UNDEFINED = 0x3,
};
enum {
MLX5_EXE_ASO_FLOW_METER = 0x2,
};
struct mlx5_ifc_exe_aso_ctrl_flow_meter_bits {
u8 return_reg_id[0x4];
u8 aso_type[0x4];
u8 reserved_at_8[0x14];
u8 action[0x1];
u8 init_color[0x2];
u8 meter_id[0x1];
};
union mlx5_ifc_exe_aso_ctrl {
struct mlx5_ifc_exe_aso_ctrl_flow_meter_bits exe_aso_ctrl_flow_meter;
};
struct mlx5_ifc_execute_aso_bits {
u8 valid[0x1];
u8 reserved_at_1[0x7];
u8 aso_object_id[0x18];
union mlx5_ifc_exe_aso_ctrl exe_aso_ctrl;
};
enum {
MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC = 0x0,
};
struct mlx5_ifc_flow_context_bits {
u8 reserved_0[0x20];
struct mlx5_ifc_vlan_bits push_vlan;
u8 group_id[0x20];
u8 reserved_1[0x8];
u8 reserved_at_40[0x8];
u8 flow_tag[0x18];
u8 reserved_2[0x10];
u8 reserved_at_60[0x10];
u8 action[0x10];
u8 reserved_3[0x8];
u8 extended_destination[0x1];
u8 reserved_at_81[0x1];
u8 flow_source[0x2];
u8 encrypt_decrypt_type[0x4];
u8 destination_list_size[0x18];
u8 reserved_4[0x8];
u8 reserved_at_a0[0x8];
u8 flow_counter_list_size[0x18];
u8 packet_reformat_id[0x20];
u8 modify_header_id[0x20];
u8 modify_header_id[0x20];
u8 reserved_6[0x100];
struct mlx5_ifc_vlan_bits push_vlan_2;
u8 encrypt_decrypt_obj_id[0x20];
u8 reserved_at_140[0xc0];
struct mlx5_ifc_fte_match_param_bits match_value;
u8 reserved_7[0x600];
struct mlx5_ifc_execute_aso_bits execute_aso[4];
union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0];
u8 reserved_at_1300[0x500];
union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[];
};
enum {
@ -3008,21 +3279,27 @@ enum {
};
struct mlx5_ifc_flow_table_context_bits {
u8 reformat_en[0x1];
u8 decap_en[0x1];
u8 reserved_at_2[0x2];
u8 table_miss_action[0x4];
u8 level[0x8];
u8 reserved_at_10[0x8];
u8 log_size[0x8];
u8 reformat_en[0x1];
u8 decap_en[0x1];
u8 sw_owner[0x1];
u8 termination_table[0x1];
u8 table_miss_action[0x4];
u8 level[0x8];
u8 reserved_at_10[0x8];
u8 log_size[0x8];
u8 reserved_at_20[0x8];
u8 table_miss_id[0x18];
u8 reserved_at_20[0x8];
u8 table_miss_id[0x18];
u8 reserved_at_40[0x8];
u8 lag_master_next_table_id[0x18];
u8 reserved_at_40[0x8];
u8 lag_master_next_table_id[0x18];
u8 reserved_at_60[0x60];
u8 sw_owner_icm_root_1[0x40];
u8 sw_owner_icm_root_0[0x40];
u8 reserved_at_60[0xe0];
};
struct mlx5_ifc_esw_vport_context_bits {
@ -3980,28 +4257,32 @@ struct mlx5_ifc_set_flow_table_root_out_bits {
};
struct mlx5_ifc_set_flow_table_root_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 reserved_1[0x10];
u8 op_mod[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
u8 reserved_2[0xf];
u8 vport_number[0x10];
u8 other_vport[0x1];
u8 reserved_at_41[0xf];
u8 vport_number[0x10];
u8 reserved_3[0x20];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_4[0x18];
u8 table_type[0x8];
u8 reserved_at_88[0x7];
u8 table_of_other_vport[0x1];
u8 table_vport_number[0x10];
u8 reserved_5[0x8];
u8 table_id[0x18];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
u8 reserved_6[0x8];
u8 underlay_qpn[0x18];
u8 reserved_7[0x120];
u8 reserved_at_c0[0x8];
u8 underlay_qpn[0x18];
u8 table_eswitch_owner_vhca_id_valid[0x1];
u8 reserved_at_e1[0xf];
u8 table_eswitch_owner_vhca_id[0x10];
u8 reserved_at_100[0x100];
};
struct mlx5_ifc_set_fte_out_bits {
@ -4014,34 +4295,35 @@ struct mlx5_ifc_set_fte_out_bits {
};
struct mlx5_ifc_set_fte_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 reserved_1[0x10];
u8 op_mod[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
u8 reserved_2[0xf];
u8 vport_number[0x10];
u8 other_vport[0x1];
u8 reserved_at_41[0xf];
u8 vport_number[0x10];
u8 reserved_3[0x20];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_4[0x18];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
u8 reserved_5[0x8];
u8 table_id[0x18];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
u8 reserved_6[0x18];
u8 modify_enable_mask[0x8];
u8 ignore_flow_level[0x1];
u8 reserved_at_c1[0x17];
u8 modify_enable_mask[0x8];
u8 reserved_7[0x20];
u8 reserved_at_e0[0x20];
u8 flow_index[0x20];
u8 flow_index[0x20];
u8 reserved_8[0xe0];
u8 reserved_at_120[0xe0];
struct mlx5_ifc_flow_context_bits flow_context;
struct mlx5_ifc_flow_context_bits flow_context;
};
struct mlx5_ifc_set_driver_version_out_bits {
@ -4322,6 +4604,23 @@ enum {
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_UPLINK = 0x2,
};
enum {
MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1,
MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT = 0x10,
MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80,
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100,
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400,
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800,
MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT = 0x1000,
MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT = 0x2000,
MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO = 0x4000,
};
struct mlx5_ifc_query_vport_state_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
@ -5542,6 +5841,12 @@ enum mlx5_reformat_ctx_type {
MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3,
MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4 = 0x5,
MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4 = 0x7,
MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT = 0x8,
MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP = 0xa,
MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 = 0xb,
MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6 = 0xc,
};
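
The new ESP reformat types distinguish plain transport mode from UDP-encapsulated ESP (NAT traversal), for both IPv4 and IPv6. An illustrative selector only, not code from the commit; "encap_udp" and "is_ipv6" are placeholder flags:

static enum mlx5_reformat_ctx_type
example_esp_transport_reformat(bool encap_udp, bool is_ipv6)
{
	if (encap_udp)
		return (is_ipv6 ?
		    MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6 :
		    MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4);
	return (is_ipv6 ?
	    MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 :
	    MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4);
}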
struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
@ -6252,6 +6557,11 @@ struct mlx5_ifc_modify_hca_vport_context_in_bits {
struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
};
enum {
MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = (1UL << 0),
MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID = (1UL << 15),
};
struct mlx5_ifc_modify_flow_table_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@ -7895,24 +8205,24 @@ struct mlx5_ifc_create_flow_table_out_bits {
};
struct mlx5_ifc_create_flow_table_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 opcode[0x10];
u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
u8 reserved_at_41[0xf];
u8 vport_number[0x10];
u8 other_vport[0x1];
u8 reserved_at_41[0xf];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
u8 reserved_at_a0[0x20];
u8 reserved_at_a0[0x20];
struct mlx5_ifc_flow_table_context_bits flow_table_context;
struct mlx5_ifc_flow_table_context_bits flow_table_context;
};
struct mlx5_ifc_create_flow_group_out_bits {
@ -7928,46 +8238,54 @@ struct mlx5_ifc_create_flow_group_out_bits {
};
enum {
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
};
struct mlx5_ifc_create_flow_group_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 reserved_1[0x10];
u8 op_mod[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
u8 reserved_2[0xf];
u8 vport_number[0x10];
u8 other_vport[0x1];
u8 reserved_at_41[0xf];
u8 vport_number[0x10];
u8 reserved_3[0x20];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_4[0x18];
u8 table_type[0x8];
u8 reserved_at_88[0x4];
u8 group_type[0x4];
u8 reserved_at_90[0x10];
u8 reserved_5[0x8];
u8 table_id[0x18];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
u8 reserved_6[0x20];
u8 source_eswitch_owner_vhca_id_valid[0x1];
u8 start_flow_index[0x20];
u8 reserved_at_c1[0x1f];
u8 reserved_7[0x20];
u8 start_flow_index[0x20];
u8 end_flow_index[0x20];
u8 reserved_at_100[0x20];
u8 reserved_8[0xa0];
u8 end_flow_index[0x20];
u8 reserved_9[0x18];
u8 match_criteria_enable[0x8];
u8 reserved_at_140[0x10];
u8 match_definer_id[0x10];
struct mlx5_ifc_fte_match_param_bits match_criteria;
u8 reserved_at_160[0x80];
u8 reserved_10[0xe00];
u8 reserved_at_1e0[0x18];
u8 match_criteria_enable[0x8];
struct mlx5_ifc_fte_match_param_bits match_criteria;
u8 reserved_at_1200[0xe00];
};
struct mlx5_ifc_create_encryption_key_out_bits {
@ -11618,5 +11936,120 @@ enum mlx5_fc_bulk_alloc_bitmask {
#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
struct mlx5_ifc_ipsec_cap_bits {
u8 ipsec_full_offload[0x1];
u8 ipsec_crypto_offload[0x1];
u8 ipsec_esn[0x1];
u8 ipsec_crypto_esp_aes_gcm_256_encrypt[0x1];
u8 ipsec_crypto_esp_aes_gcm_128_encrypt[0x1];
u8 ipsec_crypto_esp_aes_gcm_256_decrypt[0x1];
u8 ipsec_crypto_esp_aes_gcm_128_decrypt[0x1];
u8 reserved_at_7[0x4];
u8 log_max_ipsec_offload[0x5];
u8 reserved_at_10[0x10];
u8 min_log_ipsec_full_replay_window[0x8];
u8 max_log_ipsec_full_replay_window[0x8];
u8 reserved_at_30[0x7d0];
};
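
These capability bits are what a driver would consult before advertising IFCAP2_IPSEC_OFFLOAD. A minimal sketch only, assuming an MLX5_CAP_IPSEC() accessor analogous to the other MLX5_CAP_* helpers; it is not the commit's actual mlx5_ipsec_device_caps(), and MLX5_IPSEC_CAP_CRYPTO is a placeholder flag name:

static u32
example_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
	u32 caps = 0;

	if (!MLX5_CAP_GEN(mdev, ipsec_offload))
		return (0);
	if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload))
		caps |= MLX5_IPSEC_CAP_CRYPTO;		/* placeholder flag */
	if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload))
		caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
	return (caps);
}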
enum {
MLX5_IPSEC_OBJECT_ICV_LEN_16B,
};
enum {
MLX5_IPSEC_ASO_REG_C_0_1 = 0x0,
MLX5_IPSEC_ASO_REG_C_2_3 = 0x1,
MLX5_IPSEC_ASO_REG_C_4_5 = 0x2,
MLX5_IPSEC_ASO_REG_C_6_7 = 0x3,
};
enum {
MLX5_IPSEC_ASO_MODE = 0x0,
MLX5_IPSEC_ASO_REPLAY_PROTECTION = 0x1,
MLX5_IPSEC_ASO_INC_SN = 0x2,
};
enum {
MLX5_IPSEC_ASO_REPLAY_WIN_32BIT = 0x0,
MLX5_IPSEC_ASO_REPLAY_WIN_64BIT = 0x1,
MLX5_IPSEC_ASO_REPLAY_WIN_128BIT = 0x2,
MLX5_IPSEC_ASO_REPLAY_WIN_256BIT = 0x3,
};
struct mlx5_ifc_ipsec_aso_bits {
u8 valid[0x1];
u8 reserved_at_201[0x1];
u8 mode[0x2];
u8 window_sz[0x2];
u8 soft_lft_arm[0x1];
u8 hard_lft_arm[0x1];
u8 remove_flow_enable[0x1];
u8 esn_event_arm[0x1];
u8 reserved_at_20a[0x16];
u8 remove_flow_pkt_cnt[0x20];
u8 remove_flow_soft_lft[0x20];
u8 reserved_at_260[0x80];
u8 mode_parameter[0x20];
u8 replay_protection_window[0x100];
};
struct mlx5_ifc_ipsec_obj_bits {
u8 modify_field_select[0x40];
u8 full_offload[0x1];
u8 reserved_at_41[0x1];
u8 esn_en[0x1];
u8 esn_overlap[0x1];
u8 reserved_at_44[0x2];
u8 icv_length[0x2];
u8 reserved_at_48[0x4];
u8 aso_return_reg[0x4];
u8 reserved_at_50[0x10];
u8 esn_msb[0x20];
u8 reserved_at_80[0x8];
u8 dekn[0x18];
u8 salt[0x20];
u8 implicit_iv[0x40];
u8 reserved_at_100[0x8];
u8 ipsec_aso_access_pd[0x18];
u8 reserved_at_120[0xe0];
struct mlx5_ifc_ipsec_aso_bits ipsec_aso;
};
struct mlx5_ifc_create_ipsec_obj_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
struct mlx5_ifc_ipsec_obj_bits ipsec_object;
};
enum {
MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP = 1 << 0,
MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB = 1 << 1,
};
struct mlx5_ifc_query_ipsec_obj_out_bits {
struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
struct mlx5_ifc_ipsec_obj_bits ipsec_object;
};
struct mlx5_ifc_modify_ipsec_obj_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
struct mlx5_ifc_ipsec_obj_bits ipsec_object;
};
enum {
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_TLS = 0x1,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_IPSEC = 0x2,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_MACSEC = 0x4,
};
#endif /* MLX5_IFC_H */

View File

@ -0,0 +1,92 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef __MLX5_LIB_ASO_H__
#define __MLX5_LIB_ASO_H__
#include <dev/mlx5/qp.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#define MLX5_ASO_WQEBBS \
(DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_BB))
#define MLX5_ASO_WQEBBS_DATA \
(DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe_data), MLX5_SEND_WQE_BB))
#define ASO_CTRL_READ_EN BIT(0)
#define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24
#define MLX5_MACSEC_ASO_DS_CNT (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_DS))
struct mlx5_wqe_aso_ctrl_seg {
__be32 va_h;
__be32 va_l; /* include read_enable */
__be32 l_key;
u8 data_mask_mode;
u8 condition_1_0_operand;
u8 condition_1_0_offset;
u8 data_offset_condition_operand;
__be32 condition_0_data;
__be32 condition_0_mask;
__be32 condition_1_data;
__be32 condition_1_mask;
__be64 bitwise_data;
__be64 data_mask;
};
struct mlx5_wqe_aso_data_seg {
__be32 bytewise_data[16];
};
struct mlx5_aso_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_aso_ctrl_seg aso_ctrl;
};
struct mlx5_aso_wqe_data {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_aso_ctrl_seg aso_ctrl;
struct mlx5_wqe_aso_data_seg aso_data;
};
enum {
MLX5_ASO_LOGICAL_AND,
MLX5_ASO_LOGICAL_OR,
};
enum {
MLX5_ASO_ALWAYS_FALSE,
MLX5_ASO_ALWAYS_TRUE,
MLX5_ASO_EQUAL,
MLX5_ASO_NOT_EQUAL,
MLX5_ASO_GREATER_OR_EQUAL,
MLX5_ASO_LESSER_OR_EQUAL,
MLX5_ASO_LESSER,
MLX5_ASO_GREATER,
MLX5_ASO_CYCLIC_GREATER,
MLX5_ASO_CYCLIC_LESSER,
};
enum {
MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT,
MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE,
MLX5_ASO_DATA_MASK_MODE_CALCULATED_64BYTE,
};
enum {
MLX5_ACCESS_ASO_OPC_MOD_IPSEC = 0x0,
MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER = 0x2,
MLX5_ACCESS_ASO_OPC_MOD_MACSEC = 0x5,
};
struct mlx5_aso;
struct mlx5_aso_wqe *mlx5_aso_get_wqe(struct mlx5_aso *aso);
void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
struct mlx5_aso_wqe *aso_wqe,
u32 obj_id, u32 opc_mode);
void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
struct mlx5_wqe_ctrl_seg *doorbell_cseg);
int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data);
struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn);
void mlx5_aso_destroy(struct mlx5_aso *aso);
#endif /* __MLX5_LIB_ASO_H__ */
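
A usage sketch of the ASO helpers declared above (not code from this commit): the intended calling sequence is get_wqe, build_wqe, post_wqe, then poll the CQ. The ASO SQ is normally created once and reused; "obj_id" is a caller-supplied placeholder for e.g. an IPsec offload object:

static int
example_aso_kick(struct mlx5_core_dev *mdev, u32 pdn, u32 obj_id)
{
	struct mlx5_aso_wqe *wqe;
	struct mlx5_aso *aso;
	int err, i;

	/* Typically created once at init and kept for the device lifetime. */
	aso = mlx5_aso_create(mdev, pdn);
	if (IS_ERR(aso))
		return (PTR_ERR(aso));

	wqe = mlx5_aso_get_wqe(aso);
	mlx5_aso_build_wqe(aso, DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS),
	    wqe, obj_id, MLX5_ACCESS_ASO_OPC_MOD_IPSEC);
	mlx5_aso_post_wqe(aso, false, &wqe->ctrl);

	/* Completion may lag the doorbell; poll with a bounded retry budget. */
	for (i = 0; i < 100; i++) {
		err = mlx5_aso_poll_cq(aso, false);
		if (err != -ETIMEDOUT)
			break;
	}

	mlx5_aso_destroy(aso);
	return (err);
}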

View File

@ -0,0 +1,428 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <linux/printk.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_core/transobj.h>
#include "aso.h"
#include <dev/mlx5/mlx5_core/wq.h>
#include <dev/mlx5/cq.h>
struct mlx5_aso_cq {
/* data path - accessed per cqe */
struct mlx5_cqwq wq;
/* data path - accessed per napi poll */
struct mlx5_core_cq mcq;
/* control */
struct mlx5_core_dev *mdev;
struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;
struct mlx5_aso {
/* data path */
u16 cc;
u16 pc;
struct mlx5_wqe_ctrl_seg *doorbell_cseg;
struct mlx5_aso_cq cq;
/* read only */
struct mlx5_wq_cyc wq;
void __iomem *uar_map;
u32 sqn;
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;
static void mlx5_aso_free_cq(struct mlx5_aso_cq *cq)
{
mlx5_wq_destroy(&cq->wq_ctrl);
}
static int mlx5_aso_alloc_cq(struct mlx5_core_dev *mdev, int numa_node,
void *cqc_data, struct mlx5_aso_cq *cq)
{
struct mlx5_core_cq *mcq = &cq->mcq;
struct mlx5_wq_param param;
int err;
u32 i;
param.linear = 1;
err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
if (err)
return err;
mcq->cqe_sz = 64;
mcq->set_ci_db = cq->wq_ctrl.db.db;
mcq->arm_db = cq->wq_ctrl.db.db + 1;
for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
cqe->op_own = 0xf1;
}
cq->mdev = mdev;
return 0;
}
static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
{
u32 out[MLX5_ST_SZ_DW(create_cq_out)];
struct mlx5_core_dev *mdev = cq->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
int inlen, eqn, irqn_not_used;
void *in, *cqc;
int err;
err = mlx5_vector2eqn(mdev, 0, &eqn, &irqn_not_used);
if (err)
return err;
inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
sizeof(u64) * cq->wq_ctrl.buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
mlx5_fill_page_array(&cq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
MLX5_SET(cqc, cqc, cq_period_mode, 0);
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
kvfree(in);
return err;
}
static void mlx5_aso_destroy_cq(struct mlx5_aso_cq *cq)
{
mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
mlx5_wq_destroy(&cq->wq_ctrl);
}
static int mlx5_aso_create_cq(struct mlx5_core_dev *mdev, int numa_node,
struct mlx5_aso_cq *cq)
{
void *cqc_data;
int err;
cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
if (!cqc_data)
return -ENOMEM;
MLX5_SET(cqc, cqc_data, log_cq_size, 1);
MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
MLX5_SET(cqc, cqc_data, cqe_sz, CQE_STRIDE_128_PAD);
err = mlx5_aso_alloc_cq(mdev, numa_node, cqc_data, cq);
if (err) {
mlx5_core_err(mdev, "Failed to alloc aso wq cq, err=%d\n", err);
goto err_out;
}
err = create_aso_cq(cq, cqc_data);
if (err) {
mlx5_core_err(mdev, "Failed to create aso wq cq, err=%d\n", err);
goto err_free_cq;
}
kvfree(cqc_data);
return 0;
err_free_cq:
mlx5_aso_free_cq(cq);
err_out:
kvfree(cqc_data);
return err;
}
static int mlx5_aso_alloc_sq(struct mlx5_core_dev *mdev, int numa_node,
void *sqc_data, struct mlx5_aso *sq)
{
void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5_wq_param param;
int err;
sq->uar_map = mdev->priv.uar->map;
param.linear = 1;
err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
if (err)
return err;
wq->db = &wq->db[MLX5_SND_DBR];
return 0;
}
static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
void *sqc_data, struct mlx5_aso *sq)
{
void *in, *sqc, *wq;
int inlen, err;
u8 ts_format;
inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
sizeof(u64) * sq->wq_ctrl.buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
wq = MLX5_ADDR_OF(sqc, sqc, wq);
memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
ts_format = mlx5_get_sq_default_ts(mdev);
MLX5_SET(sqc, sqc, ts_format, ts_format);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, uar_page, mdev->priv.uar->index);
MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
mlx5_fill_page_array(&sq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
kvfree(in);
return err;
}
static int mlx5_aso_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
{
void *in, *sqc;
int inlen, err;
inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
MLX5_SET(modify_sq_in, in, sqn, sqn);
sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
err = mlx5_core_modify_sq(mdev, in, inlen);
kvfree(in);
return err;
}
static int mlx5_aso_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
void *sqc_data, struct mlx5_aso *sq)
{
int err;
err = create_aso_sq(mdev, pdn, sqc_data, sq);
if (err)
return err;
err = mlx5_aso_set_sq_rdy(mdev, sq->sqn);
if (err)
mlx5_core_destroy_sq(mdev, sq->sqn);
return err;
}
static void mlx5_aso_free_sq(struct mlx5_aso *sq)
{
mlx5_wq_destroy(&sq->wq_ctrl);
}
static void mlx5_aso_destroy_sq(struct mlx5_aso *sq)
{
mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
mlx5_aso_free_sq(sq);
}
static int mlx5_aso_create_sq(struct mlx5_core_dev *mdev, int numa_node,
u32 pdn, struct mlx5_aso *sq)
{
void *sqc_data, *wq;
int err;
sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
if (!sqc_data)
return -ENOMEM;
wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wq, pd, pdn);
MLX5_SET(wq, wq, log_wq_sz, 1);
err = mlx5_aso_alloc_sq(mdev, numa_node, sqc_data, sq);
if (err) {
mlx5_core_err(mdev, "Failed to alloc aso wq sq, err=%d\n", err);
goto err_out;
}
err = mlx5_aso_create_sq_rdy(mdev, pdn, sqc_data, sq);
if (err) {
mlx5_core_err(mdev, "Failed to open aso wq sq, err=%d\n", err);
goto err_free_asosq;
}
mlx5_core_dbg(mdev, "aso sq->sqn = 0x%x\n", sq->sqn);
kvfree(sqc_data);
return 0;
err_free_asosq:
mlx5_aso_free_sq(sq);
err_out:
kvfree(sqc_data);
return err;
}
struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn)
{
int numa_node = dev_to_node(&mdev->pdev->dev);
struct mlx5_aso *aso;
int err;
aso = kzalloc(sizeof(*aso), GFP_KERNEL);
if (!aso)
return ERR_PTR(-ENOMEM);
err = mlx5_aso_create_cq(mdev, numa_node, &aso->cq);
if (err)
goto err_cq;
err = mlx5_aso_create_sq(mdev, numa_node, pdn, aso);
if (err)
goto err_sq;
return aso;
err_sq:
mlx5_aso_destroy_cq(&aso->cq);
err_cq:
kfree(aso);
return ERR_PTR(err);
}
void mlx5_aso_destroy(struct mlx5_aso *aso)
{
mlx5_aso_destroy_sq(aso);
mlx5_aso_destroy_cq(&aso->cq);
kfree(aso);
}
void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
struct mlx5_aso_wqe *aso_wqe,
u32 obj_id, u32 opc_mode)
{
struct mlx5_wqe_ctrl_seg *cseg = &aso_wqe->ctrl;
cseg->opmod_idx_opcode = cpu_to_be32((opc_mode << MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT) |
(aso->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_ACCESS_ASO);
cseg->qpn_ds = cpu_to_be32((aso->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
cseg->general_id = cpu_to_be32(obj_id);
}
struct mlx5_aso_wqe *mlx5_aso_get_wqe(struct mlx5_aso *aso)
{
struct mlx5_aso_wqe *wqe;
u16 pi;
pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc);
wqe = mlx5_wq_cyc_get_wqe(&aso->wq, pi);
memset(wqe, 0, sizeof(*wqe));
return wqe;
}
void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
struct mlx5_wqe_ctrl_seg *doorbell_cseg)
{
doorbell_cseg->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
/* ensure wqe is visible to device before updating doorbell record */
wmb();
if (with_data)
aso->pc += MLX5_ASO_WQEBBS_DATA;
else
aso->pc += MLX5_ASO_WQEBBS;
*aso->wq.db = cpu_to_be32(aso->pc);
/* ensure doorbell record is visible to device before ringing the
* doorbell
*/
wmb();
mlx5_write64((__be32 *)doorbell_cseg, aso->uar_map + MLX5_BF_OFFSET, NULL);
/* Ensure doorbell is written on uar_page before poll_cq */
WRITE_ONCE(doorbell_cseg, NULL);
}
int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data)
{
struct mlx5_aso_cq *cq = &aso->cq;
struct mlx5_cqe64 *cqe;
cqe = mlx5_cqwq_get_cqe(&cq->wq);
if (!cqe)
return -ETIMEDOUT;
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
* otherwise a cq overrun may occur
*/
mlx5_cqwq_pop(&cq->wq);
if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
struct mlx5_err_cqe *err_cqe;
mlx5_core_err(cq->mdev, "Bad OP in ASOSQ CQE: 0x%x\n",
get_cqe_opcode(cqe));
err_cqe = (struct mlx5_err_cqe *)cqe;
mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n",
err_cqe->vendor_err_synd);
mlx5_core_err(cq->mdev, "syndrome=%x\n",
err_cqe->syndrome);
print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
16, 1, err_cqe,
sizeof(*err_cqe), false);
}
mlx5_cqwq_update_db_record(&cq->wq);
/* ensure cq space is freed before enabling more cqes */
wmb();
if (with_data)
aso->cc += MLX5_ASO_WQEBBS_DATA;
else
aso->cc += MLX5_ASO_WQEBBS;
return 0;
}

View File

@ -144,6 +144,8 @@ enum {
#define MLX5_SEND_WQE_DS 16
#define MLX5_SEND_WQE_BB 64
#define MLX5_SEND_WQEBB_NUM_DS (MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
#define MLX5_WQE_CTRL_QPN_SHIFT 8
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
enum {
MLX5_SEND_WQE_MAX_WQEBBS = 16,
@ -192,7 +194,10 @@ struct mlx5_wqe_ctrl_seg {
u8 signature;
u8 rsvd[2];
u8 fm_ce_se;
__be32 imm;
union {
__be32 imm;
__be32 general_id;
};
};
#define MLX5_WQE_CTRL_DS_MASK 0x3f
@ -226,6 +231,10 @@ enum {
MLX5_ETH_WQE_SWP_OUTER_L4_TYPE = 1 << 5,
};
enum {
MLX5_ETH_WQE_FT_META_IPSEC = BIT(0),
};
struct mlx5_wqe_eth_seg {
u8 swp_outer_l4_offset;
u8 swp_outer_l3_offset;
@ -234,7 +243,7 @@ struct mlx5_wqe_eth_seg {
u8 cs_flags;
u8 swp_flags;
__be16 mss;
__be32 rsvd2;
__be32 flow_table_metadata;
union {
struct {
__be16 inline_hdr_sz;

View File

@ -28,9 +28,6 @@
struct mlx5_core_dev;
int mlx5_encryption_key_create(struct mlx5_core_dev *mdev, u32 pdn,
const void *p_key, u32 key_len, u32 * p_obj_id);
int mlx5_encryption_key_destroy(struct mlx5_core_dev *mdev, u32 oid);
int mlx5_tls_open_tis(struct mlx5_core_dev *mdev, int tc, int tdn, int pdn, u32 *p_tisn);
void mlx5_tls_close_tis(struct mlx5_core_dev *mdev, u32 tisn);
int mlx5_tls_open_tir(struct mlx5_core_dev *mdev, int tdn, int rqtn, u32 *p_tirn);

View File

@ -1,21 +1,25 @@
.PATH: ${SRCTOP}/sys/dev/mlx5/mlx5_core \
${SRCTOP}/sys/dev/mlx5/mlx5_lib \
${SRCTOP}/sys/dev/mlx5/mlx5_fpga
${SRCTOP}/sys/dev/mlx5/mlx5_fpga \
${SRCTOP}/sys/dev/mlx5/mlx5_accel
KMOD=mlx5
SRCS= \
mlx5_alloc.c \
mlx5_aso.c \
mlx5_cmd.c \
mlx5_crypto.c \
mlx5_cq.c \
mlx5_diag_cnt.c \
mlx5_diagnostics.c \
mlx5_eq.c \
mlx5_eswitch.c \
mlx5_fc_cmd.c \
mlx5_fs_chains.c \
mlx5_fs_cmd.c \
mlx5_fs_tcp.c \
mlx5_fs_tree.c \
mlx5_fs_core.c \
mlx5_fs_counters.c \
mlx5_fs_ft_pool.c \
mlx5_fs_tcp.c \
mlx5_fw.c \
mlx5_fwdump.c \
mlx5_health.c \
@ -36,7 +40,11 @@ mlx5_uar.c \
mlx5_vport.c \
mlx5_vsc.c \
mlx5_wq.c \
mlx5_gid.c
mlx5_gid.c \
mlx5_ipsec_fs.c \
mlx5_ipsec_offload.c \
mlx5_ipsec.c \
mlx5_ipsec_rxtx.c
SRCS+= ${LINUXKPI_GENSRCS}
SRCS+= opt_inet.h opt_inet6.h opt_rss.h opt_ratelimit.h