
ice(4): Update to version 0.29.4-k

Includes various feature improvements and bug fixes.

Notable changes include:
- Firmware logging support
- Link management flow changes
- New sysctl to report aggregated error counts
- Health Status Event reporting from firmware (Use the new read-only
  tunables hw.ice.enable_health_events / dev.ice.#.enable_health_events
  to turn this off)
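  As an illustrative example (the unit number is hypothetical), both
  tunables are set from loader.conf(5), since they are read-only at
  runtime:

      hw.ice.enable_health_events="0"     # disable globally
      dev.ice.0.enable_health_events="0"  # disable for one PF only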

Signed-off-by: Eric Joyner <erj@FreeBSD.org>

Sponsored by:	Intel Corporation
Eric Joyner 2021-06-23 13:41:54 -07:00
parent afc5ab870d
commit 9cf1841c4a
29 changed files with 2877 additions and 886 deletions

View File

@ -167,6 +167,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_SKU 0x0074
#define ICE_AQC_CAPS_PORT_MAP 0x0075
#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
u8 major_ver;
@ -412,6 +413,40 @@ struct ice_aqc_get_allocd_res_desc {
__le32 addr_low;
};
/* Request buffer for Set VLAN Mode AQ command (indirect 0x020C) */
struct ice_aqc_set_vlan_mode {
u8 reserved;
u8 l2tag_prio_tagging;
#define ICE_AQ_VLAN_PRIO_TAG_S 0
#define ICE_AQ_VLAN_PRIO_TAG_M (0x7 << ICE_AQ_VLAN_PRIO_TAG_S)
#define ICE_AQ_VLAN_PRIO_TAG_NOT_SUPPORTED 0x0
#define ICE_AQ_VLAN_PRIO_TAG_STAG 0x1
#define ICE_AQ_VLAN_PRIO_TAG_OUTER_CTAG 0x2
#define ICE_AQ_VLAN_PRIO_TAG_OUTER_VLAN 0x3
#define ICE_AQ_VLAN_PRIO_TAG_INNER_CTAG 0x4
#define ICE_AQ_VLAN_PRIO_TAG_MAX 0x4
#define ICE_AQ_VLAN_PRIO_TAG_ERROR 0x7
u8 l2tag_reserved[64];
u8 rdma_packet;
#define ICE_AQ_VLAN_RDMA_TAG_S 0
#define ICE_AQ_VLAN_RDMA_TAG_M (0x3F << ICE_AQ_VLAN_RDMA_TAG_S)
#define ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING 0x10
#define ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING 0x1A
u8 rdma_reserved[2];
u8 mng_vlan_prot_id;
#define ICE_AQ_VLAN_MNG_PROTOCOL_ID_OUTER 0x10
#define ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER 0x11
u8 prot_id_reserved[30];
};
/* Response buffer for Get VLAN Mode AQ command (indirect 0x020D) */
struct ice_aqc_get_vlan_mode {
u8 vlan_mode;
#define ICE_AQ_VLAN_MODE_DVM_ENA BIT(0)
u8 l2tag_prio_tagging;
u8 reserved[98];
};
/* Add VSI (indirect 0x0210)
* Update VSI (indirect 0x0211)
* Get VSI (indirect 0x0212)
@ -485,108 +520,114 @@ struct ice_aqc_vsi_props {
#define ICE_AQ_VSI_SW_FLAG_SRC_PRUNE BIT(7)
u8 sw_flags2;
#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S 0
#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M \
(0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S)
#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S)
#define ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA BIT(0)
#define ICE_AQ_VSI_SW_FLAG_LAN_ENA BIT(4)
u8 veb_stat_id;
#define ICE_AQ_VSI_SW_VEB_STAT_ID_S 0
#define ICE_AQ_VSI_SW_VEB_STAT_ID_M (0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S)
#define ICE_AQ_VSI_SW_VEB_STAT_ID_M (0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S)
#define ICE_AQ_VSI_SW_VEB_STAT_ID_VALID BIT(5)
/* security section */
u8 sec_flags;
#define ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD BIT(0)
#define ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF BIT(2)
#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S 4
#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M (0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S)
#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S 4
#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M (0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S)
#define ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA BIT(0)
u8 sec_reserved;
/* VLAN section */
__le16 pvid; /* VLANS include priority bits */
u8 pvlan_reserved[2];
u8 vlan_flags;
#define ICE_AQ_VSI_VLAN_MODE_S 0
#define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S)
#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED 0x1
#define ICE_AQ_VSI_VLAN_MODE_TAGGED 0x2
#define ICE_AQ_VSI_VLAN_MODE_ALL 0x3
#define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2)
#define ICE_AQ_VSI_VLAN_EMOD_S 3
#define ICE_AQ_VSI_VLAN_EMOD_M (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_VLAN_EMOD_S)
#define ICE_AQ_VSI_VLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_VLAN_EMOD_S)
#define ICE_AQ_VSI_VLAN_EMOD_STR (0x2 << ICE_AQ_VSI_VLAN_EMOD_S)
#define ICE_AQ_VSI_VLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
u8 pvlan_reserved2[3];
__le16 port_based_inner_vlan; /* VLANS include priority bits */
u8 inner_vlan_reserved[2];
u8 inner_vlan_flags;
#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_S 0
#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_M (0x3 << ICE_AQ_VSI_INNER_VLAN_TX_MODE_S)
#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED 0x1
#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTTAGGED 0x2
#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL 0x3
#define ICE_AQ_VSI_INNER_VLAN_INSERT_PVID BIT(2)
#define ICE_AQ_VSI_INNER_VLAN_EMODE_S 3
#define ICE_AQ_VSI_INNER_VLAN_EMODE_M (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH (0x0 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_UP (0x1 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR (0x2 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
#define ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
#define ICE_AQ_VSI_INNER_VLAN_BLOCK_TX_DESC BIT(5)
u8 inner_vlan_reserved2[3];
/* ingress egress up sections */
__le32 ingress_table; /* bitmap, 3 bits per up */
#define ICE_AQ_VSI_UP_TABLE_UP0_S 0
#define ICE_AQ_VSI_UP_TABLE_UP0_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S)
#define ICE_AQ_VSI_UP_TABLE_UP1_S 3
#define ICE_AQ_VSI_UP_TABLE_UP1_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S)
#define ICE_AQ_VSI_UP_TABLE_UP2_S 6
#define ICE_AQ_VSI_UP_TABLE_UP2_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S)
#define ICE_AQ_VSI_UP_TABLE_UP3_S 9
#define ICE_AQ_VSI_UP_TABLE_UP3_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S)
#define ICE_AQ_VSI_UP_TABLE_UP4_S 12
#define ICE_AQ_VSI_UP_TABLE_UP4_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S)
#define ICE_AQ_VSI_UP_TABLE_UP5_S 15
#define ICE_AQ_VSI_UP_TABLE_UP5_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S)
#define ICE_AQ_VSI_UP_TABLE_UP6_S 18
#define ICE_AQ_VSI_UP_TABLE_UP6_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S)
#define ICE_AQ_VSI_UP_TABLE_UP7_S 21
#define ICE_AQ_VSI_UP_TABLE_UP7_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S)
#define ICE_AQ_VSI_UP_TABLE_UP0_S 0
#define ICE_AQ_VSI_UP_TABLE_UP0_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S)
#define ICE_AQ_VSI_UP_TABLE_UP1_S 3
#define ICE_AQ_VSI_UP_TABLE_UP1_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S)
#define ICE_AQ_VSI_UP_TABLE_UP2_S 6
#define ICE_AQ_VSI_UP_TABLE_UP2_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S)
#define ICE_AQ_VSI_UP_TABLE_UP3_S 9
#define ICE_AQ_VSI_UP_TABLE_UP3_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S)
#define ICE_AQ_VSI_UP_TABLE_UP4_S 12
#define ICE_AQ_VSI_UP_TABLE_UP4_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S)
#define ICE_AQ_VSI_UP_TABLE_UP5_S 15
#define ICE_AQ_VSI_UP_TABLE_UP5_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S)
#define ICE_AQ_VSI_UP_TABLE_UP6_S 18
#define ICE_AQ_VSI_UP_TABLE_UP6_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S)
#define ICE_AQ_VSI_UP_TABLE_UP7_S 21
#define ICE_AQ_VSI_UP_TABLE_UP7_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S)
__le32 egress_table; /* same defines as for ingress table */
/* outer tags section */
__le16 outer_tag;
u8 outer_tag_flags;
#define ICE_AQ_VSI_OUTER_TAG_MODE_S 0
#define ICE_AQ_VSI_OUTER_TAG_MODE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_MODE_S)
#define ICE_AQ_VSI_OUTER_TAG_NOTHING 0x0
#define ICE_AQ_VSI_OUTER_TAG_REMOVE 0x1
#define ICE_AQ_VSI_OUTER_TAG_COPY 0x2
#define ICE_AQ_VSI_OUTER_TAG_TYPE_S 2
#define ICE_AQ_VSI_OUTER_TAG_TYPE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S)
#define ICE_AQ_VSI_OUTER_TAG_NONE 0x0
#define ICE_AQ_VSI_OUTER_TAG_STAG 0x1
#define ICE_AQ_VSI_OUTER_TAG_VLAN_8100 0x2
#define ICE_AQ_VSI_OUTER_TAG_VLAN_9100 0x3
#define ICE_AQ_VSI_OUTER_TAG_INSERT BIT(4)
#define ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST BIT(6)
u8 outer_tag_reserved;
__le16 port_based_outer_vlan;
u8 outer_vlan_flags;
#define ICE_AQ_VSI_OUTER_VLAN_EMODE_S 0
#define ICE_AQ_VSI_OUTER_VLAN_EMODE_M (0x3 << ICE_AQ_VSI_OUTER_VLAN_EMODE_S)
#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH 0x0
#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_UP 0x1
#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW 0x2
#define ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING 0x3
#define ICE_AQ_VSI_OUTER_TAG_TYPE_S 2
#define ICE_AQ_VSI_OUTER_TAG_TYPE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S)
#define ICE_AQ_VSI_OUTER_TAG_NONE 0x0
#define ICE_AQ_VSI_OUTER_TAG_STAG 0x1
#define ICE_AQ_VSI_OUTER_TAG_VLAN_8100 0x2
#define ICE_AQ_VSI_OUTER_TAG_VLAN_9100 0x3
#define ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT BIT(4)
#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S 5
#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M (0x3 << ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S)
#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTUNTAGGED 0x1
#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTTAGGED 0x2
#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL 0x3
#define ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC BIT(7)
u8 outer_vlan_reserved;
/* queue mapping section */
__le16 mapping_flags;
#define ICE_AQ_VSI_Q_MAP_CONTIG 0x0
#define ICE_AQ_VSI_Q_MAP_NONCONTIG BIT(0)
#define ICE_AQ_VSI_Q_MAP_CONTIG 0x0
#define ICE_AQ_VSI_Q_MAP_NONCONTIG BIT(0)
__le16 q_mapping[16];
#define ICE_AQ_VSI_Q_S 0
#define ICE_AQ_VSI_Q_M (0x7FF << ICE_AQ_VSI_Q_S)
#define ICE_AQ_VSI_Q_S 0
#define ICE_AQ_VSI_Q_M (0x7FF << ICE_AQ_VSI_Q_S)
__le16 tc_mapping[8];
#define ICE_AQ_VSI_TC_Q_OFFSET_S 0
#define ICE_AQ_VSI_TC_Q_OFFSET_M (0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S)
#define ICE_AQ_VSI_TC_Q_NUM_S 11
#define ICE_AQ_VSI_TC_Q_NUM_M (0xF << ICE_AQ_VSI_TC_Q_NUM_S)
#define ICE_AQ_VSI_TC_Q_OFFSET_S 0
#define ICE_AQ_VSI_TC_Q_OFFSET_M (0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S)
#define ICE_AQ_VSI_TC_Q_NUM_S 11
#define ICE_AQ_VSI_TC_Q_NUM_M (0xF << ICE_AQ_VSI_TC_Q_NUM_S)
/* queueing option section */
u8 q_opt_rss;
#define ICE_AQ_VSI_Q_OPT_RSS_LUT_S 0
#define ICE_AQ_VSI_Q_OPT_RSS_LUT_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S)
#define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI 0x0
#define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF 0x2
#define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL 0x3
#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2
#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S)
#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6
#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
#define ICE_AQ_VSI_Q_OPT_RSS_LUT_S 0
#define ICE_AQ_VSI_Q_OPT_RSS_LUT_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S)
#define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI 0x0
#define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF 0x2
#define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL 0x3
#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2
#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S)
#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6
#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
u8 q_opt_tc;
#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0
#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S)
#define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR BIT(7)
#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0
#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S)
#define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR BIT(7)
u8 q_opt_flags;
#define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN BIT(0)
#define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN BIT(0)
u8 q_opt_reserved[3];
/* outer up section */
__le32 outer_up_table; /* same structure and defines as ingress tbl */
@ -594,27 +635,27 @@ struct ice_aqc_vsi_props {
__le16 sect_10_reserved;
/* flow director section */
__le16 fd_options;
#define ICE_AQ_VSI_FD_ENABLE BIT(0)
#define ICE_AQ_VSI_FD_TX_AUTO_ENABLE BIT(1)
#define ICE_AQ_VSI_FD_PROG_ENABLE BIT(3)
#define ICE_AQ_VSI_FD_ENABLE BIT(0)
#define ICE_AQ_VSI_FD_TX_AUTO_ENABLE BIT(1)
#define ICE_AQ_VSI_FD_PROG_ENABLE BIT(3)
__le16 max_fd_fltr_dedicated;
__le16 max_fd_fltr_shared;
__le16 fd_def_q;
#define ICE_AQ_VSI_FD_DEF_Q_S 0
#define ICE_AQ_VSI_FD_DEF_Q_M (0x7FF << ICE_AQ_VSI_FD_DEF_Q_S)
#define ICE_AQ_VSI_FD_DEF_GRP_S 12
#define ICE_AQ_VSI_FD_DEF_GRP_M (0x7 << ICE_AQ_VSI_FD_DEF_GRP_S)
#define ICE_AQ_VSI_FD_DEF_Q_S 0
#define ICE_AQ_VSI_FD_DEF_Q_M (0x7FF << ICE_AQ_VSI_FD_DEF_Q_S)
#define ICE_AQ_VSI_FD_DEF_GRP_S 12
#define ICE_AQ_VSI_FD_DEF_GRP_M (0x7 << ICE_AQ_VSI_FD_DEF_GRP_S)
__le16 fd_report_opt;
#define ICE_AQ_VSI_FD_REPORT_Q_S 0
#define ICE_AQ_VSI_FD_REPORT_Q_M (0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S)
#define ICE_AQ_VSI_FD_DEF_PRIORITY_S 12
#define ICE_AQ_VSI_FD_DEF_PRIORITY_M (0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S)
#define ICE_AQ_VSI_FD_DEF_DROP BIT(15)
#define ICE_AQ_VSI_FD_REPORT_Q_S 0
#define ICE_AQ_VSI_FD_REPORT_Q_M (0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S)
#define ICE_AQ_VSI_FD_DEF_PRIORITY_S 12
#define ICE_AQ_VSI_FD_DEF_PRIORITY_M (0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S)
#define ICE_AQ_VSI_FD_DEF_DROP BIT(15)
/* PASID section */
__le32 pasid_id;
#define ICE_AQ_VSI_PASID_ID_S 0
#define ICE_AQ_VSI_PASID_ID_M (0xFFFFF << ICE_AQ_VSI_PASID_ID_S)
#define ICE_AQ_VSI_PASID_ID_VALID BIT(31)
#define ICE_AQ_VSI_PASID_ID_S 0
#define ICE_AQ_VSI_PASID_ID_M (0xFFFFF << ICE_AQ_VSI_PASID_ID_S)
#define ICE_AQ_VSI_PASID_ID_VALID BIT(31)
u8 reserved[24];
};
@ -992,7 +1033,8 @@ struct ice_aqc_txsched_move_grp_info_hdr {
__le32 src_parent_teid;
__le32 dest_parent_teid;
__le16 num_elems;
__le16 reserved;
u8 flags;
u8 reserved;
};
struct ice_aqc_move_elem {
@ -1197,16 +1239,18 @@ struct ice_aqc_get_phy_caps {
__le16 param0;
/* 18.0 - Report qualified modules */
#define ICE_AQC_GET_PHY_RQM BIT(0)
/* 18.1 - 18.2 : Report mode
* 00b - Report NVM capabilities
* 01b - Report topology capabilities
* 10b - Report SW configured
/* 18.1 - 18.3 : Report mode
* 000b - Report NVM capabilities
* 001b - Report topology capabilities
* 010b - Report SW configured
* 100b - Report default capabilities
*/
#define ICE_AQC_REPORT_MODE_S 1
#define ICE_AQC_REPORT_MODE_M (3 << ICE_AQC_REPORT_MODE_S)
#define ICE_AQC_REPORT_NVM_CAP 0
#define ICE_AQC_REPORT_TOPO_CAP BIT(1)
#define ICE_AQC_REPORT_SW_CFG BIT(2)
#define ICE_AQC_REPORT_MODE_S 1
#define ICE_AQC_REPORT_MODE_M (7 << ICE_AQC_REPORT_MODE_S)
#define ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA 0
#define ICE_AQC_REPORT_TOPO_CAP_MEDIA BIT(1)
#define ICE_AQC_REPORT_ACTIVE_CFG BIT(2)
#define ICE_AQC_REPORT_DFLT_CFG BIT(3)
__le32 reserved1;
__le32 addr_high;
__le32 addr_low;
@ -1446,11 +1490,13 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA BIT(6)
#define ICE_AQ_LINK_TOPO_UNSUPP_MEDIA BIT(7)
u8 link_cfg_err;
#define ICE_AQ_LINK_CFG_ERR BIT(0)
#define ICE_AQ_LINK_ACT_PORT_OPT_INVAL BIT(2)
#define ICE_AQ_LINK_CFG_ERR BIT(0)
#define ICE_AQ_LINK_ACT_PORT_OPT_INVAL BIT(2)
#define ICE_AQ_LINK_FEAT_ID_OR_CONFIG_ID_INVAL BIT(3)
#define ICE_AQ_LINK_TOPO_CRITICAL_SDP_ERR BIT(4)
#define ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED BIT(5)
#define ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE BIT(6)
#define ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT BIT(7)
u8 link_info;
#define ICE_AQ_LINK_UP BIT(0) /* Link Status */
#define ICE_AQ_LINK_FAULT BIT(1)
@ -1932,7 +1978,11 @@ struct ice_aqc_get_port_options {
#define ICE_AQC_PORT_OPT_ACTIVE_M (0xF << ICE_AQC_PORT_OPT_ACTIVE_S)
#define ICE_AQC_PORT_OPT_FORCED BIT(6)
#define ICE_AQC_PORT_OPT_VALID BIT(7)
u8 rsvd[3];
u8 pending_port_option_status;
#define ICE_AQC_PENDING_PORT_OPT_IDX_S 0
#define ICE_AQC_PENDING_PORT_OPT_IDX_M (0xF << ICE_AQC_PENDING_PORT_OPT_IDX_S)
#define ICE_AQC_PENDING_PORT_OPT_VALID BIT(7)
u8 rsvd[2];
__le32 addr_high;
__le32 addr_low;
};
@ -1957,6 +2007,7 @@ struct ice_aqc_get_port_options_elem {
#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
u8 global_scid[2];
u8 phy_scid[2];
u8 pf2port_cid[2];
};
/* Set Port Option (direct, 0x06EB) */
@ -2021,6 +2072,25 @@ struct ice_aqc_sw_gpio {
u8 rsvd[12];
};
/* Program topology device NVM (direct, 0x06F2) */
struct ice_aqc_program_topology_device_nvm {
u8 lport_num;
u8 lport_num_valid;
u8 node_type_ctx;
u8 index;
u8 rsvd[12];
};
/* Read topology device NVM (indirect, 0x06F3) */
struct ice_aqc_read_topology_device_nvm {
u8 lport_num;
u8 lport_num_valid;
u8 node_type_ctx;
u8 index;
__le32 start_address;
u8 data_read[8];
};
/* NVM Read command (indirect 0x0701)
* NVM Erase commands (direct 0x0702)
* NVM Write commands (indirect 0x0703)
@ -2050,6 +2120,7 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_POR_FLAG 0 /* Used by NVM Write completion on ARQ */
#define ICE_AQC_NVM_PERST_FLAG 1
#define ICE_AQC_NVM_EMPR_FLAG 2
#define ICE_AQC_NVM_EMPR_ENA BIT(0)
__le16 module_typeid;
__le16 length;
#define ICE_AQC_NVM_ERASE_LEN 0xFFFF
@ -2667,6 +2738,50 @@ struct ice_aqc_clear_health_status {
__le32 reserved[4];
};
/* Set FW Logging configuration (indirect 0xFF30)
* Register for FW Logging (indirect 0xFF31)
* Query FW Logging (indirect 0xFF32)
* FW Log Event (indirect 0xFF33)
* Get FW Log (indirect 0xFF34)
* Clear FW Log (indirect 0xFF35)
*/
struct ice_aqc_fw_log {
u8 cmd_flags;
#define ICE_AQC_FW_LOG_CONF_UART_EN BIT(0)
#define ICE_AQC_FW_LOG_CONF_AQ_EN BIT(1)
#define ICE_AQC_FW_LOG_CONF_SET_VALID BIT(3)
#define ICE_AQC_FW_LOG_AQ_REGISTER BIT(0)
#define ICE_AQC_FW_LOG_AQ_QUERY BIT(2)
#define ICE_AQC_FW_LOG_PERSISTENT BIT(0)
u8 rsp_flag;
#define ICE_AQC_FW_LOG_MORE_DATA BIT(1)
__le16 fw_rt_msb;
union {
struct {
__le32 fw_rt_lsb;
} sync;
struct {
__le16 log_resolution;
#define ICE_AQC_FW_LOG_MIN_RESOLUTION (1)
#define ICE_AQC_FW_LOG_MAX_RESOLUTION (128)
__le16 mdl_cnt;
} cfg;
} ops;
__le32 addr_high;
__le32 addr_low;
};
/* Response Buffer for:
* Set Firmware Logging Configuration (0xFF30)
* Query FW Logging (0xFF32)
*/
struct ice_aqc_fw_log_cfg_resp {
__le16 module_identifier;
u8 log_level;
u8 rsvd0;
};
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
@ -2718,10 +2833,13 @@ struct ice_aq_desc {
struct ice_aqc_dnl_read_log_command dnl_read_log;
struct ice_aqc_dnl_read_log_response dnl_read_log_resp;
struct ice_aqc_i2c read_write_i2c;
struct ice_aqc_read_i2c_resp read_i2c_resp;
struct ice_aqc_mdio read_write_mdio;
struct ice_aqc_gpio_by_func read_write_gpio_by_func;
struct ice_aqc_gpio read_write_gpio;
struct ice_aqc_set_led set_led;
struct ice_aqc_mdio read_mdio;
struct ice_aqc_mdio write_mdio;
struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
struct ice_aqc_get_port_options get_port_options;
@ -2888,6 +3006,8 @@ enum ice_adminq_opc {
ice_aqc_opc_alloc_res = 0x0208,
ice_aqc_opc_free_res = 0x0209,
ice_aqc_opc_get_allocd_res_desc = 0x020A,
ice_aqc_opc_set_vlan_mode_parameters = 0x020C,
ice_aqc_opc_get_vlan_mode_parameters = 0x020D,
/* VSI commands */
ice_aqc_opc_add_vsi = 0x0210,
@ -2965,6 +3085,8 @@ enum ice_adminq_opc {
ice_aqc_opc_sff_eeprom = 0x06EE,
ice_aqc_opc_sw_set_gpio = 0x06EF,
ice_aqc_opc_sw_get_gpio = 0x06F0,
ice_aqc_opc_program_topology_device_nvm = 0x06F2,
ice_aqc_opc_read_topology_device_nvm = 0x06F3,
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,
@ -3030,7 +3152,15 @@ enum ice_adminq_opc {
ice_aqc_opc_set_health_status_config = 0xFF20,
ice_aqc_opc_get_supported_health_status_codes = 0xFF21,
ice_aqc_opc_get_health_status = 0xFF22,
ice_aqc_opc_clear_health_status = 0xFF23
ice_aqc_opc_clear_health_status = 0xFF23,
/* FW Logging Commands */
ice_aqc_opc_fw_logs_config = 0xFF30,
ice_aqc_opc_fw_logs_register = 0xFF31,
ice_aqc_opc_fw_logs_query = 0xFF32,
ice_aqc_opc_fw_logs_event = 0xFF33,
ice_aqc_opc_fw_logs_get = 0xFF34,
ice_aqc_opc_fw_logs_clear = 0xFF35
};
#endif /* _ICE_ADMINQ_CMD_H_ */

View File

@ -477,6 +477,51 @@ ice_cmp_bitmap(ice_bitmap_t *bmp1, ice_bitmap_t *bmp2, u16 size)
return true;
}
/**
* ice_bitmap_from_array32 - copies u32 array source into bitmap destination
* @dst: the destination bitmap
* @src: the source u32 array
* @size: size of the bitmap (in bits)
*
* This function copies the src bitmap stored in a u32 array into the dst
* bitmap stored as an ice_bitmap_t.
*/
static inline void
ice_bitmap_from_array32(ice_bitmap_t *dst, u32 *src, u16 size)
{
u32 remaining_bits, i;
#define BITS_PER_U32 (sizeof(u32) * BITS_PER_BYTE)
/* clear bitmap so we only have to set when iterating */
ice_zero_bitmap(dst, size);
for (i = 0; i < (u32)(size / BITS_PER_U32); i++) {
u32 bit_offset = i * BITS_PER_U32;
u32 entry = src[i];
u32 j;
for (j = 0; j < BITS_PER_U32; j++) {
if (entry & BIT(j))
ice_set_bit((u16)(j + bit_offset), dst);
}
}
/* still need to check the leftover bits (i.e. if size isn't evenly
* divisible by BITS_PER_U32)
*/
remaining_bits = size % BITS_PER_U32;
if (remaining_bits) {
u32 bit_offset = i * BITS_PER_U32;
u32 entry = src[i];
u32 j;
for (j = 0; j < remaining_bits; j++) {
if (entry & BIT(j))
ice_set_bit((u16)(j + bit_offset), dst);
}
}
}
#undef BIT_CHUNK
#undef BIT_IN_CHUNK
#undef LAST_CHUNK_BITS
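A brief usage sketch, assuming the ice_declare_bitmap() and ice_is_bit_set()
helpers from this header (the sizes and values here are made up):

/* Illustrative only: expand a 40-bit FW mask delivered as two u32 words */
ice_declare_bitmap(dst, 40);
u32 src[2] = { 0x80000001, 0x0000000F };

ice_bitmap_from_array32(dst, src, 40);
/* bits 0 and 31 come from src[0]; bits 32..35 come from src[1] */
if (ice_is_bit_set(dst, 33)) {
	/* ... */
}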

View File

@ -189,6 +189,10 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
return ICE_ERR_PARAM;
hw = pi->hw;
if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
!ice_fw_supports_report_dflt_cfg(hw))
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
if (qual_mods)
@ -222,7 +226,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
pcaps->module_type[2]);
if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
@ -454,6 +458,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
*hw_media_type = ice_get_media_type(pi);
li->link_info = link_data.link_info;
li->link_cfg_err = link_data.link_cfg_err;
li->an_info = link_data.an_info;
li->ext_info = link_data.ext_info;
li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
@ -803,10 +808,11 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
/* Initialize port_info struct with PHY capabilities */
status = ice_aq_get_phy_caps(hw->port_info, false,
ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
ice_free(hw, pcaps);
if (status)
ice_debug(hw, ICE_DBG_PHY, "Get PHY capabilities failed, continuing anyway\n");
ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n",
status);
/* Initialize port_info struct with link information */
status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
@ -851,8 +857,6 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
goto err_unroll_fltr_mgmt_struct;
ice_init_lock(&hw->tnl_lock);
ice_init_vlan_mode_ops(hw);
return ICE_SUCCESS;
err_unroll_fltr_mgmt_struct:
@ -1363,6 +1367,97 @@ ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
/* FW Admin Queue command wrappers */
/**
* ice_should_retry_sq_send_cmd
* @opcode: AQ opcode
*
* Decide if we should retry the send command routine for the ATQ, depending
* on the opcode.
*/
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
switch (opcode) {
case ice_aqc_opc_dnl_get_status:
case ice_aqc_opc_dnl_run:
case ice_aqc_opc_dnl_call:
case ice_aqc_opc_dnl_read_sto:
case ice_aqc_opc_dnl_write_sto:
case ice_aqc_opc_dnl_set_breakpoints:
case ice_aqc_opc_dnl_read_log:
case ice_aqc_opc_get_link_topo:
case ice_aqc_opc_done_alt_write:
case ice_aqc_opc_lldp_stop:
case ice_aqc_opc_lldp_start:
case ice_aqc_opc_lldp_filter_ctrl:
return true;
}
return false;
}
/**
* ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
* @desc: prefilled descriptor describing the command
* @buf: buffer to use for indirect commands (or NULL for direct commands)
* @buf_size: size of buffer for indirect commands (or 0 for direct commands)
* @cd: pointer to command details structure
*
* Retry sending the FW Admin Queue command to the FW Admin Queue, up to
* ICE_SQ_SEND_MAX_EXECUTE times, if the EBUSY AQ error is returned.
*/
static enum ice_status
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aq_desc desc_cpy;
enum ice_status status;
bool is_cmd_for_retry;
u8 *buf_cpy = NULL;
u8 idx = 0;
u16 opcode;
opcode = LE16_TO_CPU(desc->opcode);
is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
ice_memset(&desc_cpy, 0, sizeof(desc_cpy), ICE_NONDMA_MEM);
if (is_cmd_for_retry) {
if (buf) {
buf_cpy = (u8 *)ice_malloc(hw, buf_size);
if (!buf_cpy)
return ICE_ERR_NO_MEMORY;
}
ice_memcpy(&desc_cpy, desc, sizeof(desc_cpy),
ICE_NONDMA_TO_NONDMA);
}
do {
status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
if (!is_cmd_for_retry || status == ICE_SUCCESS ||
hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
break;
if (buf_cpy)
ice_memcpy(buf, buf_cpy, buf_size,
ICE_NONDMA_TO_NONDMA);
ice_memcpy(desc, &desc_cpy, sizeof(desc_cpy),
ICE_NONDMA_TO_NONDMA);
ice_msec_delay(ICE_SQ_SEND_DELAY_TIME_MS, false);
} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
if (buf_cpy)
ice_free(hw, buf_cpy);
return status;
}
/**
* ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
* @hw: pointer to the HW struct
@ -1377,7 +1472,7 @@ enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
u16 buf_size, struct ice_sq_cd *cd)
{
return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
}
/**
@ -1817,15 +1912,15 @@ static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
* @hw: pointer to the ice_hw instance
* @caps: pointer to common caps instance
* @prefix: string to prefix when printing
* @debug: set to indicate debug print
* @dbg: set to indicate debug print
*/
static void
ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
char const *prefix, bool debug)
char const *prefix, bool dbg)
{
u8 i;
if (debug)
if (dbg)
ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix,
caps->led_pin_num);
else
@ -1836,7 +1931,7 @@ ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
if (!caps->led[i])
continue;
if (debug)
if (dbg)
ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n",
prefix, i, caps->led[i]);
else
@ -1850,15 +1945,15 @@ ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
* @hw: pointer to the ice_hw instance
* @caps: pointer to common caps instance
* @prefix: string to prefix when printing
* @debug: set to indicate debug print
* @dbg: set to indicate debug print
*/
static void
ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
char const *prefix, bool debug)
char const *prefix, bool dbg)
{
u8 i;
if (debug)
if (dbg)
ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix,
caps->sdp_pin_num);
else
@ -1869,7 +1964,7 @@ ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
if (!caps->sdp[i])
continue;
if (debug)
if (dbg)
ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n",
prefix, i, caps->sdp[i]);
else
@ -2825,7 +2920,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
if (!pcaps)
return ICE_ERR_NO_MEMORY;
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
pcaps, NULL);
if (status == ICE_SUCCESS)
@ -2933,7 +3028,6 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
if (!pi || !cfg)
return ICE_ERR_BAD_PTR;
switch (req_mode) {
case ICE_FC_AUTO:
{
@ -2944,11 +3038,10 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
ice_malloc(pi->hw, sizeof(*pcaps));
if (!pcaps)
return ICE_ERR_NO_MEMORY;
/* Query the value of FC that both the NIC and attached media
* can do.
*/
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
pcaps, NULL);
if (status) {
ice_free(pi->hw, pcaps);
@ -3017,8 +3110,9 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
return ICE_ERR_NO_MEMORY;
/* Get the current PHY config */
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
NULL);
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
pcaps, NULL);
if (status) {
*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
goto out;
@ -3135,17 +3229,6 @@ ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
cfg->link_fec_opt = caps->link_fec_options;
cfg->module_compliance_enforcement =
caps->module_compliance_enforcement;
if (ice_fw_supports_link_override(pi->hw)) {
struct ice_link_default_override_tlv tlv;
if (ice_get_link_default_override(&tlv, pi))
return;
if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
cfg->module_compliance_enforcement |=
ICE_LINK_OVERRIDE_STRICT_MODE;
}
}
/**
@ -3172,8 +3255,11 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
if (!pcaps)
return ICE_ERR_NO_MEMORY;
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
NULL);
status = ice_aq_get_phy_caps(pi, false,
(ice_fw_supports_report_dflt_cfg(hw) ?
ICE_AQC_REPORT_DFLT_CFG :
ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
if (status)
goto out;
@ -3212,7 +3298,8 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
break;
}
if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) &&
!ice_fw_supports_report_dflt_cfg(pi->hw)) {
struct ice_link_default_override_tlv tlv;
if (ice_get_link_default_override(&tlv, pi))
@ -5167,6 +5254,141 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
return false;
}
/**
* ice_is_fw_health_report_supported
* @hw: pointer to the hardware structure
*
* Return true if firmware supports health status reports,
* false otherwise
*/
bool ice_is_fw_health_report_supported(struct ice_hw *hw)
{
if (hw->api_maj_ver > ICE_FW_API_HEALTH_REPORT_MAJ)
return true;
if (hw->api_maj_ver == ICE_FW_API_HEALTH_REPORT_MAJ) {
if (hw->api_min_ver > ICE_FW_API_HEALTH_REPORT_MIN)
return true;
if (hw->api_min_ver == ICE_FW_API_HEALTH_REPORT_MIN &&
hw->api_patch >= ICE_FW_API_HEALTH_REPORT_PATCH)
return true;
}
return false;
}
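A hedged sketch of a typical caller; the sc->feat_cap bitmap and
ice_set_bit() are assumptions here, while ICE_FEATURE_HEALTH_STATUS is added
elsewhere in this commit:

/* Illustrative only: advertise the feature only when FW supports it */
if (ice_is_fw_health_report_supported(&sc->hw))
	ice_set_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap);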
/**
* ice_aq_set_health_status_config - Configure FW health events
* @hw: pointer to the HW struct
* @event_source: type of diagnostic events to enable
* @cd: pointer to command details structure or NULL
*
* Configure the health status event types that the firmware will send to this
* PF. The supported event types are: PF-specific, all PFs, and global
*/
enum ice_status
ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
struct ice_sq_cd *cd)
{
struct ice_aqc_set_health_status_config *cmd;
struct ice_aq_desc desc;
cmd = &desc.params.set_health_status_config;
ice_fill_dflt_direct_cmd_desc(&desc,
ice_aqc_opc_set_health_status_config);
cmd->event_source = event_source;
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
* ice_aq_get_port_options
* @hw: pointer to the hw struct
* @options: buffer for the resultant port options
* @option_count: input - size of the buffer in port options structures,
* output - number of returned port options
* @lport: logical port to call the command with (optional)
* @lport_valid: when false, FW uses the port owned by the PF instead of
* lport; must be true when the PF owns more than one port
* @active_option_idx: index of active port option in returned buffer
* @active_option_valid: active option in returned buffer is valid
*
* Calls Get Port Options AQC (0x06ea) and verifies result.
*/
enum ice_status
ice_aq_get_port_options(struct ice_hw *hw,
struct ice_aqc_get_port_options_elem *options,
u8 *option_count, u8 lport, bool lport_valid,
u8 *active_option_idx, bool *active_option_valid)
{
struct ice_aqc_get_port_options *cmd;
struct ice_aq_desc desc;
enum ice_status status;
u8 pmd_count;
u8 max_speed;
u8 i;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* options buffer shall be able to hold max returned options */
if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
return ICE_ERR_PARAM;
cmd = &desc.params.get_port_options;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
if (lport_valid)
cmd->lport_num = lport;
cmd->lport_num_valid = lport_valid;
status = ice_aq_send_cmd(hw, &desc, options,
*option_count * sizeof(*options), NULL);
if (status != ICE_SUCCESS)
return status;
/* verify direct FW response & set output parameters */
*option_count = cmd->port_options_count & ICE_AQC_PORT_OPT_COUNT_M;
ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
*active_option_valid = cmd->port_options & ICE_AQC_PORT_OPT_VALID;
if (*active_option_valid) {
*active_option_idx = cmd->port_options &
ICE_AQC_PORT_OPT_ACTIVE_M;
if (*active_option_idx > (*option_count - 1))
return ICE_ERR_OUT_OF_RANGE;
ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
*active_option_idx);
}
/* verify indirect FW response & mask output options fields */
for (i = 0; i < *option_count; i++) {
options[i].pmd &= ICE_AQC_PORT_OPT_PMD_COUNT_M;
options[i].max_lane_speed &= ICE_AQC_PORT_OPT_MAX_LANE_M;
pmd_count = options[i].pmd;
max_speed = options[i].max_lane_speed;
ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
pmd_count, max_speed);
/* check only entries containing valid max PMD speed values;
* other reserved values may be returned when the logical port
* used is unrelated to a specific option
*/
if (max_speed <= ICE_AQC_PORT_OPT_MAX_LANE_100G) {
if (pmd_count > ICE_MAX_PORT_PER_PCI_DEV)
return ICE_ERR_OUT_OF_RANGE;
if (pmd_count > 2 &&
max_speed > ICE_AQC_PORT_OPT_MAX_LANE_25G)
return ICE_ERR_CFG;
if (pmd_count > 7 &&
max_speed > ICE_AQC_PORT_OPT_MAX_LANE_10G)
return ICE_ERR_CFG;
}
}
return ICE_SUCCESS;
}
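A hedged call sketch; the buffer is sized to the maximum option count the
firmware can report, per the parameter comments above:

/* Illustrative only: query options on the port owned by this PF */
struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_COUNT_M];
u8 option_count = ICE_AQC_PORT_OPT_COUNT_M;
u8 active_idx;
bool active_valid;
enum ice_status status;

status = ice_aq_get_port_options(hw, options, &option_count, 0, false,
				 &active_idx, &active_valid);
if (status == ICE_SUCCESS && active_valid)
	ice_debug(hw, ICE_DBG_PHY, "active option: %u\n", active_idx);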
/**
* ice_aq_set_lldp_mib - Set the LLDP MIB
* @hw: pointer to the HW struct
@ -5246,3 +5468,23 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
* ice_fw_supports_report_dflt_cfg
* @hw: pointer to the hardware structure
*
* Checks if the firmware supports reporting the default configuration
*/
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
return true;
if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
return true;
} else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
return true;
}
return false;
}

View File

@ -39,6 +39,9 @@
#include "virtchnl.h"
#include "ice_switch.h"
#define ICE_SQ_SEND_DELAY_TIME_MS 10
#define ICE_SQ_SEND_MAX_EXECUTE 3
enum ice_fw_modes {
ICE_FW_MODE_NORMAL,
ICE_FW_MODE_DBG,
@ -219,6 +222,11 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
bool write, struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_port_options(struct ice_hw *hw,
struct ice_aqc_get_port_options_elem *options,
u8 *option_count, u8 lport, bool lport_valid,
u8 *active_option_idx, bool *active_option_valid);
enum ice_status
ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info);
enum ice_status
__ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data);
@ -275,4 +283,9 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
enum ice_status
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
enum ice_status
ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
struct ice_sq_cd *cd);
bool ice_is_fw_health_report_supported(struct ice_hw *hw);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */

View File

@ -73,6 +73,18 @@ bool ice_enable_tx_fc_filter = true;
*/
bool ice_enable_tx_lldp_filter = true;
/**
* @var ice_enable_health_events
* @brief boolean indicating if health status events from the FW should be reported
*
* Global sysctl variable indicating whether the Health Status events from the
* FW should be enabled. If true, the driver will print a message with a
* description of each event that occurs and possible actions to take.
*
* @remark each PF has a separate sysctl which can override this value.
*/
bool ice_enable_health_events = true;
/* sysctls marked as tunable, (i.e. with the CTLFLAG_TUN set) will
* automatically load tunable values, without the need to manually create the
* TUNABLE definition.
@ -89,6 +101,10 @@ static SYSCTL_NODE(_hw, OID_AUTO, ice, CTLFLAG_RD, 0, "ICE driver parameters");
static SYSCTL_NODE(_hw_ice, OID_AUTO, debug, ICE_CTLFLAG_DEBUG | CTLFLAG_RD, 0,
"ICE driver debug parameters");
SYSCTL_BOOL(_hw_ice, OID_AUTO, enable_health_events, CTLFLAG_RDTUN,
&ice_enable_health_events, 0,
"Enable FW health event reporting globally");
SYSCTL_BOOL(_hw_ice_debug, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
&ice_enable_tx_fc_filter, 0,
"Drop Ethertype 0x8808 control frames originating from non-HW sources");

View File

@ -1094,7 +1094,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
* ice_sq_send_cmd - send command to Control Queue (ATQ)
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
* @desc: prefilled descriptor describing the command (non DMA mem)
* @desc: prefilled descriptor describing the command
* @buf: buffer to use for indirect commands (or NULL for direct commands)
* @buf_size: size of buffer for indirect commands (or 0 for direct commands)
* @cd: pointer to command details structure
@ -1151,6 +1151,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending)
{
u16 ntc = cq->rq.next_to_clean;
enum ice_aq_err rq_last_status;
enum ice_status ret_code = ICE_SUCCESS;
struct ice_aq_desc *desc;
struct ice_dma_mem *bi;
@ -1184,13 +1185,12 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
desc = ICE_CTL_Q_DESC(cq->rq, ntc);
desc_idx = ntc;
cq->rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
flags = LE16_TO_CPU(desc->flags);
if (flags & ICE_AQ_FLAG_ERR) {
ret_code = ICE_ERR_AQ_ERROR;
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
LE16_TO_CPU(desc->opcode),
cq->rq_last_status);
LE16_TO_CPU(desc->opcode), rq_last_status);
}
ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
datalen = LE16_TO_CPU(desc->datalen);

View File

@ -43,8 +43,8 @@
(&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
#define ICE_CTL_Q_DESC_UNUSED(R) \
(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
((u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1))
/* Defines that help manage the driver vs FW API checks.
* Take a look at ice_aq_ver_check in ice_controlq.c for actual usage.
@ -112,7 +112,6 @@ struct ice_rq_event_info {
/* Control Queue information */
struct ice_ctl_q_info {
enum ice_ctl_q qtype;
enum ice_aq_err rq_last_status; /* last status on receive queue */
struct ice_ctl_q_ring rq; /* receive queue */
struct ice_ctl_q_ring sq; /* send queue */
u32 sq_cmd_timeout; /* send queue cmd write back timeout */

View File

@ -63,16 +63,16 @@
* @var ice_rc_version
* @brief driver release candidate version number
*/
const char ice_driver_version[] = "0.28.1-k";
const char ice_driver_version[] = "0.29.4-k";
const uint8_t ice_major_version = 0;
const uint8_t ice_minor_version = 28;
const uint8_t ice_patch_version = 1;
const uint8_t ice_minor_version = 29;
const uint8_t ice_patch_version = 4;
const uint8_t ice_rc_version = 0;
#define PVIDV(vendor, devid, name) \
PVID(vendor, devid, name " - 0.28.1-k")
PVID(vendor, devid, name " - 0.29.4-k")
#define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \
PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 0.28.1-k")
PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 0.29.4-k")
/**
* @var ice_vendor_info_array
@ -116,6 +116,9 @@ static pci_vendor_info_t ice_vendor_info_array[] = {
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
ICE_INTEL_VENDOR_ID, 0x000D, 0,
"Intel(R) Ethernet Network Adapter E810-L-Q2 for OCP3.0"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
ICE_INTEL_VENDOR_ID, 0x000E, 0,
"Intel(R) Ethernet Network Adapter E810-2C-Q2"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
"Intel(R) Ethernet Controller E810-C for QSFP"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
@ -133,6 +136,9 @@ static pci_vendor_info_t ice_vendor_info_array[] = {
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x0009, 0,
"Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 2.0"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x000C, 0,
"Intel(R) Ethernet Network Adapter E810-XXV-4 for OCP 3.0"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
"Intel(R) Ethernet Controller E810-C for SFP"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE,

View File

@ -63,7 +63,9 @@ enum feat_list {
ICE_FEATURE_RDMA,
ICE_FEATURE_SAFE_MODE,
ICE_FEATURE_LENIENT_LINK_MODE,
ICE_FEATURE_DEFAULT_OVERRIDE,
ICE_FEATURE_LINK_MGMT_VER_1,
ICE_FEATURE_LINK_MGMT_VER_2,
ICE_FEATURE_HEALTH_STATUS,
/* Must be last entry */
ICE_FEATURE_COUNT
};

View File

@ -38,6 +38,7 @@
/* To support tunneling entries by PF, the package will append the PF number to
* the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
*/
#define ICE_TNL_PRE "TNL_"
static const struct ice_tunnel_type_scan tnls[] = {
{ TNL_VXLAN, "TNL_VXLAN_PF" },
{ TNL_GENEVE, "TNL_GENEVE_PF" },
@ -364,6 +365,7 @@ ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
return NULL;
/* cppcheck-suppress nullPointer */
if (index > ICE_MAX_BST_TCAMS_IN_BUF)
return NULL;
@ -435,6 +437,7 @@ ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
if (!section)
return NULL;
/* cppcheck-suppress nullPointer */
if (index > ICE_MAX_LABELS_IN_BUF)
return NULL;
@ -480,6 +483,42 @@ ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
return label->name;
}
/**
* ice_add_tunnel_hint
* @hw: pointer to the HW structure
* @label_name: label text
* @val: value of the tunnel port boost entry
*/
static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
{
if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
u16 i;
for (i = 0; tnls[i].type != TNL_LAST; i++) {
size_t len = strlen(tnls[i].label_prefix);
/* Look for matching label start, before continuing */
if (strncmp(label_name, tnls[i].label_prefix, len))
continue;
/* Make sure this label matches our PF. Note that the PF
* character ('0' - '7') will be located where our
* prefix string's null terminator is located.
*/
if ((label_name[len] - '0') == hw->pf_id) {
hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
hw->tnl.tbl[hw->tnl.count].valid = false;
hw->tnl.tbl[hw->tnl.count].in_use = false;
hw->tnl.tbl[hw->tnl.count].marked = false;
hw->tnl.tbl[hw->tnl.count].boost_addr = val;
hw->tnl.tbl[hw->tnl.count].port = 0;
hw->tnl.count++;
break;
}
}
}
}
/**
* ice_init_pkg_hints
* @hw: pointer to the HW structure
@ -506,34 +545,15 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
&val);
while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
for (i = 0; tnls[i].type != TNL_LAST; i++) {
size_t len = strlen(tnls[i].label_prefix);
/* Look for matching label start, before continuing */
if (strncmp(label_name, tnls[i].label_prefix, len))
continue;
/* Make sure this label matches our PF. Note that the PF
* character ('0' - '7') will be located where our
* prefix string's null terminator is located.
*/
if ((label_name[len] - '0') == hw->pf_id) {
hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
hw->tnl.tbl[hw->tnl.count].valid = false;
hw->tnl.tbl[hw->tnl.count].in_use = false;
hw->tnl.tbl[hw->tnl.count].marked = false;
hw->tnl.tbl[hw->tnl.count].boost_addr = val;
hw->tnl.tbl[hw->tnl.count].port = 0;
hw->tnl.count++;
break;
}
}
while (label_name) {
if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
/* check for a tunnel entry */
ice_add_tunnel_hint(hw, label_name, val);
label_name = ice_enum_labels(NULL, 0, &state, &val);
}
/* Cache the appropriate boost TCAM entry pointers */
/* Cache the appropriate boost TCAM entry pointers for tunnels */
for (i = 0; i < hw->tnl.count; i++) {
ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
&hw->tnl.tbl[i].boost_entry);
@ -943,6 +963,36 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
return NULL;
}
/**
* ice_update_pkg_no_lock
* @hw: pointer to the hardware structure
* @bufs: pointer to an array of buffers
* @count: the number of buffers in the array
*/
static enum ice_status
ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
enum ice_status status = ICE_SUCCESS;
u32 i;
for (i = 0; i < count; i++) {
struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
bool last = ((i + 1) == count);
u32 offset, info;
status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
last, &offset, &info, NULL);
if (status) {
ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
status, offset, info);
break;
}
}
return status;
}
/**
* ice_update_pkg
* @hw: pointer to the hardware structure
@ -955,25 +1005,12 @@ enum ice_status
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
enum ice_status status;
u32 offset, info, i;
status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
if (status)
return status;
for (i = 0; i < count; i++) {
struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
bool last = ((i + 1) == count);
status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
last, &offset, &info, NULL);
if (status) {
ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
status, offset, info);
break;
}
}
status = ice_update_pkg_no_lock(hw, bufs, count);
ice_release_change_lock(hw);
@ -1102,6 +1139,7 @@ static enum ice_status
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
struct ice_buf_table *ice_buf_tbl;
enum ice_status status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
@ -1119,8 +1157,12 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
LE32_TO_CPU(ice_buf_tbl->buf_count));
return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
LE32_TO_CPU(ice_buf_tbl->buf_count));
status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
LE32_TO_CPU(ice_buf_tbl->buf_count));
ice_cache_vlan_mode(hw);
return status;
}
/**
@ -1882,7 +1924,7 @@ void ice_init_prof_result_bm(struct ice_hw *hw)
*
* Frees a package buffer
*/
static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
{
ice_free(hw, bld);
}
@ -1991,7 +2033,7 @@ ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
* Allocates a package buffer with a single section.
* Note: all package contents must be in Little Endian form.
*/
static struct ice_buf_build *
struct ice_buf_build *
ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
void **section)
{
@ -2105,7 +2147,7 @@ static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
*
* Return a pointer to the buffer's header
*/
static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
{
if (!bld)
return NULL;
@ -2342,7 +2384,7 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
u16 count = 0;
u16 index;
u16 size;
u16 i;
u16 i, j;
ice_acquire_lock(&hw->tnl_lock);
@ -2382,30 +2424,31 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
size);
if (!sect_rx)
goto ice_destroy_tunnel_err;
sect_rx->count = CPU_TO_LE16(1);
sect_rx->count = CPU_TO_LE16(count);
sect_tx = (struct ice_boost_tcam_section *)
ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
size);
if (!sect_tx)
goto ice_destroy_tunnel_err;
sect_tx->count = CPU_TO_LE16(1);
sect_tx->count = CPU_TO_LE16(count);
/* copy original boost entry to update package buffer, one copy to Rx
* section, another copy to the Tx section
*/
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
for (i = 0, j = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
(all || hw->tnl.tbl[i].port == port)) {
ice_memcpy(sect_rx->tcam + i,
ice_memcpy(sect_rx->tcam + j,
hw->tnl.tbl[i].boost_entry,
sizeof(*sect_rx->tcam),
ICE_NONDMA_TO_NONDMA);
ice_memcpy(sect_tx->tcam + i,
ice_memcpy(sect_tx->tcam + j,
hw->tnl.tbl[i].boost_entry,
sizeof(*sect_tx->tcam),
ICE_NONDMA_TO_NONDMA);
hw->tnl.tbl[i].marked = true;
j++;
}
status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
@ -2768,6 +2811,7 @@ ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2)
count++;
LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list)
chk_count++;
/* cppcheck-suppress knownConditionTrueFalse */
if (!count || count != chk_count)
return false;

View File

@ -64,12 +64,11 @@ enum ice_status
ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list);
enum ice_status
ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd);
enum ice_status
ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
enum ice_status
ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd);
bool
ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
u16 *port);
@ -120,5 +119,10 @@ ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
u64 id);
enum ice_status
ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
struct ice_buf_build *
ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
void **section);
struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld);
void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
#endif /* _ICE_FLEX_PIPE_H_ */

View File

@ -612,8 +612,8 @@ struct ice_xlt1 {
#define ICE_PF_NUM_S 13
#define ICE_PF_NUM_M (0x07 << ICE_PF_NUM_S)
#define ICE_VSIG_VALUE(vsig, pf_id) \
(u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \
(((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M))
((u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \
(((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M)))
#define ICE_DEFAULT_VSIG 0
/* XLT2 Table */
@ -730,4 +730,30 @@ enum ice_prof_type {
ICE_PROF_TUN_ALL = 0x6,
ICE_PROF_ALL = 0xFF,
};
/* Number of bits/bytes contained in meta init entry. Note, this should be a
* multiple of 32 bits.
*/
#define ICE_META_INIT_BITS 192
#define ICE_META_INIT_DW_CNT (ICE_META_INIT_BITS / (sizeof(__le32) * \
BITS_PER_BYTE))
/* The meta init Flag field starts at this bit */
#define ICE_META_FLAGS_ST 123
/* The entry and bit to check for Double VLAN Mode (DVM) support */
#define ICE_META_VLAN_MODE_ENTRY 0
#define ICE_META_FLAG_VLAN_MODE 60
#define ICE_META_VLAN_MODE_BIT (ICE_META_FLAGS_ST + \
ICE_META_FLAG_VLAN_MODE)
struct ice_meta_init_entry {
__le32 bm[ICE_META_INIT_DW_CNT];
};
struct ice_meta_init_section {
__le16 count;
__le16 offset;
struct ice_meta_init_entry entry[1];
};
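The DVM flag therefore sits at bit 123 + 60 = 183 of the 192-bit entry, i.e.
dword 5, bit 23. A hedged sketch of the check (the helper name is made up;
LE32_TO_CPU and BIT are the usual ice compatibility macros):

/* Illustrative only: test the DVM support flag in a meta init entry */
static inline bool
ice_meta_entry_dvm_capable(struct ice_meta_init_entry *entry)
{
	u32 dw = ICE_META_VLAN_MODE_BIT / 32;	/* 183 / 32 = 5 */
	u32 bit = ICE_META_VLAN_MODE_BIT % 32;	/* 183 % 32 = 23 */

	return (LE32_TO_CPU(entry->bm[dw]) & BIT(bit)) != 0;
}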
#endif /* _ICE_FLEX_TYPE_H_ */

View File

@ -1494,9 +1494,9 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
* 3 for tunneled with outer ipv6
*/
#define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
(((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
(((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M))
((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
(((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
(((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
/**
* ice_add_rss_cfg_sync - add an RSS configuration

View File

@ -274,6 +274,9 @@ struct ice_softc {
/* Ethertype filters enabled */
bool enable_tx_fc_filter;
bool enable_tx_lldp_filter;
/* Other tunable flags */
bool enable_health_events;
int rebuild_ticks;

View File

@ -1079,6 +1079,7 @@ static const struct ice_ctx_ele ice_tx_cmpltnq_info[] = {
#pragma pack(1)
struct ice_tx_cmpltnq_ctx {
u64 base;
#define ICE_TX_CMPLTNQ_CTX_BASE_S 7
u32 q_len;
#define ICE_TX_CMPLTNQ_CTX_Q_LEN_S 4
u8 generation;
@ -1086,6 +1087,9 @@ struct ice_tx_cmpltnq_ctx {
u8 pf_num;
u16 vmvf_num;
u8 vmvf_type;
#define ICE_TX_CMPLTNQ_CTX_VMVF_TYPE_VF 0
#define ICE_TX_CMPLTNQ_CTX_VMVF_TYPE_VMQ 1
#define ICE_TX_CMPLTNQ_CTX_VMVF_TYPE_PF 2
u8 tph_desc_wr;
u8 cpuid;
u32 cmpltn_cache[16];
@ -1115,10 +1119,15 @@ static const struct ice_ctx_ele ice_tx_drbell_fmt_info[] = {
#pragma pack(1)
struct ice_tx_drbell_q_ctx {
u64 base;
#define ICE_TX_DRBELL_Q_CTX_BASE_S 7
u16 ring_len;
#define ICE_TX_DRBELL_Q_CTX_RING_LEN_S 4
u8 pf_num;
u16 vf_num;
u8 vmvf_type;
#define ICE_TX_DRBELL_Q_CTX_VMVF_TYPE_VF 0
#define ICE_TX_DRBELL_Q_CTX_VMVF_TYPE_VMQ 1
#define ICE_TX_DRBELL_Q_CTX_VMVF_TYPE_PF 2
u8 cpuid;
u8 tph_desc_rd;
u8 tph_desc_wr;
@ -1175,7 +1184,7 @@ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
/* L2 Packet types */
ICE_PTT_UNUSED_ENTRY(0),
ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
ICE_PTT_UNUSED_ENTRY(2),
ICE_PTT_UNUSED_ENTRY(3),
ICE_PTT_UNUSED_ENTRY(4),
ICE_PTT_UNUSED_ENTRY(5),
@ -1289,7 +1298,7 @@ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
/* Non Tunneled IPv6 */
ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
ICE_PTT_UNUSED_ENTRY(91),
ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),

File diff suppressed because it is too large.

View File

@ -114,6 +114,9 @@ extern bool ice_enable_tx_fc_filter;
/* global sysctl indicating whether the Tx LLDP filter should be enabled */
extern bool ice_enable_tx_lldp_filter;
/* global sysctl indicating whether FW health status events should be enabled */
extern bool ice_enable_health_events;
/**
* @struct ice_bar_info
* @brief PCI BAR mapping information
@ -243,6 +246,19 @@ struct ice_bar_info {
#define ICE_DEFAULT_VF_QUEUES 4
/*
* There are three settings that can be updated independently or
* altogether: Link speed, FEC, and Flow Control. These macros allow
* the caller to specify which setting(s) to update.
*/
#define ICE_APPLY_LS BIT(0)
#define ICE_APPLY_FEC BIT(1)
#define ICE_APPLY_FC BIT(2)
#define ICE_APPLY_LS_FEC (ICE_APPLY_LS | ICE_APPLY_FEC)
#define ICE_APPLY_LS_FC (ICE_APPLY_LS | ICE_APPLY_FC)
#define ICE_APPLY_FEC_FC (ICE_APPLY_FEC | ICE_APPLY_FC)
#define ICE_APPLY_LS_FEC_FC (ICE_APPLY_LS_FEC | ICE_APPLY_FC)
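A hedged usage sketch pairing these flags with ice_apply_saved_phy_cfg(),
whose updated prototype appears later in this diff:

/* Illustrative only: re-apply just the saved FEC setting, */
error = ice_apply_saved_phy_cfg(sc, ICE_APPLY_FEC);

/* or push link speed, FEC and flow control together */
error = ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS_FEC_FC);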
/**
* @enum ice_dyn_idx_t
* @brief Dynamic Control ITR indexes
@ -761,7 +777,7 @@ void ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid *parent,
struct ice_hw_port_stats *stats);
void ice_configure_misc_interrupts(struct ice_softc *sc);
int ice_sync_multicast_filters(struct ice_softc *sc);
int ice_sync_multicast_filters(struct ice_softc *sc);
enum ice_status ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid);
enum ice_status ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid);
void ice_add_vsi_tunables(struct ice_vsi *vsi, struct sysctl_oid *parent);
@ -789,7 +805,7 @@ void ice_get_and_print_bus_info(struct ice_softc *sc);
const char *ice_fec_str(enum ice_fec_mode mode);
const char *ice_fc_str(enum ice_fc_mode mode);
const char *ice_fwd_act_str(enum ice_sw_fwd_act_type action);
const char * ice_state_to_str(enum ice_state state);
const char *ice_state_to_str(enum ice_state state);
int ice_init_link_events(struct ice_softc *sc);
void ice_configure_rx_itr(struct ice_vsi *vsi);
void ice_configure_tx_itr(struct ice_vsi *vsi);
@ -797,17 +813,18 @@ void ice_setup_pf_vsi(struct ice_softc *sc);
void ice_handle_mdd_event(struct ice_softc *sc);
void ice_init_dcb_setup(struct ice_softc *sc);
int ice_send_version(struct ice_softc *sc);
int ice_cfg_pf_ethertype_filters(struct ice_softc *sc);
int ice_cfg_pf_ethertype_filters(struct ice_softc *sc);
void ice_init_link_configuration(struct ice_softc *sc);
void ice_init_saved_phy_cfg(struct ice_softc *sc);
void ice_apply_saved_phy_cfg(struct ice_softc *sc);
int ice_apply_saved_phy_cfg(struct ice_softc *sc, u8 settings);
void ice_set_link_management_mode(struct ice_softc *sc);
int ice_module_event_handler(module_t mod, int what, void *arg);
int ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd);
int ice_handle_i2c_req(struct ice_softc *sc, struct ifi2creq *req);
int ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length);
int ice_alloc_intr_tracking(struct ice_softc *sc);
int ice_module_event_handler(module_t mod, int what, void *arg);
int ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd);
int ice_handle_i2c_req(struct ice_softc *sc, struct ifi2creq *req);
int ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length);
int ice_alloc_intr_tracking(struct ice_softc *sc);
void ice_free_intr_tracking(struct ice_softc *sc);
void ice_set_default_local_lldp_mib(struct ice_softc *sc);
void ice_init_health_events(struct ice_softc *sc);
#endif /* _ICE_LIB_H_ */

View File

@ -956,6 +956,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
/* Verify that the simple checksum is zero */
for (i = 0; i < sizeof(tmp); i++)
/* cppcheck-suppress objectIndex */
sum += ((u8 *)&tmp)[i];
if (sum) {

View File

@ -96,6 +96,10 @@ enum ice_sw_tunnel_type {
ICE_SW_TUN_IPV6_GTP_IPV4_UDP,
ICE_SW_TUN_IPV6_GTP_IPV6_TCP,
ICE_SW_TUN_IPV6_GTP_IPV6_UDP,
ICE_SW_TUN_IPV4_GTPU_IPV4,
ICE_SW_TUN_IPV4_GTPU_IPV6,
ICE_SW_TUN_IPV6_GTPU_IPV4,
ICE_SW_TUN_IPV6_GTPU_IPV6,
ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
};

View File

@ -992,6 +992,50 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
return status;
}
/**
* ice_sched_add_nodes_to_hw_layer - Add nodes to hw layer
* @pi: port information structure
* @tc_node: pointer to TC node
* @parent: pointer to parent node
* @layer: layer number to add nodes
* @num_nodes: number of nodes to be added
* @first_node_teid: pointer to the first node TEID
* @num_nodes_added: pointer to number of nodes added
*
* Add nodes into specific hw layer.
*/
static enum ice_status
ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer,
u16 num_nodes, u32 *first_node_teid,
u16 *num_nodes_added)
{
u16 max_child_nodes;
*num_nodes_added = 0;
if (!num_nodes)
return ICE_SUCCESS;
if (!parent || layer < pi->hw->sw_entry_point_layer)
return ICE_ERR_PARAM;
/* max children per node per layer */
max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
/* current number of children + required nodes exceed max children */
if ((parent->num_children + num_nodes) > max_child_nodes) {
/* Fail if the parent is a TC node */
if (parent == tc_node)
return ICE_ERR_CFG;
return ICE_ERR_MAX_LIMIT;
}
return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
num_nodes_added, first_node_teid);
}
/**
* ice_sched_add_nodes_to_layer - Add nodes to a given layer
* @pi: port information structure
@ -1012,72 +1056,53 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
u16 *num_nodes_added)
{
u32 *first_teid_ptr = first_node_teid;
u16 new_num_nodes, max_child_nodes;
u16 new_num_nodes = num_nodes;
enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw = pi->hw;
u16 num_added = 0;
u32 temp;
*num_nodes_added = 0;
while (*num_nodes_added < num_nodes) {
u16 max_child_nodes, num_added = 0;
/* cppcheck-suppress unusedVariable */
u32 temp;
if (!num_nodes)
return status;
if (!parent || layer < hw->sw_entry_point_layer)
return ICE_ERR_PARAM;
/* max children per node per layer */
max_child_nodes = hw->max_children[parent->tx_sched_layer];
/* current number of children + required nodes exceed max children ? */
if ((parent->num_children + num_nodes) > max_child_nodes) {
/* Fail if the parent is a TC node */
if (parent == tc_node)
return ICE_ERR_CFG;
status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent,
layer, new_num_nodes,
first_teid_ptr,
&num_added);
if (status == ICE_SUCCESS)
*num_nodes_added += num_added;
/* added more nodes than requested ? */
if (*num_nodes_added > num_nodes) {
ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes,
*num_nodes_added);
status = ICE_ERR_CFG;
break;
}
/* break if all the nodes are added successfully */
if (status == ICE_SUCCESS && (*num_nodes_added == num_nodes))
break;
/* break if the error is not max limit */
if (status != ICE_SUCCESS && status != ICE_ERR_MAX_LIMIT)
break;
/* Exceeded the max children */
max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
/* utilize all the spaces if the parent is not full */
if (parent->num_children < max_child_nodes) {
new_num_nodes = max_child_nodes - parent->num_children;
/* this recursion is intentional, and wouldn't
* go more than 2 calls
} else {
/* This parent is full, try the next sibling */
parent = parent->sibling;
/* Don't modify the first node TEID memory if the
* first node was added already in the above call.
* Instead send some temp memory for all other
* recursive calls.
*/
status = ice_sched_add_nodes_to_layer(pi, tc_node,
parent, layer,
new_num_nodes,
first_node_teid,
&num_added);
if (status != ICE_SUCCESS)
return status;
if (num_added)
first_teid_ptr = &temp;
*num_nodes_added += num_added;
new_num_nodes = num_nodes - *num_nodes_added;
}
/* Don't modify the first node TEID memory if the first node was
* added already in the above call. Instead send some temp
* memory for all other recursive calls.
*/
if (num_added)
first_teid_ptr = &temp;
new_num_nodes = num_nodes - num_added;
/* This parent is full, try the next sibling */
parent = parent->sibling;
/* this recursion is intentional; for 1024 queues
* per VSI, it takes at most 16 iterations:
* 1024 / 8 = 128 layer 8 nodes
* 128 / 8 = 16 (adding 8 nodes per iteration)
*/
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
layer, new_num_nodes,
first_teid_ptr,
&num_added);
*num_nodes_added += num_added;
return status;
}
status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
num_nodes_added, first_node_teid);
return status;
}
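To make the recursion bound in the comment above concrete, a standalone sketch of the worst-case arithmetic it describes (1024 queues per VSI, 8 children per scheduler node; illustration only, not driver code):
#include <stdio.h>
int
main(void)
{
	unsigned queues = 1024;	/* queues per VSI, from the comment */
	unsigned fanout = 8;	/* max children per scheduler node */
	unsigned nodes = queues / fanout;	/* 1024 / 8 = 128 layer 8 nodes */
	unsigned iterations = nodes / fanout;	/* 128 / 8 = 16, at 8 nodes each */
	printf("%u nodes, %u iterations\n", nodes, iterations);
	return (0);
}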
@ -1373,7 +1398,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
ice_memdup(hw, buf->layer_props,
(hw->num_tx_sched_layers *
sizeof(*hw->layer_info)),
ICE_DMA_TO_DMA);
ICE_NONDMA_TO_NONDMA);
if (!hw->layer_info) {
status = ICE_ERR_NO_MEMORY;
goto sched_query_out;
@ -4301,7 +4326,7 @@ ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
ice_sched_rm_unused_rl_prof(hw);
layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
node->tx_sched_layer);
node->tx_sched_layer);
if (layer_num >= hw->num_tx_sched_layers)
return ICE_ERR_PARAM;

View File

@ -38,6 +38,7 @@
#define ICE_ETH_ETHTYPE_OFFSET 12
#define ICE_ETH_VLAN_TCI_OFFSET 14
#define ICE_MAX_VLAN_ID 0xFFF
#define ICE_ETH_P_8021Q 0x8100
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
* struct to configure any switch filter rules.
@ -1158,6 +1159,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
u16 vlan_id = ICE_MAX_VLAN_ID + 1;
u16 vlan_tpid = ICE_ETH_P_8021Q;
void *daddr = NULL;
u16 eth_hdr_sz;
u8 *eth_hdr;
@ -1230,6 +1232,8 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
break;
case ICE_SW_LKUP_VLAN:
vlan_id = f_info->l_data.vlan.vlan_id;
if (f_info->l_data.vlan.tpid_valid)
vlan_tpid = f_info->l_data.vlan.tpid;
if (f_info->fltr_act == ICE_FWD_TO_VSI ||
f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
act |= ICE_SINGLE_ACT_PRUNE;
@ -1273,6 +1277,8 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
if (!(vlan_id > ICE_MAX_VLAN_ID)) {
off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
*off = CPU_TO_BE16(vlan_id);
off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
*off = CPU_TO_BE16(vlan_tpid);
}
/* Create the switch rule with the final dummy Ethernet header */
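The stores above place the tag in network byte order at fixed offsets in the dummy header; a minimal standalone sketch of the same layout (offsets 12 and 14 correspond to ICE_ETH_ETHTYPE_OFFSET and ICE_ETH_VLAN_TCI_OFFSET above; htons stands in for CPU_TO_BE16):
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>
/* Write a VLAN TPID/TCI pair into an Ethernet header buffer. */
static void
fill_vlan_tag(uint8_t *eth_hdr, uint16_t tpid, uint16_t vlan_id)
{
	uint16_t be;
	be = htons(tpid);			/* e.g. 0x8100 (ICE_ETH_P_8021Q) */
	memcpy(eth_hdr + 12, &be, sizeof(be));	/* ethertype offset */
	be = htons(vlan_id);			/* TCI, VLAN ID plus priority bits */
	memcpy(eth_hdr + 14, &be, sizeof(be));	/* TCI offset */
}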
@ -1807,6 +1813,9 @@ ice_add_update_vsi_list(struct ice_hw *hw,
ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
vsi_list_id);
if (!m_entry->vsi_list_info)
return ICE_ERR_NO_MEMORY;
/* If this entry was large action then the large action needs
* to be updated to point to FWD to VSI list
*/
@ -3011,6 +3020,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
fm_entry->fltr_info.vsi_handle == vsi_handle) ||
(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
fm_entry->vsi_list_info &&
(ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
vsi_handle))));
}
@ -3085,14 +3095,12 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
ice_fltr_mgmt_list_entry, list_entry) {
struct ice_fltr_info *fi;
fi = &fm_entry->fltr_info;
if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
continue;
status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
vsi_list_head, fi);
vsi_list_head,
&fm_entry->fltr_info);
if (status)
return status;
}
@ -3595,7 +3603,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
&remove_list_head);
ice_release_lock(rule_lock);
if (status)
return;
goto free_fltr_list;
switch (lkup) {
case ICE_SW_LKUP_MAC:
@ -3623,6 +3631,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
break;
}
free_fltr_list:
LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
ice_fltr_list_entry, list_entry) {
LIST_DEL(&fm_entry->list_entry);

View File

@ -137,6 +137,8 @@ struct ice_fltr_info {
} mac_vlan;
struct {
u16 vlan_id;
u16 tpid;
u8 tpid_valid;
} vlan;
/* Set lkup_type as ICE_SW_LKUP_ETHERTYPE
* if just using ethertype as filter. Set lkup_type as
@ -159,7 +161,6 @@ struct ice_fltr_info {
*/
u16 q_id:11;
u16 hw_vsi_id:10;
u16 vsi_id:10;
u16 vsi_list_id:10;
} fwd_id;
@ -412,7 +413,6 @@ ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id);
enum ice_status
ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id);
/* Switch/bridge related commands */
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id);
enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id);

View File

@ -259,6 +259,7 @@ struct ice_link_status {
u16 max_frame_size;
u16 link_speed;
u16 req_speeds;
u8 link_cfg_err;
u8 lse_ena; /* Link Status Event notification */
u8 link_info;
u8 an_info;
@ -661,6 +662,8 @@ enum ice_rl_type {
#define ICE_TXSCHED_GET_RL_WAKEUP_MV(p) LE16_TO_CPU((p)->info.wake_up_calc)
#define ICE_TXSCHED_GET_RL_ENCODE(p) LE16_TO_CPU((p)->info.rl_encode)
#define ICE_MAX_PORT_PER_PCI_DEV 8
/* The following tree example shows the naming conventions followed under
* ice_port_info struct for default scheduler tree topology.
*
@ -1024,7 +1027,7 @@ struct ice_hw {
struct ice_lock rss_locks; /* protect RSS configuration */
struct LIST_HEAD_TYPE rss_list_head;
struct ice_mbx_snapshot mbx_snapshot;
struct ice_vlan_mode_ops vlan_mode_ops;
u8 dvm_ena;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
@ -1277,4 +1280,14 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_FW_API_LLDP_FLTR_MAJ 1
#define ICE_FW_API_LLDP_FLTR_MIN 7
#define ICE_FW_API_LLDP_FLTR_PATCH 1
/* AQ API version for report default configuration */
#define ICE_FW_API_REPORT_DFLT_CFG_MAJ 1
#define ICE_FW_API_REPORT_DFLT_CFG_MIN 7
#define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3
/* AQ API version for FW health reports */
#define ICE_FW_API_HEALTH_REPORT_MAJ 1
#define ICE_FW_API_HEALTH_REPORT_MIN 7
#define ICE_FW_API_HEALTH_REPORT_PATCH 6
#endif /* _ICE_TYPE_H_ */
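These MAJ/MIN/PATCH triples gate optional features on the AQ API version the firmware reports. A standalone sketch of such a comparison, assuming the firmware version is available as three small integers (the helper is hypothetical, not the driver's ice_is_fw_health_report_supported):
#include <stdbool.h>
#include <stdint.h>
/* Hypothetical helper: true if the firmware AQ API is at least maj.min.patch,
 * e.g. health reports require at least 1.7.6 per the macros above.
 */
static bool
example_fw_api_ge(uint8_t fw_maj, uint8_t fw_min, uint8_t fw_patch,
    uint8_t maj, uint8_t min, uint8_t patch)
{
	if (fw_maj != maj)
		return (fw_maj > maj);
	if (fw_min != min)
		return (fw_min > min);
	return (fw_patch >= patch);
}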

View File

@ -30,28 +30,241 @@
*/
/*$FreeBSD$*/
#include "ice_vlan_mode.h"
#include "ice_common.h"
/**
* ice_pkg_get_supported_vlan_mode - determine if DDP supports Double VLAN mode (DVM)
* @hw: pointer to the HW struct
* @dvm: output variable to determine if DDP supports DVM (true) or SVM (false)
*/
static enum ice_status
ice_pkg_get_supported_vlan_mode(struct ice_hw *hw, bool *dvm)
{
u16 meta_init_size = sizeof(struct ice_meta_init_section);
struct ice_meta_init_section *sect;
struct ice_buf_build *bld;
enum ice_status status;
/* if anything fails, we assume there is no DVM support */
*dvm = false;
bld = ice_pkg_buf_alloc_single_section(hw,
ICE_SID_RXPARSER_METADATA_INIT,
meta_init_size, (void **)&sect);
if (!bld)
return ICE_ERR_NO_MEMORY;
/* only need to read a single section */
sect->count = CPU_TO_LE16(1);
sect->offset = CPU_TO_LE16(ICE_META_VLAN_MODE_ENTRY);
status = ice_aq_upload_section(hw,
(struct ice_buf_hdr *)ice_pkg_buf(bld),
ICE_PKG_BUF_SIZE, NULL);
if (!status) {
ice_declare_bitmap(entry, ICE_META_INIT_BITS);
u32 arr[ICE_META_INIT_DW_CNT];
u16 i;
/* convert to host bitmap format */
for (i = 0; i < ICE_META_INIT_DW_CNT; i++)
arr[i] = LE32_TO_CPU(sect->entry[0].bm[i]);
ice_bitmap_from_array32(entry, arr, (u16)ICE_META_INIT_BITS);
/* check if DVM is supported */
*dvm = ice_is_bit_set(entry, ICE_META_VLAN_MODE_BIT);
}
ice_pkg_buf_free(hw, bld);
return status;
}
/**
* ice_aq_get_vlan_mode - get the VLAN mode of the device
* @hw: pointer to the HW structure
* @get_params: structure FW fills in based on the current VLAN mode config
*
* Get VLAN Mode Parameters (0x020D)
*/
static enum ice_status
ice_aq_get_vlan_mode(struct ice_hw *hw,
struct ice_aqc_get_vlan_mode *get_params)
{
struct ice_aq_desc desc;
if (!get_params)
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc,
ice_aqc_opc_get_vlan_mode_parameters);
return ice_aq_send_cmd(hw, &desc, get_params, sizeof(*get_params),
NULL);
}
/**
* ice_aq_is_dvm_ena - query FW to check if double VLAN mode is enabled
* @hw: pointer to the HW structure
*
* Returns true if the hardware/firmware is configured in double VLAN mode,
* otherwise returns false, signaling that the hardware/firmware is configured
* in single VLAN mode.
*
* Also, returns false if this call fails for any reason (e.g. the firmware
* doesn't support this AQ call).
*/
static bool ice_aq_is_dvm_ena(struct ice_hw *hw)
{
struct ice_aqc_get_vlan_mode get_params = { 0 };
enum ice_status status;
status = ice_aq_get_vlan_mode(hw, &get_params);
if (status) {
ice_debug(hw, ICE_DBG_AQ, "Failed to get VLAN mode, status %d\n",
status);
return false;
}
return (get_params.vlan_mode & ICE_AQ_VLAN_MODE_DVM_ENA);
}
/**
* ice_is_dvm_ena - check if double VLAN mode is enabled
* @hw: pointer to the HW structure
*
* The device is configured in single or double VLAN mode on initialization and
* this cannot be dynamically changed during runtime. Based on this there is no
* need to make an AQ call every time the driver needs to know the VLAN mode.
* Instead, use the cached VLAN mode.
*/
bool ice_is_dvm_ena(struct ice_hw *hw)
{
return hw->dvm_ena;
}
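Because the mode is fixed after init, callers can branch on the cached flag cheaply. An illustrative (hypothetical) use, choosing an outer TPID for a double-tagged filter:
/* Illustrative only: pick the outer VLAN TPID based on the cached mode.
 * 0x88A8 (S-tag) is only meaningful when double VLAN mode is enabled.
 */
static u16
example_outer_tpid(struct ice_hw *hw)
{
	return (ice_is_dvm_ena(hw) ? 0x88A8 : 0x8100);
}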
/**
* ice_cache_vlan_mode - cache VLAN mode after DDP is downloaded
* @hw: pointer to the HW structure
*
* This is only called after downloading the DDP and after the global
* configuration lock has been released because all ports on a device need to
* cache the VLAN mode.
*/
void ice_cache_vlan_mode(struct ice_hw *hw)
{
hw->dvm_ena = ice_aq_is_dvm_ena(hw) ? true : false;
}
/**
* ice_is_dvm_supported - check if Double VLAN Mode is supported
* @hw: pointer to the hardware structure
*
* Returns true if Double VLAN Mode (DVM) is supported and false if only Single
* VLAN Mode (SVM) is supported. In order for DVM to be supported, both the DDP
* and the firmware must support it; otherwise only SVM is supported. This function
* should only be called while the global config lock is held and after the
* package has been successfully downloaded.
*/
static bool ice_is_dvm_supported(struct ice_hw *hw)
{
struct ice_aqc_get_vlan_mode get_vlan_mode = { 0 };
enum ice_status status;
bool pkg_supports_dvm;
status = ice_pkg_get_supported_vlan_mode(hw, &pkg_supports_dvm);
if (status) {
ice_debug(hw, ICE_DBG_PKG, "Failed to get supported VLAN mode, status %d\n",
status);
return false;
}
if (!pkg_supports_dvm)
return false;
/* If firmware returns success, then it supports DVM, else it only
* supports SVM
*/
status = ice_aq_get_vlan_mode(hw, &get_vlan_mode);
if (status) {
ice_debug(hw, ICE_DBG_NVM, "Failed to get VLAN mode, status %d\n",
status);
return false;
}
return true;
}
/**
* ice_aq_set_vlan_mode - set the VLAN mode of the device
* @hw: pointer to the HW structure
* @set_params: requested VLAN mode configuration
*
* Set VLAN Mode Parameters (0x020C)
*/
static enum ice_status
ice_aq_set_vlan_mode(struct ice_hw *hw,
struct ice_aqc_set_vlan_mode *set_params)
{
u8 rdma_packet, mng_vlan_prot_id;
struct ice_aq_desc desc;
if (!set_params)
return ICE_ERR_PARAM;
if (set_params->l2tag_prio_tagging > ICE_AQ_VLAN_PRIO_TAG_MAX)
return ICE_ERR_PARAM;
rdma_packet = set_params->rdma_packet;
if (rdma_packet != ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING &&
rdma_packet != ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING)
return ICE_ERR_PARAM;
mng_vlan_prot_id = set_params->mng_vlan_prot_id;
if (mng_vlan_prot_id != ICE_AQ_VLAN_MNG_PROTOCOL_ID_OUTER &&
mng_vlan_prot_id != ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER)
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc,
ice_aqc_opc_set_vlan_mode_parameters);
desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
return ice_aq_send_cmd(hw, &desc, set_params, sizeof(*set_params),
NULL);
}
/**
* ice_set_svm - set single VLAN mode
* @hw: pointer to the HW structure
*/
static enum ice_status ice_set_svm_dflt(struct ice_hw *hw)
static enum ice_status ice_set_svm(struct ice_hw *hw)
{
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
struct ice_aqc_set_vlan_mode *set_params;
enum ice_status status;
return ice_aq_set_port_params(hw->port_info, 0, false, false, false, NULL);
}
status = ice_aq_set_port_params(hw->port_info, 0, false, false, false, NULL);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to set port parameters for single VLAN mode\n");
return status;
}
/**
* ice_init_vlan_mode_ops - initialize VLAN mode configuration ops
* @hw: pointer to the HW structure
*/
void ice_init_vlan_mode_ops(struct ice_hw *hw)
{
hw->vlan_mode_ops.set_dvm = NULL;
hw->vlan_mode_ops.set_svm = ice_set_svm_dflt;
set_params = (struct ice_aqc_set_vlan_mode *)
ice_malloc(hw, sizeof(*set_params));
if (!set_params)
return ICE_ERR_NO_MEMORY;
/* default configuration for SVM */
set_params->l2tag_prio_tagging = ICE_AQ_VLAN_PRIO_TAG_INNER_CTAG;
set_params->rdma_packet = ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING;
set_params->mng_vlan_prot_id = ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER;
status = ice_aq_set_vlan_mode(hw, set_params);
if (status)
ice_debug(hw, ICE_DBG_INIT, "Failed to configure port in single VLAN mode\n");
ice_free(hw, set_params);
return status;
}
/**
@ -60,13 +273,9 @@ void ice_init_vlan_mode_ops(struct ice_hw *hw)
*/
enum ice_status ice_set_vlan_mode(struct ice_hw *hw)
{
enum ice_status status = ICE_ERR_NOT_IMPL;
if (hw->vlan_mode_ops.set_dvm)
status = hw->vlan_mode_ops.set_dvm(hw);
if (!ice_is_dvm_supported(hw))
return ICE_SUCCESS;
if (status)
return hw->vlan_mode_ops.set_svm(hw);
return status;
return ice_set_svm(hw);
}

View File

@ -33,28 +33,12 @@
#ifndef _ICE_VLAN_MODE_H_
#define _ICE_VLAN_MODE_H_
#include "ice_osdep.h"
struct ice_hw;
bool ice_is_dvm_ena(struct ice_hw *hw);
void ice_cache_vlan_mode(struct ice_hw *hw);
enum ice_status ice_set_vlan_mode(struct ice_hw *hw);
void ice_init_vlan_mode_ops(struct ice_hw *hw);
/* This structure defines the VLAN mode configuration interface. It is used to set the VLAN mode.
*
* Note: These operations will be called while the global configuration lock is held.
*
* enum ice_status (*set_svm)(struct ice_hw *hw);
* This function is called when the DDP and/or Firmware don't support double VLAN mode (DVM) or
* if the set_dvm op is not implemented and/or returns failure. It will set the device in
* single VLAN mode (SVM).
*
* enum ice_status (*set_dvm)(struct ice_hw *hw);
* This function is called when the DDP and Firmware support double VLAN mode (DVM). It should
* be implemented to set double VLAN mode. If it fails or remains unimplemented, set_svm will
* be called as a fallback plan.
*/
struct ice_vlan_mode_ops {
enum ice_status (*set_svm)(struct ice_hw *hw);
enum ice_status (*set_dvm)(struct ice_hw *hw);
};
#endif /* _ICE_VLAN_MODE_H_ */

View File

@ -753,6 +753,9 @@ ice_if_attach_post(if_ctx_t ctx)
return err;
}
/* Enable FW health event reporting */
ice_init_health_events(sc);
/* Configure the main PF VSI for RSS */
err = ice_config_rss(&sc->pf_vsi);
if (err) {
@ -1946,7 +1949,7 @@ ice_poll_for_media_avail(struct ice_softc *sc)
enum ice_status status;
/* Re-enable link and re-apply user link settings */
ice_apply_saved_phy_cfg(sc);
ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS_FEC_FC);
/* Update the OS about changes in media capability */
status = ice_add_media_types(sc, sc->media);
@ -2016,6 +2019,18 @@ ice_admin_timer(void *arg)
{
struct ice_softc *sc = (struct ice_softc *)arg;
/*
* There is a point where callout routines are no longer
* cancelable. So there exists a window of time where the
* driver enters detach() and tries to cancel the callout, but the
* callout routine has passed the cancellation point. The detach()
* routine is unaware of this and tries to free resources that the
* callout routine needs. So we check for the detach state flag to
* at least shrink the window of opportunity.
*/
if (ice_driver_is_detaching(sc))
return;
/* Fire off the admin task */
iflib_admin_intr_deferred(sc->ctx);
@ -2424,6 +2439,9 @@ ice_rebuild(struct ice_softc *sc)
if (err)
goto err_deinit_pf_vsi;
/* Re-enable FW health event reporting */
ice_init_health_events(sc);
/* Reconfigure the main PF VSI for RSS */
err = ice_config_rss(&sc->pf_vsi);
if (err) {
@ -2593,11 +2611,16 @@ ice_init_device_features(struct ice_softc *sc)
ice_set_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
ice_set_bit(ICE_FEATURE_RSS, sc->feat_cap);
ice_set_bit(ICE_FEATURE_LENIENT_LINK_MODE, sc->feat_cap);
ice_set_bit(ICE_FEATURE_DEFAULT_OVERRIDE, sc->feat_cap);
ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_1, sc->feat_cap);
ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_2, sc->feat_cap);
ice_set_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap);
/* Disable features due to hardware limitations... */
if (!sc->hw.func_caps.common_cap.rss_table_size)
ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap);
/* Disable features due to firmware limitations... */
if (!ice_is_fw_health_report_supported(&sc->hw))
ice_clear_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap);
/* Disable capabilities not supported by the OS */
ice_disable_unsupported_features(sc->feat_cap);
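The pattern here is capability-then-subtraction: every feature the driver could support is set in feat_cap, then hardware and firmware limits clear bits, and later code keys off the surviving bits. A condensed (hypothetical) sketch of the idiom for the new health-status feature:
/* Condensed sketch, not the actual attach path */
static void
example_init_health_feature(struct ice_softc *sc)
{
	/* assume the driver supports it ... */
	ice_set_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap);
	/* ... subtract what this firmware cannot do ... */
	if (!ice_is_fw_health_report_supported(&sc->hw))
		ice_clear_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap);
	/* ... and only enable the feature if the capability survived */
	if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_HEALTH_STATUS))
		ice_init_health_events(sc);
}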

View File

@ -34,8 +34,9 @@
#define _VIRTCHNL_H_
/* Description:
* This header file describes the VF-PF communication protocol used
* by the drivers for all devices starting from our 40G product line
* This header file describes the Virtual Function (VF) - Physical Function
* (PF) communication protocol used by the drivers for all devices starting
* from our 40G product line
*
* Admin queue buffer usage:
* desc->opcode is always aqc_opc_send_msg_to_pf
@ -49,8 +50,8 @@
* have a maximum of sixteen queues for all of its VSIs.
*
* The PF is required to return a status code in v_retval for all messages
* except RESET_VF, which does not require any response. The return value
* is of status_code type, defined in the shared type.h.
* except RESET_VF, which does not require any response. The returned value
* is of virtchnl_status_code type, defined in the shared type.h.
*
* In general, VF driver initialization should roughly follow the order of
* these opcodes. The VF driver must first validate the API version of the
@ -157,10 +158,20 @@ enum virtchnl_ops {
VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
/* opcode 34 is reserved */
/* opcodes 39, 40, 41, 42 and 43 are reserved */
/* opcodes 38, 39, 40, 41, 42 and 43 are reserved */
/* opcode 44 is reserved */
/* opcode 45, 46, 47, 48 and 49 are reserved */
VIRTCHNL_OP_GET_MAX_RSS_QREGION = 50,
VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
VIRTCHNL_OP_ADD_VLAN_V2 = 52,
VIRTCHNL_OP_DEL_VLAN_V2 = 53,
VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,
VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2 = 58,
VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 = 59,
/* opcodes 60 through 69 are reserved */
VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108,
VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
@ -236,6 +247,24 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
return "VIRTCHNL_OP_DISABLE_QUEUES_V2";
case VIRTCHNL_OP_MAP_QUEUE_VECTOR:
return "VIRTCHNL_OP_MAP_QUEUE_VECTOR";
case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
return "VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS";
case VIRTCHNL_OP_ADD_VLAN_V2:
return "VIRTCHNL_OP_ADD_VLAN_V2";
case VIRTCHNL_OP_DEL_VLAN_V2:
return "VIRTCHNL_OP_DEL_VLAN_V2";
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
return "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2";
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
return "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2";
case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
return "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2";
case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
return "VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2";
case VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2:
return "VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2";
case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
return "VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2";
case VIRTCHNL_OP_MAX:
return "VIRTCHNL_OP_MAX";
default:
@ -259,8 +288,12 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
struct virtchnl_msg {
u8 pad[8]; /* AQ flags/opcode/len/retval fields */
enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
enum virtchnl_status_code v_retval; /* ditto for desc->retval */
/* avoid confusion with desc->opcode */
enum virtchnl_ops v_opcode;
/* ditto for desc->retval */
enum virtchnl_status_code v_retval;
u32 vfid; /* used by PF when sending to VF */
};
@ -282,6 +315,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
*/
#define VIRTCHNL_VERSION_MAJOR 1
#define VIRTCHNL_VERSION_MINOR 1
#define VIRTCHNL_VERSION_MAJOR_2 2
#define VIRTCHNL_VERSION_MINOR_0 0
#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
struct virtchnl_version_info {
@ -326,7 +361,9 @@ enum virtchnl_vsi_type {
struct virtchnl_vsi_resource {
u16 vsi_id;
u16 num_queue_pairs;
enum virtchnl_vsi_type vsi_type;
/* see enum virtchnl_vsi_type */
s32 vsi_type;
u16 qset_handle;
u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
};
@ -347,6 +384,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
#define VIRTCHNL_VF_OFFLOAD_CRC 0x00000080
/* 0X00000100 is reserved */
#define VIRTCHNL_VF_LARGE_NUM_QPAIRS 0x00000200
#define VIRTCHNL_VF_OFFLOAD_VLAN_V2 0x00008000
#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
@ -361,6 +399,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
/* 0X08000000 and 0X10000000 are reserved */
/* 0X20000000 is reserved */
/* 0X40000000 is reserved */
/* 0X80000000 is reserved */
/* Define below the capability flags that are not offloads */
#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
@ -425,7 +464,9 @@ struct virtchnl_rxq_info {
u8 crc_disable;
u8 pad1[3];
u64 dma_ring_addr;
enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
/* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
s32 rx_split_pos;
u32 pad2;
};
@ -628,6 +669,388 @@ struct virtchnl_vlan_filter_list {
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
/* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
* structures and opcodes.
*
* VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
* populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
*
* VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
* VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
* VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
*
* VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
* by the PF concurrently. For example, if the PF can support
* VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
* would OR the following bits:
*
* VIRTCHNL_VLAN_ETHERTYPE_8100 |
* VIRTCHNL_VLAN_ETHERTYPE_88A8 |
* VIRTCHNL_VLAN_ETHERTYPE_AND;
*
* The VF would interpret this as VLAN filtering can be supported on both 0x8100
* and 0x88A8 VLAN ethertypes.
*
* VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be supported
* by the PF concurrently. For example if the PF can support
* VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
* offload it would OR the following bits:
*
* VIRTCHNL_VLAN_ETHERTYPE_8100 |
* VIRTCHNL_VLAN_ETHERTYPE_88A8 |
* VIRTCHNL_VLAN_ETHERTYPE_XOR;
*
* The VF would interpret this as VLAN stripping can be supported on either
* 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
* the previously set value.
*
* VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
* strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
*
* VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
* offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
*
* VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
* offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
*
* VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
* VLAN filtering if the underlying PF supports it.
*
* VIRTCHNL_VLAN_TOGGLE - This field is used to say whether a
* certain VLAN capability can be toggled. For example, if the underlying PF/CP
* allows the VF to toggle VLAN filtering, stripping, and/or insertion, it should
* set this bit along with the supported ethertypes.
*/
enum virtchnl_vlan_support {
VIRTCHNL_VLAN_UNSUPPORTED = 0,
VIRTCHNL_VLAN_ETHERTYPE_8100 = 0x00000001,
VIRTCHNL_VLAN_ETHERTYPE_88A8 = 0x00000002,
VIRTCHNL_VLAN_ETHERTYPE_9100 = 0x00000004,
VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 = 0x00000100,
VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 = 0x00000200,
VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 = 0x00000400,
VIRTCHNL_VLAN_PRIO = 0x01000000,
VIRTCHNL_VLAN_FILTER_MASK = 0x10000000,
VIRTCHNL_VLAN_ETHERTYPE_AND = 0x20000000,
VIRTCHNL_VLAN_ETHERTYPE_XOR = 0x40000000,
VIRTCHNL_VLAN_TOGGLE = 0x80000000
};
/* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
* for filtering, insertion, and stripping capabilities.
*
* If only outer capabilities are supported (for filtering, insertion, and/or
* stripping) then this refers to the outer most or single VLAN from the VF's
* perspective.
*
* If only inner capabilities are supported (for filtering, insertion, and/or
* stripping) then this refers to the outer most or single VLAN from the VF's
* perspective. Functionally this is the same as if only outer capabilities are
* supported. The VF driver is just forced to use the inner fields when
* adding/deleting filters and enabling/disabling offloads (if supported).
*
* If both outer and inner capabilities are supported (for filtering, insertion,
* and/or stripping) then outer refers to the outer most or single VLAN and
* inner refers to the second VLAN, if it exists, in the packet.
*
* There is no support for tunneled VLAN offloads, so outer or inner are never
* referring to a tunneled packet from the VF's perspective.
*/
struct virtchnl_vlan_supported_caps {
u32 outer;
u32 inner;
};
/* The PF populates these fields based on the supported VLAN filtering. If a
* field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
* reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
* the unsupported fields.
*
* Also, a VF is only allowed to toggle its VLAN filtering setting if the
* VIRTCHNL_VLAN_TOGGLE bit is set.
*
* The ethertype(s) specified in the ethertype_init field are the ethertypes
* enabled for VLAN filtering. VLAN filtering in this case refers to the outer
* most VLAN from the VF's perspective. If both inner and outer filtering are
* allowed then ethertype_init only refers to the outer most VLAN, as the only
* VLAN ethertype supported for inner VLAN filtering is
* VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
* when both inner and outer filtering are allowed.
*
* The max_filters field tells the VF how many VLAN filters it's allowed to have
* at any one time. If it exceeds this amount and tries to add another filter,
* then the request will be rejected by the PF. To prevent failures, the VF
* should keep track of how many VLAN filters it has added and not attempt to
* add more than max_filters.
*/
struct virtchnl_vlan_filtering_caps {
struct virtchnl_vlan_supported_caps filtering_support;
u32 ethertype_init;
u16 max_filters;
u8 pad[2];
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);
/* This enum is used for the virtchnl_vlan_offload_caps structure to specify
* if the PF supports a different ethertype for stripping and insertion.
*
* VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
* for stripping affect the ethertype(s) specified for insertion, and vice
* versa. If the VF tries to configure VLAN stripping via
* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
* that will be the ethertype for both stripping and insertion.
*
* VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
* stripping do not affect the ethertype(s) specified for insertion, and vice
* versa.
*/
enum virtchnl_vlan_ethertype_match {
VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
};
/* The PF populates these fields based on the supported VLAN offloads. If a
* field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
* reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
* VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
*
* Also, a VF is only allowed to toggle its VLAN offload setting if the
* VIRTCHNL_VLAN_TOGGLE bit is set.
*
* The VF driver needs to be aware of how the tags are stripped by hardware and
* inserted by the VF driver based on the level of offload support. The PF will
* populate these fields based on where the VLAN tags are expected to be
* offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
* interpret these fields. See the definition of the
* VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
* enumeration.
*/
struct virtchnl_vlan_offload_caps {
struct virtchnl_vlan_supported_caps stripping_support;
struct virtchnl_vlan_supported_caps insertion_support;
u32 ethertype_init;
u8 ethertype_match;
u8 pad[3];
};
VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);
/* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
* VF sends this message to determine its VLAN capabilities.
*
* PF will mark which capabilities it supports based on hardware support and
* current configuration. For example, if a port VLAN is configured the PF will
* not allow outer VLAN filtering, stripping, or insertion to be configured so
* it will block these features from the VF.
*
* The VF will need to cross-reference its capabilities with the PF's
* capabilities in the response message from the PF to determine the VLAN
* support.
*/
struct virtchnl_vlan_caps {
struct virtchnl_vlan_filtering_caps filtering;
struct virtchnl_vlan_offload_caps offloads;
};
VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);
struct virtchnl_vlan {
u16 tci; /* tci[15:13] = PCP and tci[11:0] = VID */
u16 tci_mask; /* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
* filtering caps
*/
u16 tpid; /* 0x8100, 0x88a8, etc. and only type(s) set in
* filtering caps. Note that tpid here does not refer to
* VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
* actual 2-byte VLAN TPID
*/
u8 pad[2];
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);
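Per the tci layout in the comment above (PCP in bits 15:13, VID in bits 11:0), composing the field is plain masking and shifting; a minimal standalone sketch:
#include <stdint.h>
/* Build a TCI value from a priority (PCP) and a VLAN ID. */
static uint16_t
example_make_tci(uint8_t pcp, uint16_t vid)
{
	return (uint16_t)(((pcp & 0x7) << 13) | (vid & 0xFFF));
}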
struct virtchnl_vlan_filter {
struct virtchnl_vlan inner;
struct virtchnl_vlan outer;
u8 pad[16];
};
VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);
/* VIRTCHNL_OP_ADD_VLAN_V2
* VIRTCHNL_OP_DEL_VLAN_V2
*
* VF sends these messages to add/del one or more VLAN tag filters for Rx
* traffic.
*
* The PF attempts to add the filters and returns status.
*
* The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
* supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
*/
struct virtchnl_vlan_filter_list_v2 {
u16 vport_id;
u16 num_elements;
u8 pad[4];
struct virtchnl_vlan_filter filters[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_filter_list_v2);
/* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
* VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
* VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
* VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
*
* VF sends this message to enable or disable VLAN stripping or insertion. It
* also needs to specify an ethertype. The VF knows which VLAN ethertypes are
* allowed and whether or not it's allowed to enable/disable the specific
* offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
* parse the virtchnl_vlan_caps.offloads fields to determine which offload
* messages are allowed.
*
* For example, if the PF populates the virtchnl_vlan_caps.offloads in the
* following manner the VF will be allowed to enable and/or disable 0x8100 inner
* VLAN insertion and/or stripping via the opcodes listed above. Inner in this
* case means the outer most or single VLAN from the VF's perspective. This is
* because no outer offloads are supported. See the comments above the
* virtchnl_vlan_supported_caps structure for more details.
*
* virtchnl_vlan_caps.offloads.stripping_support.inner =
* VIRTCHNL_VLAN_TOGGLE |
* VIRTCHNL_VLAN_ETHERTYPE_8100;
*
* virtchnl_vlan_caps.offloads.insertion_support.inner =
* VIRTCHNL_VLAN_TOGGLE |
* VIRTCHNL_VLAN_ETHERTYPE_8100;
*
* In order to enable inner (again note that in this case inner is the outer
* most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
* VLANs, the VF would populate the virtchnl_vlan_setting structure in the
* following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
*
* virtchnl_vlan_setting.inner_ethertype_setting =
* VIRTCHNL_VLAN_ETHERTYPE_8100;
*
* virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
* initialization.
*
* The reason that VLAN TPID(s) are not being used for the
* outer_ethertype_setting and inner_ethertype_setting fields is because it's
* possible a device could support VLAN insertion and/or stripping offload on
* multiple ethertypes concurrently, so this method allows a VF to request
* multiple ethertypes in one message using the virtchnl_vlan_support
* enumeration.
*
* For example, if the PF populates the virtchnl_vlan_caps.offloads in the
* following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
* VLAN insertion and stripping simultaneously. The
* virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
* populated based on what the PF can support.
*
* virtchnl_vlan_caps.offloads.stripping_support.outer =
* VIRTCHNL_VLAN_TOGGLE |
* VIRTCHNL_VLAN_ETHERTYPE_8100 |
* VIRTCHNL_VLAN_ETHERTYPE_88A8 |
* VIRTCHNL_VLAN_ETHERTYPE_AND;
*
* virtchnl_vlan_caps.offloads.insertion_support.outer =
* VIRTCHNL_VLAN_TOGGLE |
* VIRTCHNL_VLAN_ETHERTYPE_8100 |
* VIRTCHNL_VLAN_ETHERTYPE_88A8 |
* VIRTCHNL_VLAN_ETHERTYPE_AND;
*
* In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
* would populate the virtchnl_vlan_setting structure in the following manner
* and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
*
* virtchnl_vlan_setting.outer_ethertype_setting =
* VIRTCHNL_VLAN_ETHERTYPE_8100 |
* VIRTCHNL_VLAN_ETHERTYPE_88A8;
*
* virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
* initialization.
*
* There is also the case where a PF and the underlying hardware can support
* VLAN offloads on multiple ethertypes, but not concurrently. For example, if
* the PF populates the virtchnl_vlan_caps.offloads in the following manner the
* VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
* offloads. The ethertypes must match for stripping and insertion.
*
* virtchnl_vlan_caps.offloads.stripping_support.outer =
* VIRTCHNL_VLAN_TOGGLE |
* VIRTCHNL_VLAN_ETHERTYPE_8100 |
* VIRTCHNL_VLAN_ETHERTYPE_88A8 |
* VIRTCHNL_VLAN_ETHERTYPE_XOR;
*
* virtchnl_vlan_caps.offloads.insertion_support.outer =
* VIRTCHNL_VLAN_TOGGLE |
* VIRTCHNL_VLAN_ETHERTYPE_8100 |
* VIRTCHNL_VLAN_ETHERTYPE_88A8 |
* VIRTCHNL_VLAN_ETHERTYPE_XOR;
*
* virtchnl_vlan_caps.offloads.ethertype_match =
* VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
*
* In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
* populate the virtchnl_vlan_setting structure in the following manner and send
* the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2. Also, this will change the
* ethertype for VLAN insertion if it's enabled. So, for completeness, a
* VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
*
* virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
*
* virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
* initialization.
*
* VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2
* VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2
*
* VF sends this message to enable or disable VLAN filtering. It also needs to
* specify an ethertype. The VF knows which VLAN ethertypes are allowed and
* whether or not it's allowed to enable/disable filtering via the
* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
* parse the virtchnl_vlan_caps.filtering fields to determine which, if any,
* filtering messages are allowed.
*
* For example, if the PF populates the virtchnl_vlan_caps.filtering in the
* following manner the VF will be allowed to enable/disable 0x8100 and 0x88a8
* outer VLAN filtering together. Note that VIRTCHNL_VLAN_ETHERTYPE_AND
* means that all filtering ethertypes will be enabled and disabled together
* regardless of the request from the VF. This means that the underlying
* hardware only supports VLAN filtering for all of the specified VLAN ethertypes
* or none of them.
*
* virtchnl_vlan_caps.filtering.filtering_support.outer =
* VIRTCHNL_VLAN_TOGGLE |
* VIRTCHNL_VLAN_ETHERTYPE_8100 |
* VIRTCHNL_VLAN_ETHERTYPE_88A8 |
* VIRTCHNL_VLAN_ETHERTYPE_9100 |
* VIRTCHNL_VLAN_ETHERTYPE_AND;
*
* In order to enable outer VLAN filtering for 0x88a8 and 0x8100 VLANs (0x9100
* VLANs aren't supported by the VF driver), the VF would populate the
* virtchnl_vlan_setting structure in the following manner and send the
* VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2. The same message format would be used
* to disable outer VLAN filtering for 0x88a8 and 0x8100 VLANs, but the
* VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 opcode is used.
*
* virtchnl_vlan_setting.outer_ethertype_setting =
* VIRTCHNL_VLAN_ETHERTYPE_8100 |
* VIRTCHNL_VLAN_ETHERTYPE_88A8;
*
*/
struct virtchnl_vlan_setting {
u32 outer_ethertype_setting;
u32 inner_ethertype_setting;
u16 vport_id;
u8 pad[6];
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);
/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
* VF sends VSI id and flags.
* PF returns status code in retval.
@ -790,8 +1213,12 @@ enum virtchnl_flow_type {
struct virtchnl_filter {
union virtchnl_flow_spec data;
union virtchnl_flow_spec mask;
enum virtchnl_flow_type flow_type;
enum virtchnl_action action;
/* see enum virtchnl_flow_type */
s32 flow_type;
/* see enum virtchnl_action */
s32 action;
u32 action_meta;
u8 field_flags;
};
@ -816,7 +1243,8 @@ enum virtchnl_event_codes {
#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
struct virtchnl_pf_event {
enum virtchnl_event_codes event;
/* see enum virtchnl_event_codes */
s32 event;
union {
/* If the PF driver does not support the new speed reporting
* capabilities then use link_event else use link_event_adv to
@ -828,16 +1256,25 @@ struct virtchnl_pf_event {
*/
struct {
enum virtchnl_link_speed link_speed;
u8 link_status;
bool link_status;
u8 pad[3];
} link_event;
struct {
/* link_speed provided in Mbps */
u32 link_speed;
u8 link_status;
u8 pad[3];
} link_event_adv;
struct {
/* link_speed provided in Mbps */
u32 link_speed;
u16 vport_id;
u8 link_status;
u8 pad;
} link_event_adv_vport;
} event_data;
int severity;
s32 severity;
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
@ -875,7 +1312,8 @@ enum virtchnl_queue_type {
/* structure to specify a chunk of contiguous queues */
struct virtchnl_queue_chunk {
enum virtchnl_queue_type type;
/* see enum virtchnl_queue_type */
s32 type;
u16 start_queue_id;
u16 num_queues;
};
@ -895,7 +1333,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_chunks);
* VIRTCHNL_OP_DISABLE_QUEUES_V2
* VIRTCHNL_OP_DEL_QUEUES
*
* If VIRTCHNL_CAP_EXT_FEATURES was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
* If VIRTCHNL version was negotiated in VIRTCHNL_OP_VERSION as 2.0
* then all of these ops are available.
*
* If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
@ -927,17 +1365,17 @@ struct virtchnl_queue_vector {
u16 queue_id;
u16 vector_id;
u8 pad[4];
enum virtchnl_itr_idx itr_idx;
enum virtchnl_queue_type queue_type;
/* see enum virtchnl_itr_idx */
s32 itr_idx;
/* see enum virtchnl_queue_type */
s32 queue_type;
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queue_vector);
/* VIRTCHNL_OP_MAP_QUEUE_VECTOR
* VIRTCHNL_OP_UNMAP_QUEUE_VECTOR
*
* If VIRTCHNL_CAP_EXT_FEATURES was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
* then all of these ops are available.
*
* If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
* then only VIRTCHNL_OP_MAP_QUEUE_VECTOR is available.
@ -989,6 +1427,10 @@ enum virtchnl_vector_limits {
VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX =
((u16)(~0) - sizeof(struct virtchnl_queue_vector_maps)) /
sizeof(struct virtchnl_queue_vector),
VIRTCHNL_OP_ADD_DEL_VLAN_V2_MAX =
((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list_v2)) /
sizeof(struct virtchnl_vlan_filter),
};
/**
@ -1163,6 +1605,33 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_DEL_CLOUD_FILTER:
valid_len = sizeof(struct virtchnl_filter);
break;
case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
break;
case VIRTCHNL_OP_ADD_VLAN_V2:
case VIRTCHNL_OP_DEL_VLAN_V2:
valid_len = sizeof(struct virtchnl_vlan_filter_list_v2);
if (msglen >= valid_len) {
struct virtchnl_vlan_filter_list_v2 *vfl =
(struct virtchnl_vlan_filter_list_v2 *)msg;
if (vfl->num_elements == 0 || vfl->num_elements >
VIRTCHNL_OP_ADD_DEL_VLAN_V2_MAX) {
err_msg_format = true;
break;
}
valid_len += (vfl->num_elements - 1) *
sizeof(struct virtchnl_vlan_filter);
}
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
case VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2:
case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
valid_len = sizeof(struct virtchnl_vlan_setting);
break;
case VIRTCHNL_OP_ENABLE_QUEUES_V2:
case VIRTCHNL_OP_DISABLE_QUEUES_V2:
valid_len = sizeof(struct virtchnl_del_ena_dis_queues);
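The ADD/DEL_VLAN_V2 case above sizes a message whose trailing array is declared as filters[1], so sizeof the list already counts one element and only num_elements - 1 more are added. A standalone sketch of that arithmetic with stand-in sizes (8-byte header plus 32-byte filters, matching the VIRTCHNL_CHECK_STRUCT_LEN values of 40 and 32 above):
#include <stddef.h>
#include <stdio.h>
struct filt { unsigned char pad[32]; };	/* stands in for virtchnl_vlan_filter */
struct list { unsigned char hdr[8]; struct filt filters[1]; };
int
main(void)
{
	unsigned num_elements = 3;
	/* sizeof(struct list) already includes filters[0], so add n - 1 more */
	size_t valid_len = sizeof(struct list) +
	    (num_elements - 1) * sizeof(struct filt);
	printf("%zu\n", valid_len);	/* 40 + 2 * 32 = 104 */
	return (0);
}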

View File

@ -470,7 +470,7 @@ struct virtchnl_ipsec_sa_read {
};
#pragma pack()
/* Add whitelist entry in IES */
/* Add allowlist entry in IES */
struct virtchnl_ipsec_sp_cfg {
u32 spi;
u32 dip[4];
@ -489,7 +489,7 @@ struct virtchnl_ipsec_sp_cfg {
};
#pragma pack(1)
/* Delete whitelist entry in IES */
/* Delete allowlist entry in IES */
struct virtchnl_ipsec_sp_destroy {
/* 0 for IPv4 table, 1 for IPv6 table. */
u8 table_id;
@ -497,7 +497,7 @@ struct virtchnl_ipsec_sp_destroy {
};
#pragma pack()
/* Response from IES to whitelist operations */
/* Response from IES to allowlist operations */
struct virtchnl_ipsec_sp_cfg_resp {
u32 rule_id;
};

View File

@ -0,0 +1,549 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _VIRTCHNL_LAN_DESC_H_
#define _VIRTCHNL_LAN_DESC_H_
/* Rx */
/* For virtchnl_splitq_base_rx_flex desc members */
#define VIRTCHNL_RXD_FLEX_PTYPE_S 0
#define VIRTCHNL_RXD_FLEX_PTYPE_M \
MAKEMASK(0x3FFUL, VIRTCHNL_RXD_FLEX_PTYPE_S)
#define VIRTCHNL_RXD_FLEX_UMBCAST_S 10
#define VIRTCHNL_RXD_FLEX_UMBCAST_M \
MAKEMASK(0x3UL, VIRTCHNL_RXD_FLEX_UMBCAST_S)
#define VIRTCHNL_RXD_FLEX_FF0_S 12
#define VIRTCHNL_RXD_FLEX_FF0_M MAKEMASK(0xFUL, VIRTCHNL_RXD_FLEX_FF0_S)
#define VIRTCHNL_RXD_FLEX_LEN_PBUF_S 0
#define VIRTCHNL_RXD_FLEX_LEN_PBUF_M \
MAKEMASK(0x3FFFUL, VIRTCHNL_RXD_FLEX_LEN_PBUF_S)
#define VIRTCHNL_RXD_FLEX_GEN_S 14
#define VIRTCHNL_RXD_FLEX_GEN_M BIT_ULL(VIRTCHNL_RXD_FLEX_GEN_S)
#define VIRTCHNL_RXD_FLEX_BUFQ_ID_S 15
#define VIRTCHNL_RXD_FLEX_BUFQ_ID_M \
BIT_ULL(VIRTCHNL_RXD_FLEX_BUFQ_ID_S)
#define VIRTCHNL_RXD_FLEX_LEN_HDR_S 0
#define VIRTCHNL_RXD_FLEX_LEN_HDR_M \
MAKEMASK(0x3FFUL, VIRTCHNL_RXD_FLEX_LEN_HDR_S)
#define VIRTCHNL_RXD_FLEX_RSC_S 10
#define VIRTCHNL_RXD_FLEX_RSC_M BIT_ULL(VIRTCHNL_RXD_FLEX_RSC_S)
#define VIRTCHNL_RXD_FLEX_SPH_S 11
#define VIRTCHNL_RXD_FLEX_SPH_M BIT_ULL(VIRTCHNL_RXD_FLEX_SPH_S)
#define VIRTCHNL_RXD_FLEX_MISS_S 12
#define VIRTCHNL_RXD_FLEX_MISS_M \
BIT_ULL(VIRTCHNL_RXD_FLEX_MISS_S)
#define VIRTCHNL_RXD_FLEX_FF1_S 13
#define VIRTCHNL_RXD_FLEX_FF1_M MAKEMASK(0x7UL, VIRTCHNL_RXD_FLEX_FF1_S)
/* For virtchnl_singleq_base_rx_legacy desc members */
#define VIRTCHNL_RXD_QW1_LEN_SPH_S 63
#define VIRTCHNL_RXD_QW1_LEN_SPH_M BIT_ULL(VIRTCHNL_RXD_QW1_LEN_SPH_S)
#define VIRTCHNL_RXD_QW1_LEN_HBUF_S 52
#define VIRTCHNL_RXD_QW1_LEN_HBUF_M \
MAKEMASK(0x7FFULL, VIRTCHNL_RXD_QW1_LEN_HBUF_S)
#define VIRTCHNL_RXD_QW1_LEN_PBUF_S 38
#define VIRTCHNL_RXD_QW1_LEN_PBUF_M \
MAKEMASK(0x3FFFULL, VIRTCHNL_RXD_QW1_LEN_PBUF_S)
#define VIRTCHNL_RXD_QW1_PTYPE_S 30
#define VIRTCHNL_RXD_QW1_PTYPE_M \
MAKEMASK(0xFFULL, VIRTCHNL_RXD_QW1_PTYPE_S)
#define VIRTCHNL_RXD_QW1_ERROR_S 19
#define VIRTCHNL_RXD_QW1_ERROR_M \
MAKEMASK(0xFFUL, VIRTCHNL_RXD_QW1_ERROR_S)
#define VIRTCHNL_RXD_QW1_STATUS_S 0
#define VIRTCHNL_RXD_QW1_STATUS_M \
MAKEMASK(0x7FFFFUL, VIRTCHNL_RXD_QW1_STATUS_S)
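These shift/mask pairs decode fields packed into descriptor qwords. Assuming the conventional MAKEMASK(m, s) == ((m) << (s)) definition (not shown in this file), extraction is mask-then-shift; a minimal sketch for the legacy qword1 PTYPE field:
#include <stdint.h>
#define EX_MAKEMASK(m, s)	((m) << (s))
#define EX_PTYPE_S		30	/* mirrors VIRTCHNL_RXD_QW1_PTYPE_S */
#define EX_PTYPE_M		EX_MAKEMASK(0xFFULL, EX_PTYPE_S)
/* Pull the 8-bit PTYPE field out of a legacy descriptor's qword1. */
static inline uint8_t
example_rx_ptype(uint64_t qw1)
{
	return (uint8_t)((qw1 & EX_PTYPE_M) >> EX_PTYPE_S);
}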
enum virtchnl_rx_flex_desc_status_error_0_qw1_bits {
/* Note: These are predefined bit offsets */
VIRTCHNL_RX_FLEX_DESC_STATUS0_DD_S = 0,
VIRTCHNL_RX_FLEX_DESC_STATUS0_EOF_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_HBO_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_L3L4P_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
};
enum virtchnl_rx_flex_desc_status_error_0_qw0_bits {
VIRTCHNL_RX_FLEX_DESC_STATUS0_LPBK_S = 0,
VIRTCHNL_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_RXE_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_CRCP_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
};
enum virtchnl_rx_flex_desc_status_error_1_bits {
/* Note: These are predefined bit offsets */
VIRTCHNL_RX_FLEX_DESC_STATUS1_RSVD_S = 0, /* 2 bits */
VIRTCHNL_RX_FLEX_DESC_STATUS1_ATRAEFAIL_S = 2,
VIRTCHNL_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 3,
VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 4,
VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 5,
VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 6,
VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 7,
VIRTCHNL_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};
enum virtchnl_rx_base_desc_status_bits {
/* Note: These are predefined bit offsets */
VIRTCHNL_RX_BASE_DESC_STATUS_DD_S = 0,
VIRTCHNL_RX_BASE_DESC_STATUS_EOF_S = 1,
VIRTCHNL_RX_BASE_DESC_STATUS_L2TAG1P_S = 2,
VIRTCHNL_RX_BASE_DESC_STATUS_L3L4P_S = 3,
VIRTCHNL_RX_BASE_DESC_STATUS_CRCP_S = 4,
VIRTCHNL_RX_BASE_DESC_STATUS_RSVD_S = 5, /* 3 BITS */
VIRTCHNL_RX_BASE_DESC_STATUS_EXT_UDP_0_S = 8,
VIRTCHNL_RX_BASE_DESC_STATUS_UMBCAST_S = 9, /* 2 BITS */
VIRTCHNL_RX_BASE_DESC_STATUS_FLM_S = 11,
VIRTCHNL_RX_BASE_DESC_STATUS_FLTSTAT_S = 12, /* 2 BITS */
VIRTCHNL_RX_BASE_DESC_STATUS_LPBK_S = 14,
VIRTCHNL_RX_BASE_DESC_STATUS_IPV6EXADD_S = 15,
VIRTCHNL_RX_BASE_DESC_STATUS_RSVD1_S = 16, /* 2 BITS */
VIRTCHNL_RX_BASE_DESC_STATUS_INT_UDP_0_S = 18,
VIRTCHNL_RX_BASE_DESC_STATUS_LAST /* this entry must be last!!! */
};
enum virtchnl_rx_desc_fltstat_values {
VIRTCHNL_RX_DESC_FLTSTAT_NO_DATA = 0,
VIRTCHNL_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16-byte desc ? FD_ID : RSV */
VIRTCHNL_RX_DESC_FLTSTAT_RSV = 2,
VIRTCHNL_RX_DESC_FLTSTAT_RSS_HASH = 3,
};
enum virtchnl_rx_base_desc_error_bits {
/* Note: These are predefined bit offsets */
VIRTCHNL_RX_BASE_DESC_ERROR_RXE_S = 0,
VIRTCHNL_RX_BASE_DESC_ERROR_ATRAEFAIL_S = 1,
VIRTCHNL_RX_BASE_DESC_ERROR_HBO_S = 2,
VIRTCHNL_RX_BASE_DESC_ERROR_L3L4E_S = 3, /* 3 BITS */
VIRTCHNL_RX_BASE_DESC_ERROR_IPE_S = 3,
VIRTCHNL_RX_BASE_DESC_ERROR_L4E_S = 4,
VIRTCHNL_RX_BASE_DESC_ERROR_EIPE_S = 5,
VIRTCHNL_RX_BASE_DESC_ERROR_OVERSIZE_S = 6,
VIRTCHNL_RX_BASE_DESC_ERROR_RSVD_S = 7
};
/* Receive Descriptors */
/* splitq buf
| 16| 0|
----------------------------------------------------------------
| RSV | Buffer ID |
----------------------------------------------------------------
| Rx packet buffer address |
----------------------------------------------------------------
| Rx header buffer address |
----------------------------------------------------------------
| RSV |
----------------------------------------------------------------
| 0|
*/
struct virtchnl_splitq_rx_buf_desc {
struct {
__le16 buf_id; /* Buffer Identifier */
__le16 rsvd0;
__le32 rsvd1;
} qword0;
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
__le64 rsvd2;
}; /* read used with buffer queues */
/* singleq buf
| 0|
----------------------------------------------------------------
| Rx packet buffer address |
----------------------------------------------------------------
| Rx header buffer address |
----------------------------------------------------------------
| RSV |
----------------------------------------------------------------
| RSV |
----------------------------------------------------------------
| 0|
*/
struct virtchnl_singleq_rx_buf_desc {
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
__le64 rsvd1;
__le64 rsvd2;
}; /* read used with buffer queues */
union virtchnl_rx_buf_desc {
struct virtchnl_singleq_rx_buf_desc read;
struct virtchnl_splitq_rx_buf_desc split_rd;
};
/* (0x00) singleq wb(compl) */
struct virtchnl_singleq_base_rx_desc {
struct {
struct {
__le16 mirroring_status;
__le16 l2tag1;
} lo_dword;
union {
__le32 rss; /* RSS Hash */
__le32 fd_id; /* Flow Director filter id */
} hi_dword;
} qword0;
struct {
/* status/error/PTYPE/length */
__le64 status_error_ptype_len;
} qword1;
struct {
__le16 ext_status; /* extended status */
__le16 rsvd;
__le16 l2tag2_1;
__le16 l2tag2_2;
} qword2;
struct {
__le32 reserved;
__le32 fd_id;
} qword3;
}; /* writeback */
/* (0x01) singleq flex compl */
struct virtchnl_rx_flex_desc {
/* Qword 0 */
u8 rxdid; /* descriptor builder profile id */
u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
__le16 pkt_len; /* [15:14] are reserved */
__le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
/* sph=[11:11] */
/* ff1/ext=[15:12] */
/* Qword 1 */
__le16 status_error0;
__le16 l2tag1;
__le16 flex_meta0;
__le16 flex_meta1;
/* Qword 2 */
__le16 status_error1;
u8 flex_flags2;
u8 time_stamp_low;
__le16 l2tag2_1st;
__le16 l2tag2_2nd;
/* Qword 3 */
__le16 flex_meta2;
__le16 flex_meta3;
union {
struct {
__le16 flex_meta4;
__le16 flex_meta5;
} flex;
__le32 ts_high;
} flex_ts;
};
/* (0x02) */
struct virtchnl_rx_flex_desc_nic {
/* Qword 0 */
u8 rxdid;
u8 mir_id_umb_cast;
__le16 ptype_flexi_flags0;
__le16 pkt_len;
__le16 hdr_len_sph_flex_flags1;
/* Qword 1 */
__le16 status_error0;
__le16 l2tag1;
__le32 rss_hash;
/* Qword 2 */
__le16 status_error1;
u8 flexi_flags2;
u8 ts_low;
__le16 l2tag2_1st;
__le16 l2tag2_2nd;
/* Qword 3 */
__le32 flow_id;
union {
struct {
__le16 rsvd;
__le16 flow_id_ipv6;
} flex;
__le32 ts_high;
} flex_ts;
};
/* Rx Flex Descriptor Switch Profile
* RxDID Profile Id 3
* Flex-field 0: Source Vsi
*/
struct virtchnl_rx_flex_desc_sw {
/* Qword 0 */
u8 rxdid;
u8 mir_id_umb_cast;
__le16 ptype_flexi_flags0;
__le16 pkt_len;
__le16 hdr_len_sph_flex_flags1;
/* Qword 1 */
__le16 status_error0;
__le16 l2tag1;
	__le16 src_vsi; /* [15:10] are reserved */
__le16 flex_md1_rsvd;
/* Qword 2 */
__le16 status_error1;
u8 flex_flags2;
u8 ts_low;
__le16 l2tag2_1st;
__le16 l2tag2_2nd;
/* Qword 3 */
__le32 rsvd; /* flex words 2-3 are reserved */
__le32 ts_high;
};
/* Rx Flex Descriptor NIC VEB Profile
* RxDID Profile Id 4
* Flex-field 0: Destination Vsi
*/
struct virtchnl_rx_flex_desc_nic_veb_dbg {
/* Qword 0 */
u8 rxdid;
u8 mir_id_umb_cast;
__le16 ptype_flexi_flags0;
__le16 pkt_len;
__le16 hdr_len_sph_flex_flags1;
/* Qword 1 */
__le16 status_error0;
__le16 l2tag1;
	__le16 dst_vsi; /* [12:0]: destination VSI */
			/* 13: VSI valid bit */
			/* [15:14] are reserved */
__le16 flex_field_1;
/* Qword 2 */
__le16 status_error1;
u8 flex_flags2;
u8 ts_low;
__le16 l2tag2_1st;
__le16 l2tag2_2nd;
/* Qword 3 */
__le32 rsvd; /* flex words 2-3 are reserved */
__le32 ts_high;
};
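/*
 * Sketch (illustrative only): recovering the destination VSI from the NIC
 * VEB debug profile, using the [12:0] value / bit-13 valid layout
 * documented on dst_vsi above.  LE16_TO_CPU stands in for the consuming
 * driver's byte-order helper.
 */
#if 0
static inline bool
virtchnl_rx_veb_dbg_dst_vsi(const struct virtchnl_rx_flex_desc_nic_veb_dbg *d,
			    u16 *vsi)
{
	u16 v = LE16_TO_CPU(d->dst_vsi);

	if (!(v & BIT(13)))	/* VSI valid bit */
		return false;
	*vsi = v & 0x1FFF;	/* dst_vsi=[12:0] */
	return true;
}
#endif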
/* Rx Flex Descriptor NIC ACL Profile
* RxDID Profile Id 5
* Flex-field 0: ACL Counter 0
* Flex-field 1: ACL Counter 1
* Flex-field 2: ACL Counter 2
*/
struct virtchnl_rx_flex_desc_nic_acl_dbg {
/* Qword 0 */
u8 rxdid;
u8 mir_id_umb_cast;
__le16 ptype_flexi_flags0;
__le16 pkt_len;
__le16 hdr_len_sph_flex_flags1;
/* Qword 1 */
__le16 status_error0;
__le16 l2tag1;
__le16 acl_ctr0;
__le16 acl_ctr1;
/* Qword 2 */
__le16 status_error1;
u8 flex_flags2;
u8 ts_low;
__le16 l2tag2_1st;
__le16 l2tag2_2nd;
/* Qword 3 */
__le16 acl_ctr2;
__le16 rsvd; /* flex words 2-3 are reserved */
__le32 ts_high;
};
/* Rx Flex Descriptor NIC Profile
* RxDID Profile Id 6
* Flex-field 0: RSS hash lower 16-bits
* Flex-field 1: RSS hash upper 16-bits
* Flex-field 2: Flow Id lower 16-bits
* Flex-field 3: Source Vsi
 * Flex-field 4: reserved, VLAN ID taken from L2Tag
*/
struct virtchnl_rx_flex_desc_nic_2 {
/* Qword 0 */
u8 rxdid;
u8 mir_id_umb_cast;
__le16 ptype_flexi_flags0;
__le16 pkt_len;
__le16 hdr_len_sph_flex_flags1;
/* Qword 1 */
__le16 status_error0;
__le16 l2tag1;
__le32 rss_hash;
/* Qword 2 */
__le16 status_error1;
u8 flexi_flags2;
u8 ts_low;
__le16 l2tag2_1st;
__le16 l2tag2_2nd;
/* Qword 3 */
__le16 flow_id;
__le16 src_vsi;
union {
struct {
__le16 rsvd;
__le16 flow_id_ipv6;
} flex;
__le32 ts_high;
} flex_ts;
};
/* Rx Flex Descriptor Advanced (Split Queue Model)
* RxDID Profile Id 7
*/
struct virtchnl_rx_flex_desc_adv {
/* Qword 0 */
u8 rxdid_ucast; /* profile_id=[3:0] */
/* rsvd=[5:4] */
/* ucast=[7:6] */
u8 status_err0_qw0;
__le16 ptype_err_fflags0; /* ptype=[9:0] */
/* ip_hdr_err=[10:10] */
/* udp_len_err=[11:11] */
/* ff0=[15:12] */
__le16 pktlen_gen_bufq_id; /* plen=[13:0] */
/* gen=[14:14] only in splitq */
/* bufq_id=[15:15] only in splitq */
__le16 hdrlen_flags; /* header=[9:0] */
/* rsc=[10:10] only in splitq */
/* sph=[11:11] only in splitq */
/* ext_udp_0=[12:12] */
/* int_udp_0=[13:13] */
/* trunc_mirr=[14:14] */
/* miss_prepend=[15:15] */
/* Qword 1 */
u8 status_err0_qw1;
u8 status_err1;
u8 fflags1;
u8 ts_low;
__le16 fmd0;
__le16 fmd1;
/* Qword 2 */
__le16 fmd2;
u8 fflags2;
u8 hash3;
__le16 fmd3;
__le16 fmd4;
/* Qword 3 */
__le16 fmd5;
__le16 fmd6;
__le16 fmd7_0;
__le16 fmd7_1;
}; /* writeback */
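/*
 * Split-queue parse sketch (illustrative only), following the layout in the
 * comments above: plen=[13:0], gen=[14] and bufq_id=[15] of
 * pktlen_gen_bufq_id.  The gen bit is what a split-queue driver compares
 * against its expected generation to spot freshly written completions.
 */
#if 0
static inline u16
virtchnl_rx_flex_adv_pkt_len(const struct virtchnl_rx_flex_desc_adv *d)
{
	return LE16_TO_CPU(d->pktlen_gen_bufq_id) & 0x3FFF;	/* plen=[13:0] */
}

static inline u8
virtchnl_rx_flex_adv_gen(const struct virtchnl_rx_flex_desc_adv *d)
{
	return (LE16_TO_CPU(d->pktlen_gen_bufq_id) >> 14) & 0x1; /* splitq only */
}
#endif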
/* Rx Flex Descriptor Advanced (Split Queue Model) NIC Profile
* RxDID Profile Id 8
* Flex-field 0: BufferID
* Flex-field 1: Raw checksum/L2TAG1/RSC Seg Len (determined by HW)
* Flex-field 2: Hash[15:0]
* Flex-flags 2: Hash[23:16]
* Flex-field 3: L2TAG2
* Flex-field 5: L2TAG1
* Flex-field 7: Timestamp (upper 32 bits)
*/
struct virtchnl_rx_flex_desc_adv_nic_3 {
/* Qword 0 */
u8 rxdid_ucast; /* profile_id=[3:0] */
/* rsvd=[5:4] */
/* ucast=[7:6] */
u8 status_err0_qw0;
__le16 ptype_err_fflags0; /* ptype=[9:0] */
/* ip_hdr_err=[10:10] */
/* udp_len_err=[11:11] */
/* ff0=[15:12] */
__le16 pktlen_gen_bufq_id; /* plen=[13:0] */
/* gen=[14:14] only in splitq */
/* bufq_id=[15:15] only in splitq */
__le16 hdrlen_flags; /* header=[9:0] */
/* rsc=[10:10] only in splitq */
/* sph=[11:11] only in splitq */
/* ext_udp_0=[12:12] */
/* int_udp_0=[13:13] */
/* trunc_mirr=[14:14] */
/* miss_prepend=[15:15] */
/* Qword 1 */
u8 status_err0_qw1;
u8 status_err1;
u8 fflags1;
u8 ts_low;
__le16 buf_id; /* only in splitq */
union {
__le16 raw_cs;
__le16 l2tag1;
__le16 rscseglen;
} misc;
/* Qword 2 */
__le16 hash1;
union {
u8 fflags2;
u8 mirrorid;
u8 hash2;
} ff2_mirrid_hash2;
u8 hash3;
__le16 l2tag2;
__le16 fmd4;
/* Qword 3 */
__le16 l2tag1;
__le16 fmd6;
__le32 ts_high;
}; /* writeback */
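/*
 * Hash-assembly sketch (illustrative only): the profile comments above put
 * Hash[15:0] in hash1 and Hash[23:16] in flex-flags 2; treating hash3 as
 * Hash[31:24] is an additional assumption made here, not something this
 * header states.
 */
#if 0
static inline u32
virtchnl_rx_adv_nic_3_hash(const struct virtchnl_rx_flex_desc_adv_nic_3 *d)
{
	return LE16_TO_CPU(d->hash1) |
	       ((u32)d->ff2_mirrid_hash2.hash2 << 16) |
	       ((u32)d->hash3 << 24);	/* hash3 as [31:24] is assumed */
}
#endif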
union virtchnl_rx_desc {
struct virtchnl_singleq_rx_buf_desc read;
struct virtchnl_singleq_base_rx_desc base_wb;
struct virtchnl_rx_flex_desc flex_wb;
struct virtchnl_rx_flex_desc_adv flex_wb_adv;
};
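/*
 * Usage sketch (illustrative only): a single-queue Rx ring is one array of
 * these unions, written by software through the read member when posting
 * buffers and overwritten by hardware in base or flex writeback format on
 * completion.  CPU_TO_LE64 stands in for the driver's byte-order helper.
 */
#if 0
static inline void
virtchnl_rx_post_buf(union virtchnl_rx_desc *d, u64 pkt_dma, u64 hdr_dma)
{
	d->read.pkt_addr = CPU_TO_LE64(pkt_dma);
	d->read.hdr_addr = CPU_TO_LE64(hdr_dma);
	d->read.rsvd1 = 0;
	d->read.rsvd2 = 0;
}
#endif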
#endif /* _VIRTCHNL_LAN_DESC_H_ */