ixl(4): Add FW recovery mode support and other things
Update the iflib version of the ixl driver based on the OOT version ixl-1.11.29.

Major changes:
- Extract iflib-specific functions from ixl_pf_main.c into ixl_pf_iflib.c to
  simplify code sharing between the legacy and iflib versions of the driver
- Add support for the most recent FW API version (1.10), which extends FW LLDP
  Agent control by the user to X722 devices
- Improve handling of device global reset
- Add support for the FW recovery mode
- Use a virtchnl function to validate virtual channel messages instead of
  separate checks
- Fix MAC/VLAN filter accounting

Submitted by:	Krzysztof Galazka <krzysztof.galazka@intel.com>
Reviewed by:	erj@
Tested by:	Jeffrey Pieper <jeffrey.e.pieper@intel.com>
MFC after:	1 week
Relnotes:	yes
Sponsored by:	Intel Corporation
Differential Revision:	https://reviews.freebsd.org/D24564
Commit: b4a7ce0690 (parent: a3d565a118)

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=361992
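The "FW LLDP Agent control by user" item above is carried by the new persist parameter on the shared-code LLDP wrappers changed later in this diff (i40e_aq_stop_lldp() and i40e_aq_start_lldp()). The sketch below is illustrative only: the helper name, the error handling, and the place where ixl(4) actually issues these admin queue commands are assumptions, not part of this commit.

	/* Illustrative sketch only -- not code from this commit.  Assumes the
	 * shared-code prototypes shown further down in this diff. */
	static enum i40e_status_code
	example_toggle_fw_lldp(struct i40e_hw *hw, bool enable)
	{
		if (enable)
			/* Restart the FW LLDP agent; ask for persistence across
			 * power cycles when the FW supports it. */
			return (i40e_aq_start_lldp(hw, TRUE, NULL));

		/* Stop (do not shut down) the agent.  If the FW API is too old
		 * for I40E_HW_FLAG_FW_LLDP_PERSISTENT, the wrapper logs that and
		 * performs a plain, non-persistent stop. */
		return (i40e_aq_stop_lldp(hw, FALSE, TRUE, NULL));
	}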
sys/conf/files:

@@ -196,6 +196,8 @@ dev/ixl/if_ixl.c optional ixl pci \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_main.c optional ixl pci \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_iflib.c optional ixl pci \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_qmgr.c optional ixl pci \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_iov.c optional ixl pci pci_iov \
sys/conf/files.powerpc64:

@@ -41,6 +41,8 @@ dev/ixl/if_ixl.c optional ixl pci powerpc64 \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_main.c optional ixl pci powerpc64 \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_iflib.c optional ixl pci powerpc64 \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_qmgr.c optional ixl pci powerpc64 \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_iov.c optional ixl pci pci_iov powerpc64 \
sys/dev/ixl/i40e_adminq.c:

@@ -125,6 +125,7 @@ enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

@@ -404,7 +405,7 @@ enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;
		goto init_config_regs;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
@@ -412,6 +413,10 @@ enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);
	return ret_code;

init_config_regs:
	i40e_free_asq_bufs(hw);

init_adminq_exit:
	return ret_code;
@@ -562,6 +567,70 @@ static void i40e_resume_aq(struct i40e_hw *hw)
	i40e_config_arq_regs(hw);
}

/**
 * i40e_set_hw_flags - set HW flags
 * @hw: pointer to the hardware structure
 **/
static void i40e_set_hw_flags(struct i40e_hw *hw)
{
	struct i40e_adminq_info *aq = &hw->aq;

	hw->flags = 0;

	switch (hw->mac.type) {
	case I40E_MAC_XL710:
		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
			hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
			/* The ability to RX (not drop) 802.1ad frames */
			hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
		}
		break;
	case I40E_MAC_X722:
		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
			     I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
			hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722))
			hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;

		/* fall through */
	default:
		break;
	}

	/* Newer versions of firmware require lock when reading the NVM */
	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 5))
		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 8)) {
		hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
		hw->flags |= I40E_HW_FLAG_DROP_MODE;
	}

	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 9))
		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
}

/**
 * i40e_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
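Every capability in i40e_set_hw_flags() above is gated on the same "major greater than 1, or major equal to 1 and minor at least X" comparison against the negotiated admin queue API version. A minimal sketch of that idiom as a standalone helper follows; the helper name is hypothetical and the shared code open-codes the test rather than using such a function.

	/* Hypothetical helper, shown only to illustrate the version test that
	 * i40e_set_hw_flags() repeats for each capability flag. */
	static inline bool
	aq_api_version_ge(struct i40e_adminq_info *aq, u16 maj, u16 min)
	{
		return (aq->api_maj_ver > maj ||
		    (aq->api_maj_ver == maj && aq->api_min_ver >= min));
	}

	/* Example: the X722 FEC-request check above would then read
	 *	if (aq_api_version_ge(aq, 1, I40E_MINOR_VER_FW_REQUEST_FEC_X722))
	 *		hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;
	 */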
@@ -575,21 +644,22 @@ static void i40e_resume_aq(struct i40e_hw *hw)
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
	struct i40e_adminq_info *aq = &hw->aq;
	enum i40e_status_code ret_code;
	u16 cfg_ptr, oem_hi, oem_lo;
	u16 eetrack_lo, eetrack_hi;
	enum i40e_status_code ret_code;
	int retry = 0;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
	if (aq->num_arq_entries == 0 ||
	    aq->num_asq_entries == 0 ||
	    aq->arq_buf_size == 0 ||
	    aq->asq_buf_size == 0) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}
	i40e_init_spinlock(&hw->aq.asq_spinlock);
	i40e_init_spinlock(&hw->aq.arq_spinlock);
	i40e_init_spinlock(&aq->asq_spinlock);
	i40e_init_spinlock(&aq->arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);
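The validation at the top of i40e_init_adminq() makes the caller's contract explicit: the queue depths and buffer sizes must be filled in before the call, otherwise it fails with I40E_ERR_CONFIG. A minimal sketch of that caller-side setup follows; the numeric values and the EXAMPLE_* names are placeholders, not the constants ixl(4) actually uses.

	/* Sketch of the setup that i40e_init_adminq() checks for above.
	 * EXAMPLE_AQ_LEN / EXAMPLE_AQ_BUF_SIZE are placeholders, not driver
	 * constants. */
	#define EXAMPLE_AQ_LEN		256
	#define EXAMPLE_AQ_BUF_SIZE	4096

	static enum i40e_status_code
	example_bring_up_adminq(struct i40e_hw *hw)
	{
		hw->aq.num_arq_entries = EXAMPLE_AQ_LEN;
		hw->aq.num_asq_entries = EXAMPLE_AQ_LEN;
		hw->aq.arq_buf_size = EXAMPLE_AQ_BUF_SIZE;
		hw->aq.asq_buf_size = EXAMPLE_AQ_BUF_SIZE;

		return (i40e_init_adminq(hw));
	}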
@ -616,11 +686,11 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
|
||||
*/
|
||||
do {
|
||||
ret_code = i40e_aq_get_firmware_version(hw,
|
||||
&hw->aq.fw_maj_ver,
|
||||
&hw->aq.fw_min_ver,
|
||||
&hw->aq.fw_build,
|
||||
&hw->aq.api_maj_ver,
|
||||
&hw->aq.api_min_ver,
|
||||
&aq->fw_maj_ver,
|
||||
&aq->fw_min_ver,
|
||||
&aq->fw_build,
|
||||
&aq->api_maj_ver,
|
||||
&aq->api_min_ver,
|
||||
NULL);
|
||||
if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
|
||||
break;
|
||||
@ -631,6 +701,12 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
|
||||
if (ret_code != I40E_SUCCESS)
|
||||
goto init_adminq_free_arq;
|
||||
|
||||
/*
|
||||
* Some features were introduced in different FW API version
|
||||
* for different MAC type.
|
||||
*/
|
||||
i40e_set_hw_flags(hw);
|
||||
|
||||
/* get the NVM version info */
|
||||
i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
|
||||
&hw->nvm.version);
|
||||
@ -644,25 +720,7 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
|
||||
&oem_lo);
|
||||
hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
|
||||
|
||||
/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
|
||||
if ((hw->aq.api_maj_ver > 1) ||
|
||||
((hw->aq.api_maj_ver == 1) &&
|
||||
(hw->aq.api_min_ver >= 7)))
|
||||
hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
|
||||
|
||||
if (hw->mac.type == I40E_MAC_XL710 &&
|
||||
hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
|
||||
hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
|
||||
hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
|
||||
}
|
||||
|
||||
/* Newer versions of firmware require lock when reading the NVM */
|
||||
if ((hw->aq.api_maj_ver > 1) ||
|
||||
((hw->aq.api_maj_ver == 1) &&
|
||||
(hw->aq.api_min_ver >= 5)))
|
||||
hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
|
||||
|
||||
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
|
||||
if (aq->api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
|
||||
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
|
||||
goto init_adminq_free_arq;
|
||||
}
|
||||
@ -682,8 +740,8 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
|
||||
init_adminq_free_asq:
|
||||
i40e_shutdown_asq(hw);
|
||||
init_adminq_destroy_spinlocks:
|
||||
i40e_destroy_spinlock(&hw->aq.asq_spinlock);
|
||||
i40e_destroy_spinlock(&hw->aq.arq_spinlock);
|
||||
i40e_destroy_spinlock(&aq->asq_spinlock);
|
||||
i40e_destroy_spinlock(&aq->arq_spinlock);
|
||||
|
||||
init_adminq_exit:
|
||||
return ret_code;
|
||||
@ -728,7 +786,7 @@ u16 i40e_clean_asq(struct i40e_hw *hw)
|
||||
desc = I40E_ADMINQ_DESC(*asq, ntc);
|
||||
details = I40E_ADMINQ_DETAILS(*asq, ntc);
|
||||
while (rd32(hw, hw->aq.asq.head) != ntc) {
|
||||
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
|
||||
i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
|
||||
"ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
|
||||
|
||||
if (details->callback) {
|
||||
@ -808,7 +866,7 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
|
||||
if (val >= hw->aq.num_asq_entries) {
|
||||
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
|
||||
"AQTX: head overrun at %d\n", val);
|
||||
status = I40E_ERR_QUEUE_EMPTY;
|
||||
status = I40E_ERR_ADMIN_QUEUE_FULL;
|
||||
goto asq_send_command_error;
|
||||
}
|
||||
|
||||
@ -896,7 +954,7 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
|
||||
}
|
||||
|
||||
/* bump the tail */
|
||||
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
|
||||
i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
|
||||
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
|
||||
buff, buff_size);
|
||||
(hw->aq.asq.next_to_use)++;
|
||||
@ -942,12 +1000,14 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
|
||||
cmd_completed = TRUE;
|
||||
if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
|
||||
status = I40E_SUCCESS;
|
||||
else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
|
||||
status = I40E_ERR_NOT_READY;
|
||||
else
|
||||
status = I40E_ERR_ADMIN_QUEUE_ERROR;
|
||||
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
|
||||
}
|
||||
|
||||
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
|
||||
i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
|
||||
"AQTX: desc and buffer writeback:\n");
|
||||
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
|
||||
|
||||
@ -1063,7 +1123,7 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
|
||||
hw->aq.arq.r.arq_bi[desc_idx].va,
|
||||
e->msg_len, I40E_DMA_TO_NONDMA);
|
||||
|
||||
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
|
||||
i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
|
||||
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
|
||||
hw->aq.arq_buf_size);
|
||||
|
||||
|
@ -43,8 +43,8 @@
|
||||
|
||||
|
||||
#define I40E_FW_API_VERSION_MAJOR 0x0001
|
||||
#define I40E_FW_API_VERSION_MINOR_X722 0x0005
|
||||
#define I40E_FW_API_VERSION_MINOR_X710 0x0007
|
||||
#define I40E_FW_API_VERSION_MINOR_X722 0x000A
|
||||
#define I40E_FW_API_VERSION_MINOR_X710 0x000A
|
||||
|
||||
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
|
||||
I40E_FW_API_VERSION_MINOR_X710 : \
|
||||
@ -52,6 +52,12 @@
|
||||
|
||||
/* API version 1.7 implements additional link and PHY-specific APIs */
|
||||
#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
|
||||
/* API version 1.9 for X722 implements additional link and PHY-specific APIs */
|
||||
#define I40E_MINOR_VER_GET_LINK_INFO_X722 0x0009
|
||||
/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
|
||||
#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
|
||||
/* API version 1.10 for X722 devices adds ability to request FEC encoding */
|
||||
#define I40E_MINOR_VER_FW_REQUEST_FEC_X722 0x000A
|
||||
|
||||
struct i40e_aq_desc {
|
||||
__le16 flags;
|
||||
@ -204,6 +210,7 @@ enum i40e_admin_queue_opc {
|
||||
i40e_aqc_opc_add_cloud_filters = 0x025C,
|
||||
i40e_aqc_opc_remove_cloud_filters = 0x025D,
|
||||
i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
|
||||
i40e_aqc_opc_replace_cloud_filters = 0x025F,
|
||||
|
||||
i40e_aqc_opc_add_mirror_rule = 0x0260,
|
||||
i40e_aqc_opc_delete_mirror_rule = 0x0261,
|
||||
@ -289,6 +296,7 @@ enum i40e_admin_queue_opc {
|
||||
i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07,
|
||||
i40e_aqc_opc_lldp_set_local_mib = 0x0A08,
|
||||
i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
|
||||
i40e_aqc_opc_lldp_restore = 0x0A0A,
|
||||
|
||||
/* Tunnel commands */
|
||||
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
|
||||
@ -1382,14 +1390,17 @@ struct i40e_aqc_add_remove_cloud_filters {
|
||||
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
|
||||
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
|
||||
I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
|
||||
u8 reserved2[4];
|
||||
u8 big_buffer_flag;
|
||||
#define I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER 1
|
||||
#define I40E_AQC_ADD_CLOUD_CMD_BB 1
|
||||
u8 reserved2[3];
|
||||
__le32 addr_high;
|
||||
__le32 addr_low;
|
||||
};
|
||||
|
||||
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
|
||||
|
||||
struct i40e_aqc_add_remove_cloud_filters_element_data {
|
||||
struct i40e_aqc_cloud_filters_element_data {
|
||||
u8 outer_mac[6];
|
||||
u8 inner_mac[6];
|
||||
__le16 inner_vlan;
|
||||
@ -1401,13 +1412,16 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
|
||||
struct {
|
||||
u8 data[16];
|
||||
} v6;
|
||||
struct {
|
||||
__le16 data[8];
|
||||
} raw_v6;
|
||||
} ipaddr;
|
||||
__le16 flags;
|
||||
#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
|
||||
#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
|
||||
I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
|
||||
/* 0x0000 reserved */
|
||||
#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
|
||||
/* 0x0001 reserved */
|
||||
/* 0x0002 reserved */
|
||||
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
|
||||
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
|
||||
@ -1419,6 +1433,13 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
|
||||
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
|
||||
#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
|
||||
#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
|
||||
/* 0x000D reserved */
|
||||
/* 0x000E reserved */
|
||||
/* 0x000F reserved */
|
||||
/* 0x0010 to 0x0017 is for custom filters */
|
||||
#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */
|
||||
#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */
|
||||
#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */
|
||||
|
||||
#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
|
||||
#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
|
||||
@ -1453,6 +1474,88 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
|
||||
u8 response_reserved[7];
|
||||
};
|
||||
|
||||
/* i40e_aqc_add_rm_cloud_filt_elem_ext is used when
|
||||
* I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER flag is set.
|
||||
*/
|
||||
struct i40e_aqc_add_rm_cloud_filt_elem_ext {
|
||||
struct i40e_aqc_cloud_filters_element_data element;
|
||||
u16 general_fields[32];
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
|
||||
};
|
||||
|
||||
I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data);
|
||||
|
||||
/* i40e_aqc_cloud_filters_element_bb is used when
|
||||
* I40E_AQC_CLOUD_CMD_BB flag is set.
|
||||
*/
|
||||
struct i40e_aqc_cloud_filters_element_bb {
|
||||
struct i40e_aqc_cloud_filters_element_data element;
|
||||
u16 general_fields[32];
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
|
||||
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
|
||||
};
|
||||
|
||||
I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb);
|
||||
|
||||
struct i40e_aqc_remove_cloud_filters_completion {
|
||||
__le16 perfect_ovlan_used;
|
||||
__le16 perfect_ovlan_free;
|
||||
@ -1464,6 +1567,61 @@ struct i40e_aqc_remove_cloud_filters_completion {
|
||||
|
||||
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
|
||||
|
||||
/* Replace filter Command 0x025F
|
||||
* uses the i40e_aqc_replace_cloud_filters,
|
||||
* and the generic indirect completion structure
|
||||
*/
|
||||
struct i40e_filter_data {
|
||||
u8 filter_type;
|
||||
u8 input[3];
|
||||
};
|
||||
|
||||
I40E_CHECK_STRUCT_LEN(4, i40e_filter_data);
|
||||
|
||||
struct i40e_aqc_replace_cloud_filters_cmd {
|
||||
u8 valid_flags;
|
||||
#define I40E_AQC_REPLACE_L1_FILTER 0x0
|
||||
#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1
|
||||
#define I40E_AQC_GET_CLOUD_FILTERS 0x2
|
||||
#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4
|
||||
#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8
|
||||
u8 old_filter_type;
|
||||
u8 new_filter_type;
|
||||
u8 tr_bit;
|
||||
u8 tr_bit2;
|
||||
u8 reserved[3];
|
||||
__le32 addr_high;
|
||||
__le32 addr_low;
|
||||
};
|
||||
|
||||
I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd);
|
||||
|
||||
struct i40e_aqc_replace_cloud_filters_cmd_buf {
|
||||
u8 data[32];
|
||||
/* Filter type INPUT codes*/
|
||||
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3
|
||||
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED (1 << 7UL)
|
||||
|
||||
/* Field Vector offsets */
|
||||
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0
|
||||
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6
|
||||
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7
|
||||
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8
|
||||
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9
|
||||
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10
|
||||
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11
|
||||
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12
|
||||
/* big FLU */
|
||||
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14
|
||||
/* big FLU */
|
||||
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15
|
||||
|
||||
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37
|
||||
struct i40e_filter_data filters[8];
|
||||
};
|
||||
|
||||
I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf);
|
||||
|
||||
/* Add Mirror Rule (indirect or direct 0x0260)
|
||||
* Delete Mirror Rule (indirect or direct 0x0261)
|
||||
* note: some rule types (4,5) do not use an external buffer.
|
||||
@ -1865,6 +2023,7 @@ struct i40e_aq_get_phy_abilities_resp {
|
||||
#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40
|
||||
#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80
|
||||
__le16 eee_capability;
|
||||
#define I40E_AQ_EEE_AUTO 0x0001
|
||||
#define I40E_AQ_EEE_100BASE_TX 0x0002
|
||||
#define I40E_AQ_EEE_1000BASE_T 0x0004
|
||||
#define I40E_AQ_EEE_10GBASE_T 0x0008
|
||||
@ -1931,20 +2090,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
|
||||
struct i40e_aq_set_mac_config {
|
||||
__le16 max_frame_size;
|
||||
u8 params;
|
||||
#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
|
||||
#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
|
||||
#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
|
||||
#define I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN 0x80
|
||||
u8 tx_timer_priority; /* bitmap */
|
||||
__le16 tx_timer_value;
|
||||
__le16 fc_refresh_threshold;
|
||||
@ -2077,8 +2237,8 @@ struct i40e_aqc_set_lb_mode {
|
||||
#define I40E_AQ_LB_SERDES 2
|
||||
#define I40E_AQ_LB_PHY_INT 3
|
||||
#define I40E_AQ_LB_PHY_EXT 4
|
||||
#define I40E_AQ_LB_CPVL_PCS 5
|
||||
#define I40E_AQ_LB_CPVL_EXT 6
|
||||
#define I40E_AQ_LB_BASE_T_PCS 5
|
||||
#define I40E_AQ_LB_BASE_T_EXT 6
|
||||
#define I40E_AQ_LB_PHY_LOCAL 0x01
|
||||
#define I40E_AQ_LB_PHY_REMOTE 0x02
|
||||
#define I40E_AQ_LB_MAC_LOCAL 0x04
|
||||
@ -2142,7 +2302,13 @@ struct i40e_aqc_phy_register_access {
|
||||
#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
|
||||
#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
|
||||
u8 dev_addres;
|
||||
u8 reserved1[2];
|
||||
u8 cmd_flags;
|
||||
#define I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE 0x01
|
||||
#define I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER 0x02
|
||||
#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT 2
|
||||
#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK (0x3 << \
|
||||
I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT)
|
||||
u8 reserved1;
|
||||
__le32 reg_address;
|
||||
__le32 reg_value;
|
||||
u8 reserved2[4];
|
||||
@ -2157,6 +2323,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
|
||||
struct i40e_aqc_nvm_update {
|
||||
u8 command_flags;
|
||||
#define I40E_AQ_NVM_LAST_CMD 0x01
|
||||
#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20
|
||||
#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40
|
||||
#define I40E_AQ_NVM_FLASH_ONLY 0x80
|
||||
#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
|
||||
#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
|
||||
@ -2404,18 +2572,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
|
||||
/* Stop LLDP (direct 0x0A05) */
|
||||
struct i40e_aqc_lldp_stop {
|
||||
u8 command;
|
||||
#define I40E_AQ_LLDP_AGENT_STOP 0x0
|
||||
#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
|
||||
#define I40E_AQ_LLDP_AGENT_STOP 0x0
|
||||
#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
|
||||
#define I40E_AQ_LLDP_AGENT_STOP_PERSIST 0x2
|
||||
u8 reserved[15];
|
||||
};
|
||||
|
||||
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
|
||||
|
||||
/* Start LLDP (direct 0x0A06) */
|
||||
|
||||
struct i40e_aqc_lldp_start {
|
||||
u8 command;
|
||||
#define I40E_AQ_LLDP_AGENT_START 0x1
|
||||
#define I40E_AQ_LLDP_AGENT_START 0x1
|
||||
#define I40E_AQ_LLDP_AGENT_START_PERSIST 0x2
|
||||
u8 reserved[15];
|
||||
};
|
||||
|
||||
@ -2535,6 +2704,16 @@ struct i40e_aqc_lldp_stop_start_specific_agent {
|
||||
|
||||
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
|
||||
|
||||
/* Restore LLDP Agent factory settings (direct 0x0A0A) */
|
||||
struct i40e_aqc_lldp_restore {
|
||||
u8 command;
|
||||
#define I40E_AQ_LLDP_AGENT_RESTORE_NOT 0x0
|
||||
#define I40E_AQ_LLDP_AGENT_RESTORE 0x1
|
||||
u8 reserved[15];
|
||||
};
|
||||
|
||||
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_restore);
|
||||
|
||||
/* Add Udp Tunnel command and completion (direct 0x0B00) */
|
||||
struct i40e_aqc_add_udp_tunnel {
|
||||
__le16 udp_port;
|
||||
|
@ -66,6 +66,8 @@ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
|
||||
case I40E_DEV_ID_20G_KR2_A:
|
||||
case I40E_DEV_ID_25G_B:
|
||||
case I40E_DEV_ID_25G_SFP28:
|
||||
case I40E_DEV_ID_X710_N3000:
|
||||
case I40E_DEV_ID_XXV710_N3000:
|
||||
hw->mac.type = I40E_MAC_XL710;
|
||||
break;
|
||||
case I40E_DEV_ID_KX_X722:
|
||||
@ -319,32 +321,37 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
|
||||
void *buffer, u16 buf_len)
|
||||
{
|
||||
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
|
||||
u32 effective_mask = hw->debug_mask & mask;
|
||||
u8 *buf = (u8 *)buffer;
|
||||
u16 len;
|
||||
u16 i = 0;
|
||||
u16 i;
|
||||
|
||||
if ((!(mask & hw->debug_mask)) || (desc == NULL))
|
||||
if (!effective_mask || !desc)
|
||||
return;
|
||||
|
||||
len = LE16_TO_CPU(aq_desc->datalen);
|
||||
|
||||
i40e_debug(hw, mask,
|
||||
i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
|
||||
"AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
|
||||
LE16_TO_CPU(aq_desc->opcode),
|
||||
LE16_TO_CPU(aq_desc->flags),
|
||||
LE16_TO_CPU(aq_desc->datalen),
|
||||
LE16_TO_CPU(aq_desc->retval));
|
||||
i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
|
||||
i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
|
||||
"\tcookie (h,l) 0x%08X 0x%08X\n",
|
||||
LE32_TO_CPU(aq_desc->cookie_high),
|
||||
LE32_TO_CPU(aq_desc->cookie_low));
|
||||
i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
|
||||
i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
|
||||
"\tparam (0,1) 0x%08X 0x%08X\n",
|
||||
LE32_TO_CPU(aq_desc->params.internal.param0),
|
||||
LE32_TO_CPU(aq_desc->params.internal.param1));
|
||||
i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
|
||||
i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
|
||||
"\taddr (h,l) 0x%08X 0x%08X\n",
|
||||
LE32_TO_CPU(aq_desc->params.external.addr_high),
|
||||
LE32_TO_CPU(aq_desc->params.external.addr_low));
|
||||
|
||||
if ((buffer != NULL) && (aq_desc->datalen != 0)) {
|
||||
if (buffer && (buf_len != 0) && (len != 0) &&
|
||||
(effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
|
||||
i40e_debug(hw, mask, "AQ CMD Buffer:\n");
|
||||
if (buf_len < len)
|
||||
len = buf_len;
|
||||
@ -1011,9 +1018,17 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
|
||||
else
|
||||
hw->pf_id = (u8)(func_rid & 0x7);
|
||||
|
||||
if (hw->mac.type == I40E_MAC_X722)
|
||||
hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
|
||||
I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
|
||||
/* NVMUpdate features structure initialization */
|
||||
hw->nvmupd_features.major = I40E_NVMUPD_FEATURES_API_VER_MAJOR;
|
||||
hw->nvmupd_features.minor = I40E_NVMUPD_FEATURES_API_VER_MINOR;
|
||||
hw->nvmupd_features.size = sizeof(hw->nvmupd_features);
|
||||
i40e_memset(hw->nvmupd_features.features, 0x0,
|
||||
I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN *
|
||||
sizeof(*hw->nvmupd_features.features),
|
||||
I40E_NONDMA_MEM);
|
||||
|
||||
/* No features supported at the moment */
|
||||
hw->nvmupd_features.features[0] = 0;
|
||||
|
||||
status = i40e_init_nvm(hw);
|
||||
return status;
|
||||
@ -1272,6 +1287,29 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
|
||||
return media;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_poll_globr - Poll for Global Reset completion
|
||||
* @hw: pointer to the hardware structure
|
||||
* @retry_limit: how many times to retry before failure
|
||||
**/
|
||||
static enum i40e_status_code i40e_poll_globr(struct i40e_hw *hw,
|
||||
u32 retry_limit)
|
||||
{
|
||||
u32 cnt, reg = 0;
|
||||
|
||||
for (cnt = 0; cnt < retry_limit; cnt++) {
|
||||
reg = rd32(hw, I40E_GLGEN_RSTAT);
|
||||
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
|
||||
return I40E_SUCCESS;
|
||||
i40e_msec_delay(100);
|
||||
}
|
||||
|
||||
DEBUGOUT("Global reset failed.\n");
|
||||
DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg);
|
||||
|
||||
return I40E_ERR_RESET_FAILED;
|
||||
}
|
||||
|
||||
#define I40E_PF_RESET_WAIT_COUNT 200
|
||||
/**
|
||||
* i40e_pf_reset - Reset the PF
|
||||
@ -1295,7 +1333,7 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
|
||||
I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
|
||||
I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
|
||||
|
||||
grst_del = grst_del * 20;
|
||||
grst_del = min(grst_del * 20, 160U);
|
||||
|
||||
for (cnt = 0; cnt < grst_del; cnt++) {
|
||||
reg = rd32(hw, I40E_GLGEN_RSTAT);
|
||||
@ -1341,14 +1379,14 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
|
||||
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
|
||||
break;
|
||||
reg2 = rd32(hw, I40E_GLGEN_RSTAT);
|
||||
if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
|
||||
DEBUGOUT("Core reset upcoming. Skipping PF reset request.\n");
|
||||
DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg2);
|
||||
return I40E_ERR_NOT_READY;
|
||||
}
|
||||
if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
|
||||
break;
|
||||
i40e_msec_delay(1);
|
||||
}
|
||||
if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
|
||||
if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
|
||||
if (i40e_poll_globr(hw, grst_del) != I40E_SUCCESS)
|
||||
return I40E_ERR_RESET_FAILED;
|
||||
} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
|
||||
DEBUGOUT("PF reset polling failed to complete.\n");
|
||||
return I40E_ERR_RESET_FAILED;
|
||||
}
|
||||
@ -1480,7 +1518,6 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
|
||||
|
||||
if (!hw->func_caps.led[idx])
|
||||
return 0;
|
||||
|
||||
gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
|
||||
port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
|
||||
I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
|
||||
@ -1499,8 +1536,15 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
|
||||
#define I40E_FILTER_ACTIVITY 0xE
|
||||
#define I40E_LINK_ACTIVITY 0xC
|
||||
#define I40E_MAC_ACTIVITY 0xD
|
||||
#define I40E_FW_LED BIT(4)
|
||||
#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
|
||||
I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
|
||||
|
||||
#define I40E_LED0 22
|
||||
|
||||
#define I40E_PIN_FUNC_SDP 0x0
|
||||
#define I40E_PIN_FUNC_LED 0x1
|
||||
|
||||
/**
|
||||
* i40e_led_get - return current on/off mode
|
||||
* @hw: pointer to the hw struct
|
||||
@ -1562,8 +1606,10 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
|
||||
u32 current_mode = 0;
|
||||
int i;
|
||||
|
||||
if (mode & 0xfffffff0)
|
||||
if (mode & ~I40E_LED_MODE_VALID) {
|
||||
DEBUGOUT1("invalid mode passed in %X\n", mode);
|
||||
return;
|
||||
}
|
||||
|
||||
/* as per the documentation GPIO 22-29 are the LED
|
||||
* GPIO pins named LED0..LED7
|
||||
@ -1648,19 +1694,22 @@ enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
|
||||
status = i40e_asq_send_command(hw, &desc, abilities,
|
||||
abilities_size, cmd_details);
|
||||
|
||||
if (status != I40E_SUCCESS)
|
||||
break;
|
||||
|
||||
if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) {
|
||||
switch (hw->aq.asq_last_status) {
|
||||
case I40E_AQ_RC_EIO:
|
||||
status = I40E_ERR_UNKNOWN_PHY;
|
||||
break;
|
||||
} else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) {
|
||||
case I40E_AQ_RC_EAGAIN:
|
||||
i40e_msec_delay(1);
|
||||
total_delay++;
|
||||
status = I40E_ERR_TIMEOUT;
|
||||
break;
|
||||
/* also covers I40E_AQ_RC_OK */
|
||||
default:
|
||||
break;
|
||||
}
|
||||
} while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) &&
|
||||
(total_delay < max_delay));
|
||||
|
||||
} while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
|
||||
(total_delay < max_delay));
|
||||
|
||||
if (status != I40E_SUCCESS)
|
||||
return status;
|
||||
@ -1803,6 +1852,7 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
|
||||
* @max_frame_size: Maximum Frame Size to be supported by the port
|
||||
* @crc_en: Tell HW to append a CRC to outgoing frames
|
||||
* @pacing: Pacing configurations
|
||||
* @auto_drop_blocking_packets: Tell HW to drop packets if TC queue is blocked
|
||||
* @cmd_details: pointer to command details structure or NULL
|
||||
*
|
||||
* Configure MAC settings for frame size, jumbo frame support and the
|
||||
@ -1811,6 +1861,7 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
|
||||
enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
|
||||
u16 max_frame_size,
|
||||
bool crc_en, u16 pacing,
|
||||
bool auto_drop_blocking_packets,
|
||||
struct i40e_asq_cmd_details *cmd_details)
|
||||
{
|
||||
struct i40e_aq_desc desc;
|
||||
@ -1829,6 +1880,19 @@ enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
|
||||
if (crc_en)
|
||||
cmd->params |= I40E_AQ_SET_MAC_CONFIG_CRC_EN;
|
||||
|
||||
if (auto_drop_blocking_packets) {
|
||||
if (hw->flags & I40E_HW_FLAG_DROP_MODE)
|
||||
cmd->params |=
|
||||
I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN;
|
||||
else
|
||||
i40e_debug(hw, I40E_DEBUG_ALL,
|
||||
"This FW api version does not support drop mode.\n");
|
||||
}
|
||||
|
||||
#define I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD 0x7FFF
|
||||
cmd->fc_refresh_threshold =
|
||||
CPU_TO_LE16(I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD);
|
||||
|
||||
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
|
||||
|
||||
return status;
|
||||
@ -1969,8 +2033,8 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
|
||||
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
|
||||
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
|
||||
|
||||
if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
|
||||
hw->aq.api_min_ver >= 7) {
|
||||
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
|
||||
hw->mac.type != I40E_MAC_X722) {
|
||||
__le32 tmp;
|
||||
|
||||
i40e_memcpy(&tmp, resp->link_type, sizeof(tmp),
|
||||
@ -2198,7 +2262,7 @@ enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw,
|
||||
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
|
||||
|
||||
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
|
||||
sizeof(vsi_ctx->info), cmd_details);
|
||||
sizeof(vsi_ctx->info), cmd_details);
|
||||
|
||||
if (status != I40E_SUCCESS)
|
||||
goto aq_add_vsi_exit;
|
||||
@ -2615,7 +2679,7 @@ enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw,
|
||||
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
|
||||
|
||||
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
|
||||
sizeof(vsi_ctx->info), cmd_details);
|
||||
sizeof(vsi_ctx->info), cmd_details);
|
||||
|
||||
vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
|
||||
vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
|
||||
@ -2830,9 +2894,16 @@ enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
hw->phy.link_info.req_fec_info =
|
||||
abilities.fec_cfg_curr_mod_ext_info &
|
||||
(I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
|
||||
if (abilities.fec_cfg_curr_mod_ext_info &
|
||||
I40E_AQ_ENABLE_FEC_AUTO)
|
||||
hw->phy.link_info.req_fec_info =
|
||||
(I40E_AQ_REQUEST_FEC_KR |
|
||||
I40E_AQ_REQUEST_FEC_RS);
|
||||
else
|
||||
hw->phy.link_info.req_fec_info =
|
||||
abilities.fec_cfg_curr_mod_ext_info &
|
||||
(I40E_AQ_REQUEST_FEC_KR |
|
||||
I40E_AQ_REQUEST_FEC_RS);
|
||||
|
||||
i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type,
|
||||
sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA);
|
||||
@ -4209,7 +4280,7 @@ enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw,
|
||||
|
||||
cmd->type = mib_type;
|
||||
cmd->length = CPU_TO_LE16(buff_size);
|
||||
cmd->address_high = CPU_TO_LE32(I40E_HI_WORD((u64)buff));
|
||||
cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)buff));
|
||||
cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buff));
|
||||
|
||||
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
|
||||
@ -4245,151 +4316,39 @@ enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_aq_add_lldp_tlv
|
||||
* i40e_aq_restore_lldp
|
||||
* @hw: pointer to the hw struct
|
||||
* @bridge_type: type of bridge
|
||||
* @buff: buffer with TLV to add
|
||||
* @buff_size: length of the buffer
|
||||
* @tlv_len: length of the TLV to be added
|
||||
* @mib_len: length of the LLDP MIB returned in response
|
||||
* @setting: pointer to factory setting variable or NULL
|
||||
* @restore: True if factory settings should be restored
|
||||
* @cmd_details: pointer to command details structure or NULL
|
||||
*
|
||||
* Add the specified TLV to LLDP Local MIB for the given bridge type,
|
||||
* it is responsibility of the caller to make sure that the TLV is not
|
||||
* already present in the LLDPDU.
|
||||
* In return firmware will write the complete LLDP MIB with the newly
|
||||
* added TLV in the response buffer.
|
||||
* Restore LLDP Agent factory settings if @restore set to True. In other case
|
||||
* only returns factory setting in AQ response.
|
||||
**/
|
||||
enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
|
||||
void *buff, u16 buff_size, u16 tlv_len,
|
||||
u16 *mib_len,
|
||||
struct i40e_asq_cmd_details *cmd_details)
|
||||
enum i40e_status_code
|
||||
i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
|
||||
struct i40e_asq_cmd_details *cmd_details)
|
||||
{
|
||||
struct i40e_aq_desc desc;
|
||||
struct i40e_aqc_lldp_add_tlv *cmd =
|
||||
(struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
|
||||
struct i40e_aqc_lldp_restore *cmd =
|
||||
(struct i40e_aqc_lldp_restore *)&desc.params.raw;
|
||||
enum i40e_status_code status;
|
||||
|
||||
if (buff_size == 0 || !buff || tlv_len == 0)
|
||||
return I40E_ERR_PARAM;
|
||||
|
||||
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_add_tlv);
|
||||
|
||||
/* Indirect Command */
|
||||
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
|
||||
if (buff_size > I40E_AQ_LARGE_BUF)
|
||||
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
|
||||
desc.datalen = CPU_TO_LE16(buff_size);
|
||||
|
||||
cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
|
||||
I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
|
||||
cmd->len = CPU_TO_LE16(tlv_len);
|
||||
|
||||
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
|
||||
if (!status) {
|
||||
if (mib_len != NULL)
|
||||
*mib_len = LE16_TO_CPU(desc.datalen);
|
||||
if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
|
||||
i40e_debug(hw, I40E_DEBUG_ALL,
|
||||
"Restore LLDP not supported by current FW version.\n");
|
||||
return I40E_ERR_DEVICE_NOT_SUPPORTED;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
|
||||
|
||||
/**
|
||||
* i40e_aq_update_lldp_tlv
|
||||
* @hw: pointer to the hw struct
|
||||
* @bridge_type: type of bridge
|
||||
* @buff: buffer with TLV to update
|
||||
* @buff_size: size of the buffer holding original and updated TLVs
|
||||
* @old_len: Length of the Original TLV
|
||||
* @new_len: Length of the Updated TLV
|
||||
* @offset: offset of the updated TLV in the buff
|
||||
* @mib_len: length of the returned LLDP MIB
|
||||
* @cmd_details: pointer to command details structure or NULL
|
||||
*
|
||||
* Update the specified TLV to the LLDP Local MIB for the given bridge type.
|
||||
* Firmware will place the complete LLDP MIB in response buffer with the
|
||||
* updated TLV.
|
||||
**/
|
||||
enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
|
||||
u8 bridge_type, void *buff, u16 buff_size,
|
||||
u16 old_len, u16 new_len, u16 offset,
|
||||
u16 *mib_len,
|
||||
struct i40e_asq_cmd_details *cmd_details)
|
||||
{
|
||||
struct i40e_aq_desc desc;
|
||||
struct i40e_aqc_lldp_update_tlv *cmd =
|
||||
(struct i40e_aqc_lldp_update_tlv *)&desc.params.raw;
|
||||
enum i40e_status_code status;
|
||||
if (restore)
|
||||
cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
|
||||
|
||||
if (buff_size == 0 || !buff || offset == 0 ||
|
||||
old_len == 0 || new_len == 0)
|
||||
return I40E_ERR_PARAM;
|
||||
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
|
||||
|
||||
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_tlv);
|
||||
|
||||
/* Indirect Command */
|
||||
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
|
||||
if (buff_size > I40E_AQ_LARGE_BUF)
|
||||
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
|
||||
desc.datalen = CPU_TO_LE16(buff_size);
|
||||
|
||||
cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
|
||||
I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
|
||||
cmd->old_len = CPU_TO_LE16(old_len);
|
||||
cmd->new_offset = CPU_TO_LE16(offset);
|
||||
cmd->new_len = CPU_TO_LE16(new_len);
|
||||
|
||||
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
|
||||
if (!status) {
|
||||
if (mib_len != NULL)
|
||||
*mib_len = LE16_TO_CPU(desc.datalen);
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_aq_delete_lldp_tlv
|
||||
* @hw: pointer to the hw struct
|
||||
* @bridge_type: type of bridge
|
||||
* @buff: pointer to a user supplied buffer that has the TLV
|
||||
* @buff_size: length of the buffer
|
||||
* @tlv_len: length of the TLV to be deleted
|
||||
* @mib_len: length of the returned LLDP MIB
|
||||
* @cmd_details: pointer to command details structure or NULL
|
||||
*
|
||||
* Delete the specified TLV from LLDP Local MIB for the given bridge type.
|
||||
* The firmware places the entire LLDP MIB in the response buffer.
|
||||
**/
|
||||
enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
|
||||
u8 bridge_type, void *buff, u16 buff_size,
|
||||
u16 tlv_len, u16 *mib_len,
|
||||
struct i40e_asq_cmd_details *cmd_details)
|
||||
{
|
||||
struct i40e_aq_desc desc;
|
||||
struct i40e_aqc_lldp_add_tlv *cmd =
|
||||
(struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
|
||||
enum i40e_status_code status;
|
||||
|
||||
if (buff_size == 0 || !buff)
|
||||
return I40E_ERR_PARAM;
|
||||
|
||||
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_delete_tlv);
|
||||
|
||||
/* Indirect Command */
|
||||
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
|
||||
if (buff_size > I40E_AQ_LARGE_BUF)
|
||||
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
|
||||
desc.datalen = CPU_TO_LE16(buff_size);
|
||||
cmd->len = CPU_TO_LE16(tlv_len);
|
||||
cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
|
||||
I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
|
||||
|
||||
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
|
||||
if (!status) {
|
||||
if (mib_len != NULL)
|
||||
*mib_len = LE16_TO_CPU(desc.datalen);
|
||||
}
|
||||
if (setting)
|
||||
*setting = cmd->command & 1;
|
||||
|
||||
return status;
|
||||
}
|
||||
@ -4398,11 +4357,13 @@ enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
|
||||
* i40e_aq_stop_lldp
|
||||
* @hw: pointer to the hw struct
|
||||
* @shutdown_agent: True if LLDP Agent needs to be Shutdown
|
||||
* @persist: True if stop of LLDP should be persistent across power cycles
|
||||
* @cmd_details: pointer to command details structure or NULL
|
||||
*
|
||||
* Stop or Shutdown the embedded LLDP Agent
|
||||
**/
|
||||
enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
|
||||
bool persist,
|
||||
struct i40e_asq_cmd_details *cmd_details)
|
||||
{
|
||||
struct i40e_aq_desc desc;
|
||||
@ -4415,6 +4376,14 @@ enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
|
||||
if (shutdown_agent)
|
||||
cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
|
||||
|
||||
if (persist) {
|
||||
if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
|
||||
cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
|
||||
else
|
||||
i40e_debug(hw, I40E_DEBUG_ALL,
|
||||
"Persistent Stop LLDP not supported by current FW version.\n");
|
||||
}
|
||||
|
||||
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
|
||||
|
||||
return status;
|
||||
@ -4423,11 +4392,13 @@ enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
|
||||
/**
|
||||
* i40e_aq_start_lldp
|
||||
* @hw: pointer to the hw struct
|
||||
* @persist: True if start of LLDP should be persistent across power cycles
|
||||
* @cmd_details: pointer to command details structure or NULL
|
||||
*
|
||||
* Start the embedded LLDP Agent on all ports.
|
||||
**/
|
||||
enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
|
||||
bool persist,
|
||||
struct i40e_asq_cmd_details *cmd_details)
|
||||
{
|
||||
struct i40e_aq_desc desc;
|
||||
@ -4438,6 +4409,15 @@ enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
|
||||
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
|
||||
|
||||
cmd->command = I40E_AQ_LLDP_AGENT_START;
|
||||
|
||||
if (persist) {
|
||||
if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
|
||||
cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
|
||||
else
|
||||
i40e_debug(hw, I40E_DEBUG_ALL,
|
||||
"Persistent Start LLDP not supported by current FW version.\n");
|
||||
}
|
||||
|
||||
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
|
||||
|
||||
return status;
|
||||
@ -4459,9 +4439,7 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
|
||||
(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
|
||||
enum i40e_status_code status;
|
||||
|
||||
if ((hw->mac.type != I40E_MAC_XL710) ||
|
||||
((hw->aq.api_maj_ver < 1) ||
|
||||
((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 6))))
|
||||
if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
|
||||
return I40E_ERR_DEVICE_NOT_SUPPORTED;
|
||||
|
||||
i40e_fill_default_direct_cmd_desc(&desc,
|
||||
@ -4655,7 +4633,6 @@ enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
|
||||
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
|
||||
|
||||
cmd->seid = CPU_TO_LE16(seid);
|
||||
|
||||
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
|
||||
|
||||
return status;
|
||||
@ -4836,8 +4813,6 @@ enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
|
||||
cmd->num_unicast_etags = num_tags_in_buf;
|
||||
|
||||
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
|
||||
if (length > I40E_AQ_LARGE_BUF)
|
||||
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
|
||||
|
||||
status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
|
||||
|
||||
@ -5634,10 +5609,10 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
|
||||
* to be shifted 1 byte over from the VxLAN VNI
|
||||
**/
|
||||
static void i40e_fix_up_geneve_vni(
|
||||
struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
|
||||
struct i40e_aqc_cloud_filters_element_data *filters,
|
||||
u8 filter_count)
|
||||
{
|
||||
struct i40e_aqc_add_remove_cloud_filters_element_data *f = filters;
|
||||
struct i40e_aqc_cloud_filters_element_data *f = filters;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < filter_count; i++) {
|
||||
@ -5662,13 +5637,13 @@ static void i40e_fix_up_geneve_vni(
|
||||
* @filter_count: number of filters contained in the buffer
|
||||
*
|
||||
* Set the cloud filters for a given VSI. The contents of the
|
||||
* i40e_aqc_add_remove_cloud_filters_element_data are filled
|
||||
* i40e_aqc_cloud_filters_element_data are filled
|
||||
* in by the caller of the function.
|
||||
*
|
||||
**/
|
||||
enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
|
||||
u16 seid,
|
||||
struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
|
||||
struct i40e_aqc_cloud_filters_element_data *filters,
|
||||
u8 filter_count)
|
||||
{
|
||||
struct i40e_aq_desc desc;
|
||||
@ -5694,21 +5669,78 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_aq_remove_cloud_filters
|
||||
* i40e_aq_add_cloud_filters_bb
|
||||
* @hw: pointer to the hardware structure
|
||||
* @seid: VSI seid to add cloud filters from
|
||||
* @filters: Buffer which contains the filters in big buffer to be added
|
||||
* @filter_count: number of filters contained in the buffer
|
||||
*
|
||||
* Set the cloud filters for a given VSI. The contents of the
|
||||
* i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
|
||||
* the function.
|
||||
*
|
||||
**/
|
||||
enum i40e_status_code
|
||||
i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
|
||||
struct i40e_aqc_cloud_filters_element_bb *filters,
|
||||
u8 filter_count)
|
||||
{
|
||||
struct i40e_aq_desc desc;
|
||||
struct i40e_aqc_add_remove_cloud_filters *cmd =
|
||||
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
|
||||
enum i40e_status_code status;
|
||||
u16 buff_len;
|
||||
int i;
|
||||
|
||||
i40e_fill_default_direct_cmd_desc(&desc,
|
||||
i40e_aqc_opc_add_cloud_filters);
|
||||
|
||||
buff_len = filter_count * sizeof(*filters);
|
||||
desc.datalen = CPU_TO_LE16(buff_len);
|
||||
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
|
||||
cmd->num_filters = filter_count;
|
||||
cmd->seid = CPU_TO_LE16(seid);
|
||||
cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
|
||||
|
||||
for (i = 0; i < filter_count; i++) {
|
||||
u16 tnl_type;
|
||||
u32 ti;
|
||||
|
||||
tnl_type = (LE16_TO_CPU(filters[i].element.flags) &
|
||||
I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
|
||||
I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
|
||||
|
||||
/* Due to hardware eccentricities, the VNI for Geneve is shifted
|
||||
* one more byte further than normally used for Tenant ID in
|
||||
* other tunnel types.
|
||||
*/
|
||||
if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
|
||||
ti = LE32_TO_CPU(filters[i].element.tenant_id);
|
||||
filters[i].element.tenant_id = CPU_TO_LE32(ti << 8);
|
||||
}
|
||||
}
|
||||
|
||||
status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_aq_rem_cloud_filters
|
||||
* @hw: pointer to the hardware structure
|
||||
* @seid: VSI seid to remove cloud filters from
|
||||
* @filters: Buffer which contains the filters to be removed
|
||||
* @filter_count: number of filters contained in the buffer
|
||||
*
|
||||
* Remove the cloud filters for a given VSI. The contents of the
|
||||
* i40e_aqc_add_remove_cloud_filters_element_data are filled
|
||||
* in by the caller of the function.
|
||||
* i40e_aqc_cloud_filters_element_data are filled in by the caller
|
||||
* of the function.
|
||||
*
|
||||
**/
|
||||
enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
|
||||
u16 seid,
|
||||
struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
|
||||
u8 filter_count)
|
||||
enum i40e_status_code
|
||||
i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
|
||||
struct i40e_aqc_cloud_filters_element_data *filters,
|
||||
u8 filter_count)
|
||||
{
|
||||
struct i40e_aq_desc desc;
|
||||
struct i40e_aqc_add_remove_cloud_filters *cmd =
|
||||
@ -5732,6 +5764,115 @@ enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_aq_rem_cloud_filters_bb
|
||||
* @hw: pointer to the hardware structure
|
||||
* @seid: VSI seid to remove cloud filters from
|
||||
* @filters: Buffer which contains the filters in big buffer to be removed
|
||||
* @filter_count: number of filters contained in the buffer
|
||||
*
|
||||
* Remove the big buffer cloud filters for a given VSI. The contents of the
|
||||
* i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
|
||||
* function.
|
||||
*
|
||||
**/
|
||||
enum i40e_status_code
|
||||
i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
|
||||
struct i40e_aqc_cloud_filters_element_bb *filters,
|
||||
u8 filter_count)
|
||||
{
|
||||
struct i40e_aq_desc desc;
|
||||
struct i40e_aqc_add_remove_cloud_filters *cmd =
|
||||
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
|
||||
enum i40e_status_code status;
|
||||
u16 buff_len;
|
||||
int i;
|
||||
|
||||
i40e_fill_default_direct_cmd_desc(&desc,
|
||||
i40e_aqc_opc_remove_cloud_filters);
|
||||
|
||||
buff_len = filter_count * sizeof(*filters);
|
||||
desc.datalen = CPU_TO_LE16(buff_len);
|
||||
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
|
||||
cmd->num_filters = filter_count;
|
||||
cmd->seid = CPU_TO_LE16(seid);
|
||||
cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
|
||||
|
||||
for (i = 0; i < filter_count; i++) {
|
||||
u16 tnl_type;
|
||||
u32 ti;
|
||||
|
||||
tnl_type = (LE16_TO_CPU(filters[i].element.flags) &
|
||||
I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
|
||||
I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
|
||||
|
||||
/* Due to hardware eccentricities, the VNI for Geneve is shifted
|
||||
* one more byte further than normally used for Tenant ID in
|
||||
* other tunnel types.
|
||||
*/
|
||||
if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
|
||||
ti = LE32_TO_CPU(filters[i].element.tenant_id);
|
||||
filters[i].element.tenant_id = CPU_TO_LE32(ti << 8);
|
||||
}
|
||||
}
|
||||
|
||||
status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
 * i40e_aq_replace_cloud_filters - Replace cloud filter command
 * @hw: pointer to the hw struct
 * @filters: pointer to the i40e_aqc_replace_cloud_filter_cmd struct
 * @cmd_buf: pointer to the i40e_aqc_replace_cloud_filter_cmd_buf struct
 *
 **/
enum
i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
	struct i40e_aqc_replace_cloud_filters_cmd *filters,
	struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_replace_cloud_filters_cmd *cmd =
		(struct i40e_aqc_replace_cloud_filters_cmd *)&desc.params.raw;
	enum i40e_status_code status = I40E_SUCCESS;
	int i = 0;

	/* X722 doesn't support this command */
	if (hw->mac.type == I40E_MAC_X722)
		return I40E_ERR_DEVICE_NOT_SUPPORTED;

	/* need FW version greater than 6.00 */
	if (hw->aq.fw_maj_ver < 6)
		return I40E_NOT_SUPPORTED;

	i40e_fill_default_direct_cmd_desc(&desc,
		i40e_aqc_opc_replace_cloud_filters);

	desc.datalen = CPU_TO_LE16(32);
	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->old_filter_type = filters->old_filter_type;
	cmd->new_filter_type = filters->new_filter_type;
	cmd->valid_flags = filters->valid_flags;
	cmd->tr_bit = filters->tr_bit;
	cmd->tr_bit2 = filters->tr_bit2;

	status = i40e_asq_send_command(hw, &desc, cmd_buf,
		sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf), NULL);

	/* for get cloud filters command */
	for (i = 0; i < 32; i += 4) {
		cmd_buf->filters[i / 4].filter_type = cmd_buf->data[i];
		cmd_buf->filters[i / 4].input[0] = cmd_buf->data[i + 1];
		cmd_buf->filters[i / 4].input[1] = cmd_buf->data[i + 2];
		cmd_buf->filters[i / 4].input[2] = cmd_buf->data[i + 3];
	}

	return status;
}
|
||||
|
||||
|
||||
/**
|
||||
* i40e_aq_alternate_write
|
||||
* @hw: pointer to the hardware structure
|
||||
@ -6554,8 +6695,8 @@ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
|
||||
* @led_addr: LED register address
|
||||
* @reg_val: read register value
|
||||
**/
|
||||
static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
|
||||
u32 *reg_val)
|
||||
enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
|
||||
u32 *reg_val)
|
||||
{
|
||||
enum i40e_status_code status;
|
||||
u8 phy_addr = 0;
|
||||
@ -6564,7 +6705,7 @@ static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
|
||||
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
|
||||
status = i40e_aq_get_phy_register(hw,
|
||||
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
|
||||
I40E_PHY_COM_REG_PAGE,
|
||||
I40E_PHY_COM_REG_PAGE, TRUE,
|
||||
I40E_PHY_LED_PROV_REG_1,
|
||||
reg_val, NULL);
|
||||
} else {
|
||||
@ -6583,8 +6724,8 @@ static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
|
||||
* @led_addr: LED register address
|
||||
* @reg_val: register value to write
|
||||
**/
|
||||
static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
|
||||
u32 reg_val)
|
||||
enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
|
||||
u32 reg_val)
|
||||
{
|
||||
enum i40e_status_code status;
|
||||
u8 phy_addr = 0;
|
||||
@ -6592,7 +6733,7 @@ static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
|
||||
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
|
||||
status = i40e_aq_set_phy_register(hw,
|
||||
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
|
||||
I40E_PHY_COM_REG_PAGE,
|
||||
I40E_PHY_COM_REG_PAGE, TRUE,
|
||||
I40E_PHY_LED_PROV_REG_1,
|
||||
reg_val, NULL);
|
||||
} else {
|
||||
@ -6626,7 +6767,7 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
|
||||
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
|
||||
status = i40e_aq_get_phy_register(hw,
|
||||
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
|
||||
I40E_PHY_COM_REG_PAGE,
|
||||
I40E_PHY_COM_REG_PAGE, TRUE,
|
||||
I40E_PHY_LED_PROV_REG_1,
|
||||
®_val_aq, NULL);
|
||||
if (status == I40E_SUCCESS)
|
||||
@ -6827,20 +6968,51 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_aq_set_phy_register
|
||||
* i40e_mdio_if_number_selection - MDIO I/F number selection
|
||||
* @hw: pointer to the hw struct
|
||||
* @set_mdio: use MDIO I/F number specified by mdio_num
|
||||
* @mdio_num: MDIO I/F number
|
||||
* @cmd: pointer to PHY Register command structure
|
||||
**/
|
||||
static void
|
||||
i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio, u8 mdio_num,
|
||||
struct i40e_aqc_phy_register_access *cmd)
|
||||
{
|
||||
if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
|
||||
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
|
||||
cmd->cmd_flags |=
|
||||
I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
|
||||
((mdio_num <<
|
||||
I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
|
||||
I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
|
||||
else
|
||||
i40e_debug(hw, I40E_DEBUG_PHY,
|
||||
"MDIO I/F number selection not supported by current FW version.\n");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_aq_set_phy_register_ext
|
||||
* @hw: pointer to the hw struct
|
||||
* @phy_select: select which phy should be accessed
|
||||
* @dev_addr: PHY device address
|
||||
* @page_change: enable auto page change
|
||||
* @set_mdio: use MDIO I/F number specified by mdio_num
|
||||
* @mdio_num: MDIO I/F number
|
||||
* @reg_addr: PHY register address
|
||||
* @reg_val: new register value
|
||||
* @cmd_details: pointer to command details structure or NULL
|
||||
*
|
||||
* Write the external PHY register.
|
||||
* NOTE: In common cases MDIO I/F number should not be changed, thats why you
|
||||
* may use simple wrapper i40e_aq_set_phy_register.
|
||||
**/
|
||||
enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
|
||||
u8 phy_select, u8 dev_addr,
|
||||
u32 reg_addr, u32 reg_val,
|
||||
struct i40e_asq_cmd_details *cmd_details)
|
||||
enum i40e_status_code
|
||||
i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
|
||||
u8 phy_select, u8 dev_addr, bool page_change,
|
||||
bool set_mdio, u8 mdio_num,
|
||||
u32 reg_addr, u32 reg_val,
|
||||
struct i40e_asq_cmd_details *cmd_details)
|
||||
{
|
||||
struct i40e_aq_desc desc;
|
||||
struct i40e_aqc_phy_register_access *cmd =
|
||||
@ -6855,26 +7027,38 @@ enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
|
||||
cmd->reg_address = CPU_TO_LE32(reg_addr);
|
||||
cmd->reg_value = CPU_TO_LE32(reg_val);
|
||||
|
||||
if (!page_change)
|
||||
cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
|
||||
|
||||
i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
|
||||
|
||||
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_aq_get_phy_register
|
||||
* i40e_aq_get_phy_register_ext
|
||||
* @hw: pointer to the hw struct
|
||||
* @phy_select: select which phy should be accessed
|
||||
* @dev_addr: PHY device address
|
||||
* @page_change: enable auto page change
|
||||
* @set_mdio: use MDIO I/F number specified by mdio_num
|
||||
* @mdio_num: MDIO I/F number
|
||||
* @reg_addr: PHY register address
|
||||
* @reg_val: read register value
|
||||
* @cmd_details: pointer to command details structure or NULL
|
||||
*
|
||||
* Read the external PHY register.
|
||||
* NOTE: In common cases MDIO I/F number should not be changed, thats why you
|
||||
* may use simple wrapper i40e_aq_get_phy_register.
|
||||
**/
|
||||
enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
|
||||
u8 phy_select, u8 dev_addr,
|
||||
u32 reg_addr, u32 *reg_val,
|
||||
struct i40e_asq_cmd_details *cmd_details)
|
||||
enum i40e_status_code
|
||||
i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
|
||||
u8 phy_select, u8 dev_addr, bool page_change,
|
||||
bool set_mdio, u8 mdio_num,
|
||||
u32 reg_addr, u32 *reg_val,
|
||||
struct i40e_asq_cmd_details *cmd_details)
|
||||
{
|
||||
struct i40e_aq_desc desc;
|
||||
struct i40e_aqc_phy_register_access *cmd =
|
||||
@ -6888,6 +7072,11 @@ enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
|
||||
cmd->dev_addres = dev_addr;
|
||||
cmd->reg_address = CPU_TO_LE32(reg_addr);
|
||||
|
||||
if (!page_change)
|
||||
cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
|
||||
|
||||
i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
|
||||
|
||||
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
|
||||
if (!status)
|
||||
*reg_val = LE32_TO_CPU(cmd->reg_value);
|
||||
@ -6895,7 +7084,6 @@ enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
|
||||
return status;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* i40e_aq_send_msg_to_pf
|
||||
* @hw: pointer to the hardware structure
|
||||
|
@@ -893,22 +893,41 @@ enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw)
/**
 * i40e_init_dcb
 * @hw: pointer to the hw struct
 * @enable_mib_change: enable mib change event
 *
 * Update DCB configuration from the Firmware
 **/
enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw)
enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
{
	enum i40e_status_code ret = I40E_SUCCESS;
	struct i40e_lldp_variables lldp_cfg;
	u8 adminstatus = 0;

	if (!hw->func_caps.dcb)
		return ret;
		return I40E_NOT_SUPPORTED;

	/* Read LLDP NVM area */
	ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
	if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) {
		u8 offset = 0;

		if (hw->mac.type == I40E_MAC_XL710)
			offset = I40E_LLDP_CURRENT_STATUS_XL710_OFFSET;
		else if (hw->mac.type == I40E_MAC_X722)
			offset = I40E_LLDP_CURRENT_STATUS_X722_OFFSET;
		else
			return I40E_NOT_SUPPORTED;

		ret = i40e_read_nvm_module_data(hw,
			I40E_SR_EMP_SR_SETTINGS_PTR,
			offset,
			I40E_LLDP_CURRENT_STATUS_OFFSET,
			I40E_LLDP_CURRENT_STATUS_SIZE,
			&lldp_cfg.adminstatus);
	} else {
		ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
	}
	if (ret)
		return ret;
		return I40E_ERR_NOT_READY;

	/* Get the LLDP AdminStatus for the current port */
	adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
@@ -917,7 +936,7 @@ enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw)
	/* LLDP agent disabled */
	if (!adminstatus) {
		hw->dcbx_status = I40E_DCBX_STATUS_DISABLED;
		return ret;
		return I40E_ERR_NOT_READY;
	}

	/* Get DCBX status */
@@ -926,30 +945,67 @@ enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw)
		return ret;

	/* Check the DCBX Status */
	switch (hw->dcbx_status) {
	case I40E_DCBX_STATUS_DONE:
	case I40E_DCBX_STATUS_IN_PROGRESS:
	if (hw->dcbx_status == I40E_DCBX_STATUS_DONE ||
	    hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) {
		/* Get current DCBX configuration */
		ret = i40e_get_dcb_config(hw);
		if (ret)
			return ret;
		break;
	case I40E_DCBX_STATUS_DISABLED:
		return ret;
	case I40E_DCBX_STATUS_NOT_STARTED:
	case I40E_DCBX_STATUS_MULTIPLE_PEERS:
	default:
		break;
	} else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
		return I40E_ERR_NOT_READY;
	}

	/* Configure the LLDP MIB change event */
	ret = i40e_aq_cfg_lldp_mib_change_event(hw, TRUE, NULL);
	if (ret)
		return ret;
	if (enable_mib_change)
		ret = i40e_aq_cfg_lldp_mib_change_event(hw, TRUE, NULL);

	return ret;
}
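
Illustrative caller sketch, not part of the commit: with the new signature, i40e_init_dcb() reports an unusable LLDP/DCBX agent through I40E_NOT_SUPPORTED or I40E_ERR_NOT_READY instead of returning success, and it only arms MIB-change events when enable_mib_change is set. The function name and message below are hypothetical; the driver's real call sites live in ixl_pf_main.c and may differ.

static void
ixl_example_init_dcb(struct i40e_hw *hw, device_t dev)
{
	enum i40e_status_code status;

	/* Query DCBX state without registering for MIB change events */
	status = i40e_init_dcb(hw, false);
	if (status == I40E_NOT_SUPPORTED || status == I40E_ERR_NOT_READY)
		device_printf(dev, "DCBX not available; continuing without DCB\n");
	else if (status != I40E_SUCCESS)
		device_printf(dev, "i40e_init_dcb failed: %d\n", status);
}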
|
||||
|
||||
/**
 * i40e_get_fw_lldp_status
 * @hw: pointer to the hw struct
 * @lldp_status: pointer to the status enum
 *
 * Get status of FW Link Layer Discovery Protocol (LLDP) Agent.
 * Status of agent is reported via @lldp_status parameter.
 **/
enum i40e_status_code
i40e_get_fw_lldp_status(struct i40e_hw *hw,
			enum i40e_get_fw_lldp_status_resp *lldp_status)
{
	enum i40e_status_code ret;
	struct i40e_virt_mem mem;
	u8 *lldpmib;

	if (!lldp_status)
		return I40E_ERR_PARAM;

	/* Allocate buffer for the LLDPDU */
	ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
	if (ret)
		return ret;

	lldpmib = (u8 *)mem.va;
	ret = i40e_aq_get_lldp_mib(hw, 0, 0, (void *)lldpmib,
				   I40E_LLDPDU_SIZE, NULL, NULL, NULL);

	if (ret == I40E_SUCCESS) {
		*lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
	} else if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) {
		/* MIB is not available yet but the agent is running */
		*lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
		ret = I40E_SUCCESS;
	} else if (hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
		*lldp_status = I40E_GET_FW_LLDP_STATUS_DISABLED;
		ret = I40E_SUCCESS;
	}

	i40e_free_virt_mem(hw, &mem);
	return ret;
}
|
||||
|
||||
|
||||
/**
|
||||
* i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
|
||||
* @tlv: Fill the ETS config data in IEEE format
|
||||
@ -1242,7 +1298,8 @@ enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw)
|
||||
|
||||
/**
|
||||
* i40e_dcb_config_to_lldp - Convert Dcbconfig to MIB format
|
||||
* @hw: pointer to the hw struct
|
||||
* @lldpmib: pointer to mib to be output
|
||||
* @miblen: pointer to u16 for length of lldpmib
|
||||
* @dcbcfg: store for LLDPDU data
|
||||
*
|
||||
* send DCB configuration to FW
|
||||
|
@@ -69,6 +69,11 @@
#define I40E_LLDP_ADMINSTATUS_ENABLED_TX 2
#define I40E_LLDP_ADMINSTATUS_ENABLED_RXTX 3

#define I40E_LLDP_CURRENT_STATUS_XL710_OFFSET 0x2B
#define I40E_LLDP_CURRENT_STATUS_X722_OFFSET 0x31
#define I40E_LLDP_CURRENT_STATUS_OFFSET 1
#define I40E_LLDP_CURRENT_STATUS_SIZE 1

/* Defines for LLDP TLV header */
#define I40E_LLDP_MIB_HLEN 14
#define I40E_LLDP_TLV_LEN_SHIFT 0
@@ -208,6 +213,12 @@ struct i40e_dcbx_variables {
	u32 deftsaassignment;
};


enum i40e_get_fw_lldp_status_resp {
	I40E_GET_FW_LLDP_STATUS_DISABLED = 0,
	I40E_GET_FW_LLDP_STATUS_ENABLED = 1
};

enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw,
					   u16 *status);
enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib,
@@ -216,9 +227,12 @@ enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
					     u8 bridgetype,
					     struct i40e_dcbx_config *dcbcfg);
enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw);
enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw);
enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw,
				    bool enable_mib_change);
enum i40e_status_code
i40e_get_fw_lldp_status(struct i40e_hw *hw,
			enum i40e_get_fw_lldp_status_resp *lldp_status);
enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw);
enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
					      struct i40e_dcbx_config *dcbcfg);

#endif /* _I40E_DCB_H_ */
|
||||
|
@ -39,6 +39,8 @@
|
||||
#define I40E_INTEL_VENDOR_ID 0x8086
|
||||
|
||||
/* Device IDs */
|
||||
#define I40E_DEV_ID_X710_N3000 0x0CF8
|
||||
#define I40E_DEV_ID_XXV710_N3000 0x0D58
|
||||
#define I40E_DEV_ID_SFP_XL710 0x1572
|
||||
#define I40E_DEV_ID_QEMU 0x1574
|
||||
#define I40E_DEV_ID_KX_B 0x1580
|
||||
|
@ -144,7 +144,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
|
||||
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
|
||||
DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
|
||||
txq_num, obj->max_cnt, ret_code);
|
||||
goto init_lan_hmc_out;
|
||||
goto free_hmc_out;
|
||||
}
|
||||
|
||||
/* aggregate values into the full LAN object for later */
|
||||
@ -167,7 +167,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
|
||||
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
|
||||
DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
|
||||
rxq_num, obj->max_cnt, ret_code);
|
||||
goto init_lan_hmc_out;
|
||||
goto free_hmc_out;
|
||||
}
|
||||
|
||||
/* aggregate values into the full LAN object for later */
|
||||
@ -190,7 +190,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
|
||||
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
|
||||
DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
|
||||
fcoe_cntx_num, obj->max_cnt, ret_code);
|
||||
goto init_lan_hmc_out;
|
||||
goto free_hmc_out;
|
||||
}
|
||||
|
||||
/* aggregate values into the full LAN object for later */
|
||||
@ -213,7 +213,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
|
||||
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
|
||||
DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
|
||||
fcoe_filt_num, obj->max_cnt, ret_code);
|
||||
goto init_lan_hmc_out;
|
||||
goto free_hmc_out;
|
||||
}
|
||||
|
||||
/* aggregate values into the full LAN object for later */
|
||||
@ -234,7 +234,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
|
||||
(sizeof(struct i40e_hmc_sd_entry) *
|
||||
hw->hmc.sd_table.sd_cnt));
|
||||
if (ret_code)
|
||||
goto init_lan_hmc_out;
|
||||
goto free_hmc_out;
|
||||
hw->hmc.sd_table.sd_entry =
|
||||
(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
|
||||
}
|
||||
@ -242,6 +242,11 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
|
||||
full_obj->size = l2fpm_size;
|
||||
|
||||
init_lan_hmc_out:
|
||||
return ret_code;
|
||||
free_hmc_out:
|
||||
if (hw->hmc.hmc_obj_virt_mem.va)
|
||||
i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
|
||||
|
||||
return ret_code;
|
||||
}
|
||||
|
||||
|
@ -366,6 +366,77 @@ enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
|
||||
return ret_code;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
|
||||
* @hw: Pointer to the HW structure
|
||||
* @module_ptr: Pointer to module in words with respect to NVM beginning
|
||||
* @module_offset: Offset in words from module start
|
||||
* @data_offset: Offset in words from reading data area start
|
||||
* @words_data_size: Words to read from NVM
|
||||
* @data_ptr: Pointer to memory location where resulting buffer will be stored
|
||||
**/
|
||||
enum i40e_status_code
|
||||
i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset,
|
||||
u16 data_offset, u16 words_data_size, u16 *data_ptr)
|
||||
{
|
||||
enum i40e_status_code status;
|
||||
u16 specific_ptr = 0;
|
||||
u16 ptr_value = 0;
|
||||
u16 offset = 0;
|
||||
|
||||
if (module_ptr != 0) {
|
||||
status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
|
||||
if (status != I40E_SUCCESS) {
|
||||
i40e_debug(hw, I40E_DEBUG_ALL,
|
||||
"Reading nvm word failed.Error code: %d.\n",
|
||||
status);
|
||||
return I40E_ERR_NVM;
|
||||
}
|
||||
}
|
||||
#define I40E_NVM_INVALID_PTR_VAL 0x7FFF
|
||||
#define I40E_NVM_INVALID_VAL 0xFFFF
|
||||
|
||||
/* Pointer not initialized */
|
||||
if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
|
||||
ptr_value == I40E_NVM_INVALID_VAL) {
|
||||
i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
|
||||
return I40E_ERR_BAD_PTR;
|
||||
}
|
||||
|
||||
/* Check whether the module is in SR mapped area or outside */
|
||||
if (ptr_value & I40E_PTR_TYPE) {
|
||||
/* Pointer points outside of the Shared RAM mapped area */
|
||||
i40e_debug(hw, I40E_DEBUG_ALL,
|
||||
"Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n");
|
||||
|
||||
return I40E_ERR_PARAM;
|
||||
} else {
|
||||
/* Read from the Shadow RAM */
|
||||
|
||||
status = i40e_read_nvm_word(hw, ptr_value + module_offset,
|
||||
&specific_ptr);
|
||||
if (status != I40E_SUCCESS) {
|
||||
i40e_debug(hw, I40E_DEBUG_ALL,
|
||||
"Reading nvm word failed.Error code: %d.\n",
|
||||
status);
|
||||
return I40E_ERR_NVM;
|
||||
}
|
||||
|
||||
offset = ptr_value + module_offset + specific_ptr +
|
||||
data_offset;
|
||||
|
||||
status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
|
||||
data_ptr);
|
||||
if (status != I40E_SUCCESS) {
|
||||
i40e_debug(hw, I40E_DEBUG_ALL,
|
||||
"Reading nvm buffer failed.Error code: %d.\n",
|
||||
status);
|
||||
}
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
|
||||
* @hw: pointer to the HW structure
|
||||
@ -504,10 +575,10 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
|
||||
} else {
|
||||
ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
|
||||
}
|
||||
|
||||
return ret_code;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* i40e_write_nvm_aq - Writes Shadow RAM.
|
||||
* @hw: pointer to the HW structure.
|
||||
@ -826,6 +897,7 @@ static const char *i40e_nvm_update_state_str[] = {
|
||||
"I40E_NVMUPD_EXEC_AQ",
|
||||
"I40E_NVMUPD_GET_AQ_RESULT",
|
||||
"I40E_NVMUPD_GET_AQ_EVENT",
|
||||
"I40E_NVMUPD_GET_FEATURES",
|
||||
};
|
||||
|
||||
/**
|
||||
@ -888,6 +960,31 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
|
||||
return I40E_SUCCESS;
|
||||
}
|
||||
|
||||
/*
|
||||
* A supported features request returns immediately
|
||||
* rather than going into state machine
|
||||
*/
|
||||
if (upd_cmd == I40E_NVMUPD_FEATURES) {
|
||||
if (cmd->data_size < hw->nvmupd_features.size) {
|
||||
*perrno = -EFAULT;
|
||||
return I40E_ERR_BUF_TOO_SHORT;
|
||||
}
|
||||
|
||||
/*
|
||||
* If buffer is bigger than i40e_nvmupd_features structure,
|
||||
* make sure the trailing bytes are set to 0x0.
|
||||
*/
|
||||
if (cmd->data_size > hw->nvmupd_features.size)
|
||||
i40e_memset(bytes + hw->nvmupd_features.size, 0x0,
|
||||
cmd->data_size - hw->nvmupd_features.size,
|
||||
I40E_NONDMA_MEM);
|
||||
|
||||
i40e_memcpy(bytes, &hw->nvmupd_features,
|
||||
hw->nvmupd_features.size, I40E_NONDMA_MEM);
|
||||
|
||||
return I40E_SUCCESS;
|
||||
}
|
||||
|
||||
/* Clear status even it is not read and log */
|
||||
if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
|
||||
i40e_debug(hw, I40E_DEBUG_NVM,
|
||||
@ -1354,10 +1451,20 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
|
||||
upd_cmd = I40E_NVMUPD_READ_SA;
|
||||
break;
|
||||
case I40E_NVM_EXEC:
|
||||
if (module == 0xf)
|
||||
upd_cmd = I40E_NVMUPD_STATUS;
|
||||
else if (module == 0)
|
||||
switch (module) {
|
||||
case I40E_NVM_EXEC_GET_AQ_RESULT:
|
||||
upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
|
||||
break;
|
||||
case I40E_NVM_EXEC_FEATURES:
|
||||
upd_cmd = I40E_NVMUPD_FEATURES;
|
||||
break;
|
||||
case I40E_NVM_EXEC_STATUS:
|
||||
upd_cmd = I40E_NVMUPD_STATUS;
|
||||
break;
|
||||
default:
|
||||
*perrno = -EFAULT;
|
||||
return I40E_NVMUPD_INVALID;
|
||||
}
|
||||
break;
|
||||
case I40E_NVM_AQE:
|
||||
upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
|
||||
|
@ -33,6 +33,7 @@
|
||||
/*$FreeBSD$*/
|
||||
|
||||
#include <sys/limits.h>
|
||||
#include <sys/time.h>
|
||||
|
||||
#include "ixl.h"
|
||||
|
||||
@ -45,14 +46,13 @@ i40e_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
|
||||
if (error)
|
||||
return;
|
||||
*(bus_addr_t *) arg = segs->ds_addr;
|
||||
return;
|
||||
}
|
||||
|
||||
i40e_status
|
||||
i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem, u32 size)
|
||||
{
|
||||
mem->va = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
|
||||
return(mem->va == NULL);
|
||||
return (mem->va == NULL);
|
||||
}
|
||||
|
||||
i40e_status
|
||||
@ -61,7 +61,7 @@ i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem)
|
||||
free(mem->va, M_DEVBUF);
|
||||
mem->va = NULL;
|
||||
|
||||
return(0);
|
||||
return (I40E_SUCCESS);
|
||||
}
|
||||
|
||||
i40e_status
|
||||
@ -113,7 +113,7 @@ i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
|
||||
mem->size = size;
|
||||
bus_dmamap_sync(mem->tag, mem->map,
|
||||
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
|
||||
return (0);
|
||||
return (I40E_SUCCESS);
|
||||
fail_2:
|
||||
bus_dmamem_free(mem->tag, mem->va, mem->map);
|
||||
fail_1:
|
||||
@@ -161,25 +161,15 @@ i40e_destroy_spinlock(struct i40e_spinlock *lock)
	mtx_destroy(&lock->mutex);
}

static inline int
ixl_ms_scale(int x)
{
	if (hz == 1000)
		return (x);
	else if (hz > 1000)
		return (x*(hz/1000));
	else
		return (max(1, x/(1000/hz)));
}
#ifndef MSEC_2_TICKS
#define MSEC_2_TICKS(m) max(1, (uint32_t)((hz == 1000) ? \
	(m) : ((uint64_t)(m) * (uint64_t)hz)/(uint64_t)1000))
#endif

void
i40e_msec_pause(int msecs)
{
	if (cold || SCHEDULER_STOPPED())
		i40e_msec_delay(msecs);
	else
		// ERJ: (msecs * hz) could overflow
		pause("ixl", ixl_ms_scale(msecs));
		pause("i40e_msec_pause", MSEC_2_TICKS(msecs));
}
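
A quick arithmetic check of the MSEC_2_TICKS() scaling used above (illustrative only, not part of the commit):

/*
 * MSEC_2_TICKS(5) with hz == 1000 -> 5 (passed through unchanged)
 * MSEC_2_TICKS(5) with hz == 2000 -> (5 * 2000) / 1000 = 10 ticks
 * MSEC_2_TICKS(5) with hz == 100  -> (5 * 100) / 1000 = 0, clamped to 1 tick
 */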
|
||||
|
||||
/*
|
||||
|
@ -100,7 +100,10 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
|
||||
u16 *val);
|
||||
enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
|
||||
u32 time, u32 interval);
|
||||
|
||||
enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
|
||||
u32 *reg_val);
|
||||
enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
|
||||
u32 reg_val);
|
||||
/* admin send queue commands */
|
||||
|
||||
enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
|
||||
@ -133,6 +136,7 @@ enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
|
||||
struct i40e_asq_cmd_details *cmd_details);
|
||||
enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
|
||||
u16 max_frame_size, bool crc_en, u16 pacing,
|
||||
bool auto_drop_blocking_packets,
|
||||
struct i40e_asq_cmd_details *cmd_details);
|
||||
enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
|
||||
u64 *advt_reg,
|
||||
@ -276,26 +280,18 @@ enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw,
|
||||
enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
|
||||
bool enable_update,
|
||||
struct i40e_asq_cmd_details *cmd_details);
|
||||
enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
|
||||
void *buff, u16 buff_size, u16 tlv_len,
|
||||
u16 *mib_len,
|
||||
struct i40e_asq_cmd_details *cmd_details);
|
||||
enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
|
||||
u8 bridge_type, void *buff, u16 buff_size,
|
||||
u16 old_len, u16 new_len, u16 offset,
|
||||
u16 *mib_len,
|
||||
struct i40e_asq_cmd_details *cmd_details);
|
||||
enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
|
||||
u8 bridge_type, void *buff, u16 buff_size,
|
||||
u16 tlv_len, u16 *mib_len,
|
||||
struct i40e_asq_cmd_details *cmd_details);
|
||||
enum i40e_status_code
|
||||
i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
|
||||
struct i40e_asq_cmd_details *cmd_details);
|
||||
enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
|
||||
bool persist,
|
||||
struct i40e_asq_cmd_details *cmd_details);
|
||||
enum i40e_status_code i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
|
||||
bool dcb_enable,
|
||||
struct i40e_asq_cmd_details
|
||||
*cmd_details);
|
||||
enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
|
||||
bool persist,
|
||||
struct i40e_asq_cmd_details *cmd_details);
|
||||
enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
|
||||
void *buff, u16 buff_size,
|
||||
@ -394,17 +390,27 @@ enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
|
||||
struct i40e_asq_cmd_details *cmd_details);
|
||||
enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw,
|
||||
struct i40e_asq_cmd_details *cmd_details);
|
||||
enum i40e_status_code
|
||||
i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
|
||||
struct i40e_aqc_cloud_filters_element_bb *filters,
|
||||
u8 filter_count);
|
||||
enum i40e_status_code
|
||||
i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi,
|
||||
struct i40e_aqc_cloud_filters_element_data *filters,
|
||||
u8 filter_count);
|
||||
enum i40e_status_code
|
||||
i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi,
|
||||
struct i40e_aqc_cloud_filters_element_data *filters,
|
||||
u8 filter_count);
|
||||
enum i40e_status_code
|
||||
i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
|
||||
struct i40e_aqc_cloud_filters_element_bb *filters,
|
||||
u8 filter_count);
|
||||
enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw,
|
||||
struct i40e_lldp_variables *lldp_cfg);
|
||||
enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
|
||||
u16 vsi,
|
||||
struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
|
||||
u8 filter_count);
|
||||
|
||||
enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
|
||||
u16 vsi,
|
||||
struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
|
||||
u8 filter_count);
|
||||
enum i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
|
||||
struct i40e_aqc_replace_cloud_filters_cmd *filters,
|
||||
struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf);
|
||||
enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
|
||||
u32 reg_addr0, u32 *reg_val0,
|
||||
u32 reg_addr1, u32 *reg_val1);
|
||||
@ -446,6 +452,9 @@ enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
|
||||
void i40e_release_nvm(struct i40e_hw *hw);
|
||||
enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
|
||||
u16 *data);
|
||||
enum i40e_status_code
|
||||
i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset,
|
||||
u16 data_offset, u16 words_data_size, u16 *data_ptr);
|
||||
enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
|
||||
u16 *words, u16 *data);
|
||||
enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module,
|
||||
@@ -548,14 +557,24 @@ enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
				u32 reg_addr, u32 reg_val,
				struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
				u8 phy_select, u8 dev_addr,
				u32 reg_addr, u32 reg_val,
				struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
				u8 phy_select, u8 dev_addr,
				u32 reg_addr, u32 *reg_val,
				struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code
i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
	u8 phy_select, u8 dev_addr, bool page_change,
	bool set_mdio, u8 mdio_num,
	u32 reg_addr, u32 reg_val,
	struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code
i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
	u8 phy_select, u8 dev_addr, bool page_change,
	bool set_mdio, u8 mdio_num,
	u32 reg_addr, u32 *reg_val,
	struct i40e_asq_cmd_details *cmd_details);

/* Convenience wrappers for most common use case */
#define i40e_aq_set_phy_register(hw, ps, da, pc, ra, rv, cd) \
	i40e_aq_set_phy_register_ext(hw, ps, da, pc, FALSE, 0, ra, rv, cd)
#define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd) \
	i40e_aq_get_phy_register_ext(hw, ps, da, pc, FALSE, 0, ra, rv, cd)
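
Illustrative use of the compatibility wrapper above, not part of the commit: callers keep the i40e_aq_get_phy_register()/i40e_aq_set_phy_register() names, now with a page_change argument, and the macro expands to the _ext variant with MDIO interface-number selection disabled. The constants mirror the LED helpers earlier in this diff; the function name is hypothetical and error handling is trimmed.

static u32
ixl_example_read_led_prov_reg(struct i40e_hw *hw)
{
	u32 reg_val = 0;

	/* Expands to i40e_aq_get_phy_register_ext(..., FALSE, 0, ...) */
	(void)i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
	    I40E_PHY_COM_REG_PAGE, TRUE, I40E_PHY_LED_PROV_REG_1,
	    &reg_val, NULL);

	return (reg_val);
}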
|
||||
|
||||
enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
|
||||
struct i40e_aqc_arp_proxy_data *proxy_config,
|
||||
@ -587,4 +606,6 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
|
||||
enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
|
||||
u8 page, u16 reg, u8 phy_addr, u16 value);
|
||||
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
|
||||
enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
|
||||
u32 time, u32 interval);
|
||||
#endif /* _I40E_PROTOTYPE_H_ */
|
||||
|
@ -90,7 +90,7 @@
|
||||
#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
|
||||
#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
|
||||
#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
|
||||
#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
|
||||
#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
|
||||
#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
|
||||
#define I40E_PF_ARQT_ARQT_SHIFT 0
|
||||
#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
|
||||
@ -113,7 +113,7 @@
|
||||
#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
|
||||
#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
|
||||
#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
|
||||
#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
|
||||
#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
|
||||
#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
|
||||
#define I40E_PF_ATQT_ATQT_SHIFT 0
|
||||
#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
|
||||
@ -140,7 +140,7 @@
|
||||
#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
|
||||
#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
|
||||
#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
|
||||
#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
|
||||
#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
|
||||
#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
|
||||
#define I40E_VF_ARQT_MAX_INDEX 127
|
||||
#define I40E_VF_ARQT_ARQT_SHIFT 0
|
||||
@ -168,7 +168,7 @@
|
||||
#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
|
||||
#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
|
||||
#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
|
||||
#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
|
||||
#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
|
||||
#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
|
||||
#define I40E_VF_ATQT_MAX_INDEX 127
|
||||
#define I40E_VF_ATQT_ATQT_SHIFT 0
|
||||
@ -291,7 +291,7 @@
|
||||
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
|
||||
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
|
||||
#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
|
||||
#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
|
||||
#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
|
||||
#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
|
||||
#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
|
||||
#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
|
||||
@@ -395,6 +395,20 @@
#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
#define I40E_GL_FWSTS_FWS1B_SHIFT 16
#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_GL_FWSTS_FWS1B_EMPR_0 I40E_MASK(0x20, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_GL_FWSTS_FWS1B_EMPR_10 I40E_MASK(0x2A, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK \
	I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK \
	I40E_MASK(0x31, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK \
	I40E_MASK(0x32, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK \
	I40E_MASK(0x33, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK \
	I40E_MASK(0xB, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK \
	I40E_MASK(0xC, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
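
Illustrative sketch, not part of the commit, of how the FWS1B recovery-mode codes above can be tested. It assumes the I40E_GL_FWSTS register offset defined elsewhere in this header and the rd32() accessor from i40e_osdep.h; the driver's actual recovery-mode check lives in ixl_pf_main.c and may differ in detail.

static bool
ixl_example_fw_in_recovery_mode(struct i40e_hw *hw)
{
	u32 fws1b = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;

	if (hw->mac.type == I40E_MAC_XL710)
		return (fws1b == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
		    fws1b == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
		    fws1b == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK ||
		    fws1b == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK);
	if (hw->mac.type == I40E_MAC_X722)
		return (fws1b == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
		    fws1b == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK);

	return (false);
}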
|
||||
@ -535,7 +549,7 @@
|
||||
#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
|
||||
#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
|
||||
#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
|
||||
#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
|
||||
#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
|
||||
#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
|
||||
#define I40E_GLGEN_MSRWD_MAX_INDEX 3
|
||||
#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
|
||||
@ -1274,14 +1288,14 @@
|
||||
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
|
||||
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
|
||||
#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
|
||||
#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
|
||||
#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
|
||||
#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
|
||||
#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
|
||||
#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
|
||||
#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
|
||||
#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
|
||||
#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
|
||||
#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
|
||||
#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT)
|
||||
#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
|
||||
#define I40E_QRX_ENA_MAX_INDEX 1535
|
||||
#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
|
||||
@ -1690,7 +1704,7 @@
|
||||
#define I40E_GLNVM_SRCTL_START_SHIFT 30
|
||||
#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
|
||||
#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
|
||||
#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
|
||||
#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT)
|
||||
#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
|
||||
#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
|
||||
#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
|
||||
@ -3057,7 +3071,7 @@
|
||||
#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
|
||||
#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
|
||||
#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
|
||||
#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
|
||||
#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT)
|
||||
#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
|
||||
#define I40E_VP_MDET_RX_MAX_INDEX 127
|
||||
#define I40E_VP_MDET_RX_VALID_SHIFT 0
|
||||
@ -3193,7 +3207,7 @@
|
||||
#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
|
||||
#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
|
||||
#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
|
||||
#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
|
||||
#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
|
||||
#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
|
||||
#define I40E_VF_ARQT1_ARQT_SHIFT 0
|
||||
#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
|
||||
@ -3216,7 +3230,7 @@
|
||||
#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
|
||||
#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
|
||||
#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
|
||||
#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
|
||||
#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
|
||||
#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
|
||||
#define I40E_VF_ATQT1_ATQT_SHIFT 0
|
||||
#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
|
||||
|
@ -56,7 +56,7 @@
|
||||
#define I40E_MAX_PF_VSI 64
|
||||
#define I40E_MAX_PF_QP 128
|
||||
#define I40E_MAX_VSI_QP 16
|
||||
#define I40E_MAX_VF_VSI 3
|
||||
#define I40E_MAX_VF_VSI 4
|
||||
#define I40E_MAX_CHAINED_RX_BUFFERS 5
|
||||
#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
|
||||
|
||||
@ -95,8 +95,8 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
|
||||
#define I40E_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
|
||||
#define I40E_LO_BYTE(x) ((u8)((x) & 0xFF))
|
||||
|
||||
/* Number of Transmit Descriptors must be a multiple of 8. */
|
||||
#define I40E_REQ_TX_DESCRIPTOR_MULTIPLE 8
|
||||
/* Number of Transmit Descriptors must be a multiple of 32. */
|
||||
#define I40E_REQ_TX_DESCRIPTOR_MULTIPLE 32
|
||||
/* Number of Receive Descriptors must be a multiple of 32 if
|
||||
* the number of descriptors is greater than 32.
|
||||
*/
|
||||
@ -126,6 +126,8 @@ enum i40e_debug_mask {
|
||||
I40E_DEBUG_DIAG = 0x00000800,
|
||||
I40E_DEBUG_FD = 0x00001000,
|
||||
|
||||
I40E_DEBUG_IWARP = 0x00F00000,
|
||||
|
||||
I40E_DEBUG_AQ_MESSAGE = 0x01000000,
|
||||
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
|
||||
I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
|
||||
@ -188,7 +190,6 @@ enum i40e_memcpy_type {
|
||||
I40E_DMA_TO_NONDMA
|
||||
};
|
||||
|
||||
|
||||
/* These are structs for managing the hardware information and the operations.
|
||||
* The structures of function pointers are filled out at init time when we
|
||||
* know for sure exactly which hardware we're working with. This gives us the
|
||||
@ -242,6 +243,7 @@ enum i40e_vsi_type {
|
||||
I40E_VSI_MIRROR = 5,
|
||||
I40E_VSI_SRIOV = 6,
|
||||
I40E_VSI_FDIR = 7,
|
||||
I40E_VSI_IWARP = 8,
|
||||
I40E_VSI_TYPE_UNKNOWN
|
||||
};
|
||||
|
||||
@ -373,6 +375,7 @@ struct i40e_hw_capabilities {
|
||||
#define I40E_CLOUD_FILTER_MODE1 0x6
|
||||
#define I40E_CLOUD_FILTER_MODE2 0x7
|
||||
#define I40E_CLOUD_FILTER_MODE3 0x8
|
||||
#define I40E_SWITCH_MODE_MASK 0xF
|
||||
|
||||
u32 management_mode;
|
||||
u32 mng_protocols_over_mctp;
|
||||
@ -487,6 +490,7 @@ enum i40e_nvmupd_cmd {
|
||||
I40E_NVMUPD_EXEC_AQ,
|
||||
I40E_NVMUPD_GET_AQ_RESULT,
|
||||
I40E_NVMUPD_GET_AQ_EVENT,
|
||||
I40E_NVMUPD_FEATURES,
|
||||
};
|
||||
|
||||
enum i40e_nvmupd_state {
|
||||
@ -522,6 +526,10 @@ enum i40e_nvmupd_state {
|
||||
#define I40E_NVM_AQE 0xe
|
||||
#define I40E_NVM_EXEC 0xf
|
||||
|
||||
#define I40E_NVM_EXEC_GET_AQ_RESULT 0x0
|
||||
#define I40E_NVM_EXEC_FEATURES 0xe
|
||||
#define I40E_NVM_EXEC_STATUS 0xf
|
||||
|
||||
#define I40E_NVM_ADAPT_SHIFT 16
|
||||
#define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT)
|
||||
|
||||
@@ -536,6 +544,20 @@ struct i40e_nvm_access {
	u8 data[1];
};

/* NVMUpdate features API */
#define I40E_NVMUPD_FEATURES_API_VER_MAJOR 0
#define I40E_NVMUPD_FEATURES_API_VER_MINOR 14
#define I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN 12

#define I40E_NVMUPD_FEATURE_FLAT_NVM_SUPPORT BIT(0)

struct i40e_nvmupd_features {
	u8 major;
	u8 minor;
	u16 size;
	u8 features[I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN];
};
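
For context (an assumption on my part, not shown in this excerpt): the shared code fills this structure in during admin-queue initialization so the NVM-update tool can query the advertised capabilities, currently just the flat-NVM bit. A rough sketch of that population, with a made-up helper name:

static void
ixl_example_init_nvmupd_features(struct i40e_hw *hw)
{
	i40e_memset(&hw->nvmupd_features, 0, sizeof(hw->nvmupd_features),
	    I40E_NONDMA_MEM);
	hw->nvmupd_features.major = I40E_NVMUPD_FEATURES_API_VER_MAJOR;
	hw->nvmupd_features.minor = I40E_NVMUPD_FEATURES_API_VER_MINOR;
	hw->nvmupd_features.size = sizeof(hw->nvmupd_features);
	hw->nvmupd_features.features[0] = I40E_NVMUPD_FEATURE_FLAT_NVM_SUPPORT;
}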
|
||||
|
||||
/* (Q)SFP module access definitions */
|
||||
#define I40E_I2C_EEPROM_DEV_ADDR 0xA0
|
||||
#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2
|
||||
@ -727,6 +749,11 @@ struct i40e_hw {
|
||||
#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
|
||||
#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
|
||||
#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
|
||||
#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
|
||||
#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
|
||||
#define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6)
|
||||
#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7)
|
||||
#define I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE BIT_ULL(8)
|
||||
u64 flags;
|
||||
|
||||
/* Used in set switch config AQ command */
|
||||
@ -734,6 +761,9 @@ struct i40e_hw {
|
||||
u16 first_tag;
|
||||
u16 second_tag;
|
||||
|
||||
/* NVMUpdate features */
|
||||
struct i40e_nvmupd_features nvmupd_features;
|
||||
|
||||
/* debug mask */
|
||||
u32 debug_mask;
|
||||
char err_str[16];
|
||||
|
@ -2124,7 +2124,7 @@ iavf_add_device_sysctls(struct iavf_sc *sc)
|
||||
|
||||
/* Add stats sysctls */
|
||||
ixl_add_vsi_sysctls(dev, vsi, ctx, "vsi");
|
||||
ixl_add_queues_sysctls(dev, vsi);
|
||||
ixl_vsi_add_queues_stats(vsi, ctx);
|
||||
|
||||
}
|
||||
|
||||
|
@ -48,7 +48,7 @@
|
||||
* Driver version
|
||||
*********************************************************************/
|
||||
#define IXL_DRIVER_VERSION_MAJOR 2
|
||||
#define IXL_DRIVER_VERSION_MINOR 1
|
||||
#define IXL_DRIVER_VERSION_MINOR 2
|
||||
#define IXL_DRIVER_VERSION_BUILD 0
|
||||
|
||||
#define IXL_DRIVER_VERSION_STRING \
|
||||
@ -126,6 +126,8 @@ static void ixl_if_vflr_handle(if_ctx_t ctx);
|
||||
static u_int ixl_mc_filter_apply(void *, struct sockaddr_dl *, u_int);
|
||||
static void ixl_save_pf_tunables(struct ixl_pf *);
|
||||
static int ixl_allocate_pci_resources(struct ixl_pf *);
|
||||
static void ixl_setup_ssctx(struct ixl_pf *pf);
|
||||
static void ixl_admin_timer(void *arg);
|
||||
|
||||
/*********************************************************************
|
||||
* FreeBSD Device Interface Entry Points
|
||||
@ -211,6 +213,7 @@ static driver_t ixl_if_driver = {
|
||||
static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
|
||||
"ixl driver parameters");
|
||||
|
||||
#ifdef IXL_DEBUG_FC
|
||||
/*
|
||||
* Leave this on unless you need to send flow control
|
||||
* frames (or other control frames) from software
|
||||
@ -221,6 +224,16 @@ TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
|
||||
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
|
||||
&ixl_enable_tx_fc_filter, 0,
|
||||
"Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
|
||||
#endif
|
||||
|
||||
#ifdef IXL_DEBUG
static int ixl_debug_recovery_mode = 0;
TUNABLE_INT("hw.ixl.debug_recovery_mode",
    &ixl_debug_recovery_mode);
SYSCTL_INT(_hw_ixl, OID_AUTO, debug_recovery_mode, CTLFLAG_RDTUN,
    &ixl_debug_recovery_mode, 0,
    "Act like when FW entered recovery mode (for debuging)");
#endif
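
Usage note (not part of the commit): because the knob is declared with TUNABLE_INT and CTLFLAG_RDTUN it is a boot-time loader tunable, so on an IXL_DEBUG build it can be enabled by adding hw.ixl.debug_recovery_mode="1" to /boot/loader.conf to exercise the recovery-mode attach path without firmware that is actually in recovery.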
|
||||
|
||||
static int ixl_i2c_access_method = 0;
|
||||
TUNABLE_INT("hw.ixl.i2c_access_method",
|
||||
@ -355,7 +368,7 @@ ixl_allocate_pci_resources(struct ixl_pf *pf)
|
||||
rid = PCIR_BAR(0);
|
||||
pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
|
||||
&rid, RF_ACTIVE);
|
||||
|
||||
|
||||
if (!(pf->pci_mem)) {
|
||||
device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
|
||||
return (ENXIO);
|
||||
@ -384,9 +397,79 @@ ixl_allocate_pci_resources(struct ixl_pf *pf)
|
||||
|
||||
pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
|
||||
pf->hw.back = &pf->osdep;
|
||||
|
||||
|
||||
return (0);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
ixl_setup_ssctx(struct ixl_pf *pf)
|
||||
{
|
||||
if_softc_ctx_t scctx = pf->vsi.shared;
|
||||
struct i40e_hw *hw = &pf->hw;
|
||||
|
||||
if (IXL_PF_IN_RECOVERY_MODE(pf)) {
|
||||
scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
|
||||
scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
|
||||
} else if (hw->mac.type == I40E_MAC_X722)
|
||||
scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
|
||||
else
|
||||
scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
|
||||
|
||||
if (pf->vsi.enable_head_writeback) {
|
||||
scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
|
||||
* sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
|
||||
scctx->isc_txrx = &ixl_txrx_hwb;
|
||||
} else {
|
||||
scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
|
||||
* sizeof(struct i40e_tx_desc), DBA_ALIGN);
|
||||
scctx->isc_txrx = &ixl_txrx_dwb;
|
||||
}
|
||||
|
||||
scctx->isc_txrx->ift_legacy_intr = ixl_intr;
|
||||
scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
|
||||
* sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
|
||||
scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
|
||||
scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
|
||||
scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
|
||||
scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
|
||||
scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
|
||||
scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
|
||||
scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
|
||||
scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
|
||||
}
|
||||
|
||||
static void
|
||||
ixl_admin_timer(void *arg)
|
||||
{
|
||||
struct ixl_pf *pf = (struct ixl_pf *)arg;
|
||||
|
||||
/* Fire off the admin task */
|
||||
iflib_admin_intr_deferred(pf->vsi.ctx);
|
||||
|
||||
/* Reschedule the admin timer */
|
||||
callout_schedule(&pf->admin_timer, hz/2);
|
||||
}
|
||||
|
||||
static int
|
||||
ixl_attach_pre_recovery_mode(struct ixl_pf *pf)
|
||||
{
|
||||
struct ixl_vsi *vsi = &pf->vsi;
|
||||
struct i40e_hw *hw = &pf->hw;
|
||||
device_t dev = pf->dev;
|
||||
|
||||
device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
|
||||
|
||||
i40e_get_mac_addr(hw, hw->mac.addr);
|
||||
|
||||
if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
|
||||
ixl_configure_intr0_msix(pf);
|
||||
ixl_enable_intr0(hw);
|
||||
}
|
||||
|
||||
ixl_setup_ssctx(pf);
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
static int
|
||||
ixl_if_attach_pre(if_ctx_t ctx)
|
||||
@ -395,7 +478,7 @@ ixl_if_attach_pre(if_ctx_t ctx)
|
||||
struct ixl_pf *pf;
|
||||
struct i40e_hw *hw;
|
||||
struct ixl_vsi *vsi;
|
||||
if_softc_ctx_t scctx;
|
||||
enum i40e_get_fw_lldp_status_resp lldp_status;
|
||||
struct i40e_filter_control_settings filter;
|
||||
enum i40e_status_code status;
|
||||
int error = 0;
|
||||
@ -416,7 +499,12 @@ ixl_if_attach_pre(if_ctx_t ctx)
|
||||
vsi->num_vlans = 0;
|
||||
vsi->ctx = ctx;
|
||||
vsi->media = iflib_get_media(ctx);
|
||||
vsi->shared = scctx = iflib_get_softc_ctx(ctx);
|
||||
vsi->shared = iflib_get_softc_ctx(ctx);
|
||||
|
||||
snprintf(pf->admin_mtx_name, sizeof(pf->admin_mtx_name),
|
||||
"%s:admin", device_get_nameunit(dev));
|
||||
mtx_init(&pf->admin_mtx, pf->admin_mtx_name, NULL, MTX_DEF);
|
||||
callout_init_mtx(&pf->admin_timer, &pf->admin_mtx, 0);
|
||||
|
||||
/* Save tunable values */
|
||||
ixl_save_pf_tunables(pf);
|
||||
@ -430,13 +518,11 @@ ixl_if_attach_pre(if_ctx_t ctx)
|
||||
|
||||
/* Establish a clean starting point */
|
||||
i40e_clear_hw(hw);
|
||||
status = i40e_pf_reset(hw);
|
||||
if (status) {
|
||||
device_printf(dev, "PF reset failure %s\n",
|
||||
i40e_stat_str(hw, status));
|
||||
error = EIO;
|
||||
i40e_set_mac_type(hw);
|
||||
|
||||
error = ixl_pf_reset(pf);
|
||||
if (error)
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
/* Initialize the shared code */
|
||||
status = i40e_init_shared_code(hw);
|
||||
@ -483,6 +569,13 @@ ixl_if_attach_pre(if_ctx_t ctx)
|
||||
device_printf(dev, "Please update the NVM image.\n");
|
||||
}
|
||||
|
||||
if (IXL_PF_IN_RECOVERY_MODE(pf)) {
|
||||
error = ixl_attach_pre_recovery_mode(pf);
|
||||
if (error)
|
||||
goto err_out;
|
||||
return (error);
|
||||
}
|
||||
|
||||
/* Clear PXE mode */
|
||||
i40e_clear_pxe_mode(hw);
|
||||
|
||||
@ -495,24 +588,14 @@ ixl_if_attach_pre(if_ctx_t ctx)
|
||||
}
|
||||
|
||||
/* Set up host memory cache */
|
||||
status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
|
||||
hw->func_caps.num_rx_qp, 0, 0);
|
||||
if (status) {
|
||||
device_printf(dev, "init_lan_hmc failed: %s\n",
|
||||
i40e_stat_str(hw, status));
|
||||
goto err_get_cap;
|
||||
}
|
||||
status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
|
||||
if (status) {
|
||||
device_printf(dev, "configure_lan_hmc failed: %s\n",
|
||||
i40e_stat_str(hw, status));
|
||||
error = ixl_setup_hmc(pf);
|
||||
if (error)
|
||||
goto err_mac_hmc;
|
||||
}
|
||||
|
||||
/* Disable LLDP from the firmware for certain NVM versions */
|
||||
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
|
||||
(pf->hw.aq.fw_maj_ver < 4)) {
|
||||
i40e_aq_stop_lldp(hw, TRUE, NULL);
|
||||
i40e_aq_stop_lldp(hw, true, false, NULL);
|
||||
pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
|
||||
}
|
||||
|
||||
@ -537,46 +620,36 @@ ixl_if_attach_pre(if_ctx_t ctx)
|
||||
device_printf(dev, "i40e_set_filter_control() failed\n");
|
||||
|
||||
/* Query device FW LLDP status */
|
||||
ixl_get_fw_lldp_status(pf);
|
||||
if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
|
||||
if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
|
||||
atomic_set_32(&pf->state,
|
||||
IXL_PF_STATE_FW_LLDP_DISABLED);
|
||||
} else {
|
||||
atomic_clear_32(&pf->state,
|
||||
IXL_PF_STATE_FW_LLDP_DISABLED);
|
||||
}
|
||||
}
|
||||
|
||||
/* Tell FW to apply DCB config on link up */
|
||||
i40e_aq_set_dcb_parameters(hw, true, NULL);
|
||||
|
||||
/* Fill out iflib parameters */
|
||||
if (hw->mac.type == I40E_MAC_X722)
|
||||
scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
|
||||
else
|
||||
scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
|
||||
if (vsi->enable_head_writeback) {
|
||||
scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
|
||||
 * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
scctx->isc_txrx = &ixl_txrx_hwb;
} else {
scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
* sizeof(struct i40e_tx_desc), DBA_ALIGN);
scctx->isc_txrx = &ixl_txrx_dwb;
}
scctx->isc_txrx->ift_legacy_intr = ixl_intr;
scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
* sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
ixl_setup_ssctx(pf);

INIT_DBG_DEV(dev, "end");
return (0);

err_mac_hmc:
i40e_shutdown_lan_hmc(hw);
ixl_shutdown_hmc(pf);
err_get_cap:
i40e_shutdown_adminq(hw);
err_out:
ixl_free_pci_resources(pf);
err_pci_res:
mtx_lock(&pf->admin_mtx);
callout_stop(&pf->admin_timer);
mtx_unlock(&pf->admin_mtx);
mtx_destroy(&pf->admin_mtx);
return (error);
}

@ -610,6 +683,22 @@ ixl_if_attach_post(if_ctx_t ctx)
goto err;
}

if (IXL_PF_IN_RECOVERY_MODE(pf)) {
/* Keep admin queue interrupts active while driver is loaded */
if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
ixl_configure_intr0_msix(pf);
ixl_enable_intr0(hw);
}

ixl_add_sysctls_recovery_mode(pf);

/* Start the admin timer */
mtx_lock(&pf->admin_mtx);
callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
mtx_unlock(&pf->admin_mtx);
return (0);
}

/* Determine link state */
if (ixl_attach_get_link_status(pf)) {
error = EINVAL;
@ -700,6 +789,10 @@ ixl_if_attach_post(if_ctx_t ctx)
device_printf(dev, "The device is not iWARP enabled\n");
}
#endif
/* Start the admin timer */
mtx_lock(&pf->admin_mtx);
callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
mtx_unlock(&pf->admin_mtx);

INIT_DBG_DEV(dev, "end");
return (0);
@ -728,6 +821,12 @@ ixl_if_detach(if_ctx_t ctx)

INIT_DBG_DEV(dev, "begin");

/* Stop the admin timer */
mtx_lock(&pf->admin_mtx);
callout_stop(&pf->admin_timer);
mtx_unlock(&pf->admin_mtx);
mtx_destroy(&pf->admin_mtx);

#ifdef IXL_IW
if (ixl_enable_iwarp && pf->iw_enabled) {
error = ixl_iw_pf_detach(pf);
@ -741,13 +840,7 @@ ixl_if_detach(if_ctx_t ctx)
ifmedia_removeall(vsi->media);

/* Shutdown LAN HMC */
if (hw->hmc.hmc_obj) {
status = i40e_shutdown_lan_hmc(hw);
if (status)
device_printf(dev,
"i40e_shutdown_lan_hmc() failed with status %s\n",
i40e_stat_str(hw, status));
}
ixl_shutdown_hmc(pf);

/* Shutdown admin queue */
ixl_disable_intr0(hw);
@ -819,6 +912,8 @@ ixl_if_init(if_ctx_t ctx)
u8 tmpaddr[ETHER_ADDR_LEN];
int ret;

if (IXL_PF_IN_RECOVERY_MODE(pf))
return;
/*
* If the aq is dead here, it probably means something outside of the driver
* did something to the adapter, like a PF reset.
@ -827,7 +922,7 @@ ixl_if_init(if_ctx_t ctx)
if (!i40e_check_asq_alive(&pf->hw)) {
device_printf(dev, "Admin Queue is down; resetting...\n");
ixl_teardown_hw_structs(pf);
ixl_rebuild_hw_structs_after_reset(pf);
ixl_rebuild_hw_structs_after_reset(pf, false);
}

/* Get the latest mac address... User might use a LAA */
@ -853,7 +948,7 @@ ixl_if_init(if_ctx_t ctx)
device_printf(dev, "initialize vsi failed!!\n");
return;
}

/* Reconfigure multicast filters in HW */
ixl_if_multi_set(ctx);

@ -900,6 +995,9 @@ ixl_if_stop(if_ctx_t ctx)

INIT_DEBUGOUT("ixl_if_stop: begin\n");

if (IXL_PF_IN_RECOVERY_MODE(pf))
return;

// TODO: This may need to be reworked
#ifdef IXL_IW
/* Stop iWARP device */
@ -1065,7 +1163,7 @@ ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxq
device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
return (ENOMEM);
}

for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
struct tx_ring *txr = &que->txr;

@ -1089,7 +1187,7 @@ ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxq
txr->tx_paddr = paddrs[i * ntxqs];
txr->que = que;
}

return (0);
fail:
ixl_if_queues_free(ctx);
@ -1166,6 +1264,9 @@ ixl_if_queues_free(if_ctx_t ctx)
free(vsi->rx_queues, M_IXL);
vsi->rx_queues = NULL;
}

if (!IXL_PF_IN_RECOVERY_MODE(pf))
sysctl_ctx_free(&vsi->sysctl_ctx);
}

void
@ -1175,7 +1276,7 @@ ixl_update_link_status(struct ixl_pf *pf)
struct i40e_hw *hw = &pf->hw;
u64 baudrate;

if (pf->link_up) {
if (pf->link_up) {
if (vsi->link_active == FALSE) {
vsi->link_active = TRUE;
baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
@ -1184,7 +1285,6 @@ ixl_update_link_status(struct ixl_pf *pf)
#ifdef PCI_IOV
ixl_broadcast_link_state(pf);
#endif

}
} else { /* Link down */
if (vsi->link_active == TRUE) {
@ -1271,20 +1371,27 @@ ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
static void
ixl_if_update_admin_status(if_ctx_t ctx)
{
struct ixl_pf *pf = iflib_get_softc(ctx);
struct i40e_hw *hw = &pf->hw;
u16 pending;
struct ixl_pf *pf = iflib_get_softc(ctx);
struct i40e_hw *hw = &pf->hw;
u16 pending;

if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING)
ixl_handle_empr_reset(pf);

/*
* Admin Queue is shut down while handling reset.
* Don't proceed if it hasn't been re-initialized
* e.g due to an issue with new FW.
*/
if (!i40e_check_asq_alive(&pf->hw))
return;

if (pf->state & IXL_PF_STATE_MDD_PENDING)
ixl_handle_mdd_event(pf);

ixl_process_adminq(pf, &pending);
ixl_update_link_status(pf);
ixl_update_stats_counters(pf);

/*
* If there are still messages to process, reschedule ourselves.
* Otherwise, re-enable our interrupt and go to sleep.
@ -1522,11 +1629,12 @@ ixl_if_promisc_set(if_ctx_t ctx, int flags)
static void
ixl_if_timer(if_ctx_t ctx, uint16_t qid)
{
struct ixl_pf *pf = iflib_get_softc(ctx);

if (qid != 0)
return;

/* Fire off the adminq task */
iflib_admin_intr_deferred(ctx);
ixl_update_stats_counters(pf);
}

static void
@ -1690,7 +1798,12 @@ ixl_save_pf_tunables(struct ixl_pf *pf)
device_t dev = pf->dev;

/* Save tunable information */
#ifdef IXL_DEBUG_FC
pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
#endif
#ifdef IXL_DEBUG
pf->recovery_mode = ixl_debug_recovery_mode;
#endif
pf->dbg_mask = ixl_core_debug_mask;
pf->hw.debug_mask = ixl_shared_debug_mask;
pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);

@ -200,6 +200,15 @@
#define CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6|CSUM_UDP_IPV6|CSUM_SCTP_IPV6)
#define CSUM_OFFLOAD (CSUM_OFFLOAD_IPV4|CSUM_OFFLOAD_IPV6|CSUM_TSO)

/* Misc flags for ixl_vsi.flags */
#define IXL_FLAGS_KEEP_TSO4 (1 << 0)
#define IXL_FLAGS_KEEP_TSO6 (1 << 1)
#define IXL_FLAGS_USES_MSIX (1 << 2)
#define IXL_FLAGS_IS_VF (1 << 3)

#define IXL_VSI_IS_PF(v) ((v->flags & IXL_FLAGS_IS_VF) == 0)
#define IXL_VSI_IS_VF(v) ((v->flags & IXL_FLAGS_IS_VF) != 0)

#define IXL_VF_RESET_TIMEOUT 100

#define IXL_VSI_DATA_PORT 0x01
@ -292,7 +301,7 @@
#endif

/* For stats sysctl naming */
#define QUEUE_NAME_LEN 32
#define IXL_QUEUE_NAME_LEN 32

#define IXL_DEV_ERR(_dev, _format, ...) \
device_printf(_dev, "%s: " _format " (%s:%d)\n", __func__, ##__VA_ARGS__, __FILE__, __LINE__)
@ -438,6 +447,7 @@ struct ixl_vsi {
/* MAC/VLAN Filter list */
struct ixl_ftl_head ftl;
u16 num_macs;
u64 num_hw_filters;

/* Contains readylist & stat counter id */
struct i40e_aqc_vsi_properties_data info;
@ -447,7 +457,7 @@ struct ixl_vsi {
/* Per-VSI stats from hardware */
struct i40e_eth_stats eth_stats;
struct i40e_eth_stats eth_stats_offsets;
bool stat_offsets_loaded;
bool stat_offsets_loaded;
/* VSI stat counters */
u64 ipackets;
u64 ierrors;
@ -461,14 +471,11 @@ struct ixl_vsi {
u64 oqdrops;
u64 noproto;

/* Driver statistics */
u64 hw_filters_del;
u64 hw_filters_add;

/* Misc. */
u64 flags;
u64 flags;
/* Stats sysctls for this VSI */
struct sysctl_oid *vsi_node;
struct sysctl_ctx_list sysctl_ctx;
};

/*
@ -497,9 +504,9 @@ ixl_new_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
*/
static inline bool
cmp_etheraddr(const u8 *ea1, const u8 *ea2)
{
return (bcmp(ea1, ea2, 6) == 0);
}
{
return (bcmp(ea1, ea2, ETHER_ADDR_LEN) == 0);
}

/*
* Return next largest power of 2, unsigned
@ -548,5 +555,6 @@ void ixl_add_vsi_sysctls(device_t dev, struct ixl_vsi *vsi,
void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child,
struct i40e_eth_stats *eth_stats);
void ixl_add_queues_sysctls(device_t dev, struct ixl_vsi *vsi);
void ixl_vsi_add_queues_stats(struct ixl_vsi *vsi,
struct sysctl_ctx_list *ctx);
#endif /* _IXL_H_ */

@ -36,6 +36,8 @@
#ifndef _IXL_PF_H_
#define _IXL_PF_H_

#include "i40e_dcb.h"

#include "ixl.h"
#include "ixl_pf_qmgr.h"

@ -59,19 +61,38 @@
I40E_VFINT_DYN_CTLN(((vector) - 1) + \
(((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num)))

enum ixl_fw_mode {
IXL_FW_MODE_NORMAL,
IXL_FW_MODE_RECOVERY,
IXL_FW_MODE_UEMPR
};

enum ixl_i2c_access_method_t {
IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE = 0,
IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS = 1,
IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD = 2,
IXL_I2C_ACCESS_METHOD_AQ = 3,
IXL_I2C_ACCESS_METHOD_TYPE_LENGTH = 4
};

/* Used in struct ixl_pf's state field */
enum ixl_pf_state {
IXL_PF_STATE_ADAPTER_RESETTING = (1 << 0),
IXL_PF_STATE_MDD_PENDING = (1 << 1),
IXL_PF_STATE_PF_RESET_REQ = (1 << 2),
IXL_PF_STATE_VF_RESET_REQ = (1 << 3),
IXL_PF_STATE_PF_CRIT_ERR = (1 << 4),
IXL_PF_STATE_CORE_RESET_REQ = (1 << 5),
IXL_PF_STATE_GLOB_RESET_REQ = (1 << 6),
IXL_PF_STATE_EMP_RESET_REQ = (1 << 7),
IXL_PF_STATE_FW_LLDP_DISABLED = (1 << 8),
IXL_PF_STATE_RECOVERY_MODE = (1 << 0),
IXL_PF_STATE_ADAPTER_RESETTING = (1 << 1),
IXL_PF_STATE_MDD_PENDING = (1 << 2),
IXL_PF_STATE_PF_RESET_REQ = (1 << 3),
IXL_PF_STATE_VF_RESET_REQ = (1 << 4),
IXL_PF_STATE_PF_CRIT_ERR = (1 << 5),
IXL_PF_STATE_CORE_RESET_REQ = (1 << 6),
IXL_PF_STATE_GLOB_RESET_REQ = (1 << 7),
IXL_PF_STATE_EMP_RESET_REQ = (1 << 8),
IXL_PF_STATE_FW_LLDP_DISABLED = (1 << 9),
};

#define IXL_PF_IN_RECOVERY_MODE(pf) \
((atomic_load_acq_32(&pf->state) & IXL_PF_STATE_RECOVERY_MODE) != 0)


struct ixl_vf {
struct ixl_vsi vsi;
u32 vf_flags;
@ -79,10 +100,9 @@ struct ixl_vf {

u8 mac[ETHER_ADDR_LEN];
u16 vf_num;
u32 version;
struct virtchnl_version_info version;

struct ixl_pf_qtag qtag;
struct sysctl_ctx_list ctx;
};

/* Physical controller structure */
@ -105,8 +125,17 @@ struct ixl_pf {
struct ixl_pf_qmgr qmgr;
struct ixl_pf_qtag qtag;

char admin_mtx_name[16]; /* name of the admin mutex */
struct mtx admin_mtx; /* mutex to protect the admin timer */
struct callout admin_timer; /* timer to trigger admin task */

/* Tunable values */
#ifdef IXL_DEBUG_FC
bool enable_tx_fc_filter;
#endif
#ifdef IXL_DEBUG
bool recovery_mode;
#endif
int dynamic_rx_itr;
int dynamic_tx_itr;
int tx_itr;
@ -128,16 +157,17 @@ struct ixl_pf {
bool stat_offsets_loaded;

/* I2C access methods */
u8 i2c_access_method;
s32 (*read_i2c_byte)(struct ixl_pf *pf, u8 byte_offset,
enum ixl_i2c_access_method_t i2c_access_method;
s32 (*read_i2c_byte)(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 *data);
s32 (*write_i2c_byte)(struct ixl_pf *pf, u8 byte_offset,
s32 (*write_i2c_byte)(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 data);

/* SR-IOV */
struct ixl_vf *vfs;
int num_vfs;
uint16_t veb_seid;
int vc_debug_lvl;
};

/*
@ -223,8 +253,6 @@ struct ixl_pf {
"\t1 - Enable (VEB)\n" \
"Enabling this will allow VFs in separate VMs to communicate over the hardware bridge."

extern const char * const ixl_fc_string[6];

MALLOC_DECLARE(M_IXL);

/*** Functions / Macros ***/
@ -239,15 +267,14 @@ MALLOC_DECLARE(M_IXL);
ixl_send_vf_nack_msg((pf), (vf), (op), (st), __FILE__, __LINE__)

/* Debug printing */
#define ixl_dbg(pf, m, s, ...) ixl_debug_core(pf->dev, pf->dbg_mask, m, s, ##__VA_ARGS__)
#define ixl_dbg_info(pf, s, ...) ixl_debug_core(pf->dev, pf->dbg_mask, IXL_DBG_INFO, s, ##__VA_ARGS__)
#define ixl_dbg_filter(pf, s, ...) ixl_debug_core(pf->dev, pf->dbg_mask, IXL_DBG_FILTER, s, ##__VA_ARGS__)
#define ixl_dbg_iov(pf, s, ...) ixl_debug_core(pf->dev, pf->dbg_mask, IXL_DBG_IOV, s, ##__VA_ARGS__)
#define ixl_dbg(pf, m, s, ...) ixl_debug_core((pf)->dev, (pf)->dbg_mask, m, s, ##__VA_ARGS__)
#define ixl_dbg_info(pf, s, ...) ixl_debug_core((pf)->dev, (pf)->dbg_mask, IXL_DBG_INFO, s, ##__VA_ARGS__)
#define ixl_dbg_filter(pf, s, ...) ixl_debug_core((pf)->dev, (pf)->dbg_mask, IXL_DBG_FILTER, s, ##__VA_ARGS__)
#define ixl_dbg_iov(pf, s, ...) ixl_debug_core((pf)->dev, (pf)->dbg_mask, IXL_DBG_IOV, s, ##__VA_ARGS__)

/* PF-only function declarations */
int ixl_setup_interface(device_t, struct ixl_pf *);
void ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *);
char * ixl_aq_speed_to_str(enum i40e_aq_link_speed);

void ixl_handle_que(void *context, int pending);

@ -261,9 +288,7 @@ int ixl_msix_adminq(void *);
void ixl_do_adminq(void *, int);

int ixl_res_alloc_cmp(const void *, const void *);
char * ixl_switch_res_type_string(u8);
char * ixl_switch_element_string(struct sbuf *,
struct i40e_aqc_switch_config_element_resp *);
const char * ixl_switch_res_type_string(u8);
void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
struct sysctl_oid_list *, struct i40e_hw_port_stats *);

@ -282,6 +307,7 @@ void ixl_stat_update32(struct i40e_hw *, u32, bool,
u64 *, u64 *);

void ixl_stop(struct ixl_pf *);
void ixl_vsi_add_sysctls(struct ixl_vsi *, const char *, bool);
int ixl_get_hw_capabilities(struct ixl_pf *);
void ixl_link_up_msg(struct ixl_pf *);
void ixl_update_link_status(struct ixl_pf *);
@ -289,19 +315,20 @@ int ixl_setup_stations(struct ixl_pf *);
int ixl_switch_config(struct ixl_pf *);
void ixl_stop_locked(struct ixl_pf *);
int ixl_teardown_hw_structs(struct ixl_pf *);
int ixl_reset(struct ixl_pf *);
void ixl_init_locked(struct ixl_pf *);
void ixl_set_rss_key(struct ixl_pf *);
void ixl_set_rss_pctypes(struct ixl_pf *);
void ixl_set_rss_hlut(struct ixl_pf *);
int ixl_setup_adminq_msix(struct ixl_pf *);
int ixl_setup_adminq_tq(struct ixl_pf *);
int ixl_teardown_adminq_msix(struct ixl_pf *);
void ixl_teardown_adminq_msix(struct ixl_pf *);
void ixl_configure_intr0_msix(struct ixl_pf *);
void ixl_configure_queue_intr_msix(struct ixl_pf *);
void ixl_free_adminq_tq(struct ixl_pf *);
int ixl_setup_legacy(struct ixl_pf *);
int ixl_init_msix(struct ixl_pf *);
void ixl_configure_tx_itr(struct ixl_pf *);
void ixl_configure_rx_itr(struct ixl_pf *);
void ixl_configure_itr(struct ixl_pf *);
void ixl_configure_legacy(struct ixl_pf *);
void ixl_free_pci_resources(struct ixl_pf *);
@ -310,6 +337,7 @@ void ixl_config_rss(struct ixl_pf *);
int ixl_set_advertised_speeds(struct ixl_pf *, int, bool);
void ixl_set_initial_advertised_speeds(struct ixl_pf *);
void ixl_print_nvm_version(struct ixl_pf *pf);
void ixl_add_sysctls_recovery_mode(struct ixl_pf *);
void ixl_add_device_sysctls(struct ixl_pf *);
void ixl_handle_mdd_event(struct ixl_pf *);
void ixl_add_hw_stats(struct ixl_pf *);
@ -320,9 +348,14 @@ int ixl_aq_get_link_status(struct ixl_pf *,
struct i40e_aqc_get_link_status *);

int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);
int ixl_handle_i2c_eeprom_read_cmd(struct ixl_pf *, struct ifreq *ifr);

int ixl_setup_hmc(struct ixl_pf *);
void ixl_shutdown_hmc(struct ixl_pf *);
void ixl_handle_empr_reset(struct ixl_pf *);
int ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up);
int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *);
int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *, bool is_up);
int ixl_pf_reset(struct ixl_pf *);

void ixl_set_queue_rx_itr(struct ixl_rx_queue *);
void ixl_set_queue_tx_itr(struct ixl_tx_queue *);
@ -344,7 +377,7 @@ int ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *, u16);
void ixl_update_eth_stats(struct ixl_vsi *);
void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
int ixl_initialize_vsi(struct ixl_vsi *);
void ixl_add_ifmedia(struct ixl_vsi *, u64);
void ixl_add_ifmedia(struct ifmedia *, u64);
int ixl_setup_queue_msix(struct ixl_vsi *);
int ixl_setup_queue_tqs(struct ixl_vsi *);
int ixl_teardown_queue_msix(struct ixl_vsi *);
@ -388,8 +421,8 @@ s32 ixl_read_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
s32 ixl_write_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 data);

int ixl_get_fw_lldp_status(struct ixl_pf *pf);
u64 ixl_max_aq_speed_to_value(u8);
int ixl_attach_get_link_status(struct ixl_pf *);
u64 ixl_max_aq_speed_to_value(u8);
int ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);

#endif /* _IXL_PF_H_ */

@ -606,7 +606,7 @@ ixl_write_i2c_byte_bb(struct ixl_pf *pf, u8 byte_offset,
}

/**
* ixl_read_i2c_byte - Reads 8 bit word over I2C using a hardware register
* ixl_read_i2c_byte_reg - Reads 8 bit word over I2C using a hardware register
**/
s32
ixl_read_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset,
@ -627,7 +627,7 @@ ixl_read_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset,
/* Get data from I2C register */
reg = rd32(hw, I40E_GLGEN_I2CCMD(hw->func_caps.mdio_port_num));

/* Retrieve data readed from EEPROM */
/* Retrieve data read from EEPROM */
*data = (u8)(reg & 0xff);

if (status)
@ -636,7 +636,7 @@ ixl_read_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset,
}

/**
* ixl_write_i2c_byte - Writes 8 bit word over I2C using a hardware register
* ixl_write_i2c_byte_reg - Writes 8 bit word over I2C using a hardware register
**/
s32
ixl_write_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset,
@ -694,7 +694,7 @@ ixl_wait_for_i2c_completion(struct i40e_hw *hw, u8 portnum)
}

/**
* ixl_read_i2c_byte - Reads 8 bit word over I2C using a hardware register
* ixl_read_i2c_byte_aq - Reads 8 bit word over I2C using an AQ command
**/
s32
ixl_read_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
@ -706,7 +706,7 @@ ixl_read_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,

status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
dev_addr,
dev_addr, false,
byte_offset,
&reg, NULL);

@ -720,7 +720,7 @@ ixl_read_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
}

/**
* ixl_write_i2c_byte - Writes 8 bit word over I2C using a hardware register
* ixl_write_i2c_byte_aq - Writes 8 bit word over I2C using an AQ command
**/
s32
ixl_write_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
@ -731,7 +731,7 @@ ixl_write_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,

status = i40e_aq_set_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
dev_addr,
dev_addr, false,
byte_offset,
data, NULL);

1137	sys/dev/ixl/ixl_pf_iflib.c	Normal file
File diff suppressed because it is too large
@ -39,9 +39,6 @@ static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum
static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);

static bool ixl_zero_mac(const uint8_t *addr);
static bool ixl_bcast_mac(const uint8_t *addr);

static int ixl_vc_opcode_level(uint16_t opcode);

static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);
@ -117,8 +114,9 @@ ixl_initialize_sriov(struct ixl_pf *pf)
iov_error);
} else
device_printf(dev, "SR-IOV ready\n");
}

pf->vc_debug_lvl = 1;
}

/*
* Allocate the VSI for a VF.
@ -203,20 +201,21 @@ ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
int error;

hw = &pf->hw;
vf->vsi.flags |= IXL_FLAGS_IS_VF;

error = ixl_vf_alloc_vsi(pf, vf);
if (error != 0)
return (error);

vf->vsi.dev = pf->dev;

ixl_init_filters(&vf->vsi);
/* Let VF receive broadcast Ethernet frames */
error = i40e_aq_set_vsi_broadcast(hw, vf->vsi.seid, TRUE, NULL);
if (error)
device_printf(pf->dev, "Error configuring VF VSI for broadcast promiscuous\n");
/* Re-add VF's MAC/VLAN filters to its VSI */
ixl_reconfigure_filters(&vf->vsi);
/* Reset stats? */
vf->vsi.hw_filters_add = 0;
vf->vsi.hw_filters_del = 0;

return (0);
}
@ -488,33 +487,36 @@ static void
ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct virtchnl_version_info reply;
struct virtchnl_version_info *recv_vf_version;
device_t dev = pf->dev;

if (msg_size != sizeof(struct virtchnl_version_info)) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_VERSION,
I40E_ERR_PARAM);
return;
recv_vf_version = (struct virtchnl_version_info *)msg;

/* VFs running the 1.0 API expect to get 1.0 back */
if (VF_IS_V10(recv_vf_version)) {
vf->version.major = 1;
vf->version.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
} else {
vf->version.major = VIRTCHNL_VERSION_MAJOR;
vf->version.minor = VIRTCHNL_VERSION_MINOR;

if ((recv_vf_version->major != VIRTCHNL_VERSION_MAJOR) ||
(recv_vf_version->minor != VIRTCHNL_VERSION_MINOR))
device_printf(dev,
"%s: VF-%d requested version (%d.%d) differs from PF version (%d.%d)\n",
__func__, vf->vf_num,
recv_vf_version->major, recv_vf_version->minor,
VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
}

vf->version = ((struct virtchnl_version_info *)msg)->minor;

reply.major = VIRTCHNL_VERSION_MAJOR;
reply.minor = VIRTCHNL_VERSION_MINOR;
ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
sizeof(reply));
ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS,
&vf->version, sizeof(vf->version));
}

static void
ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{

if (msg_size != 0) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_RESET_VF,
I40E_ERR_PARAM);
return;
}

ixl_reset_vf(pf, vf);

/* No response to a reset message. */
@ -526,19 +528,9 @@ ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
{
struct virtchnl_vf_resource reply;

if ((vf->version == 0 && msg_size != 0) ||
(vf->version == 1 && msg_size != 4)) {
device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size,"
" for VF version %d.%d\n", VIRTCHNL_VERSION_MAJOR,
vf->version);
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
I40E_ERR_PARAM);
return;
}

bzero(&reply, sizeof(reply));

if (vf->version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
if (vf->version.minor == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
reply.vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 |
VIRTCHNL_VF_OFFLOAD_RSS_REG |
VIRTCHNL_VF_OFFLOAD_VLAN;
@ -681,15 +673,8 @@ ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
{
struct virtchnl_vsi_queue_config_info *info;
struct virtchnl_queue_pair_info *pair;
uint16_t expected_msg_size;
int i;

if (msg_size < sizeof(*info)) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_ERR_PARAM);
return;
}

info = msg;
if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_tx_queues) {
device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
@ -699,15 +684,6 @@ ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
return;
}

expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair);
if (msg_size != expected_msg_size) {
device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n",
vf->vf_num, msg_size, expected_msg_size);
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_ERR_PARAM);
return;
}

if (info->vsi_id != vf->vsi.vsi_num) {
device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
@ -839,25 +815,7 @@ ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
int i, largest_txq, largest_rxq;

hw = &pf->hw;

if (msg_size < sizeof(*map)) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}

map = msg;
if (map->num_vectors == 0) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}

if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}

for (i = 0; i < map->num_vectors; i++) {
vector = &map->vecmap[i];
@ -910,13 +868,8 @@ ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
struct virtchnl_queue_select *select;
int error = 0;

if (msg_size != sizeof(*select)) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
I40E_ERR_PARAM);
return;
}

select = msg;

if (select->vsi_id != vf->vsi.vsi_num ||
select->rx_queues == 0 || select->tx_queues == 0) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
@ -989,13 +942,8 @@ ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
struct virtchnl_queue_select *select;
int error = 0;

if (msg_size != sizeof(*select)) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
I40E_ERR_PARAM);
return;
}

select = msg;

if (select->vsi_id != vf->vsi.vsi_num ||
select->rx_queues == 0 || select->tx_queues == 0) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
@ -1064,28 +1012,11 @@ ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES);
}

static bool
ixl_zero_mac(const uint8_t *addr)
{
uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

return (cmp_etheraddr(addr, zero));
}

static bool
ixl_bcast_mac(const uint8_t *addr)
{
static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

return (cmp_etheraddr(addr, ixl_bcast_addr));
}

static int
ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
{

if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
if (ETHER_IS_ZERO(addr) || ETHER_IS_BROADCAST(addr))
return (EINVAL);

/*
@ -1108,23 +1039,11 @@ ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
struct virtchnl_ether_addr *addr;
struct ixl_vsi *vsi;
int i;
size_t expected_size;

vsi = &vf->vsi;

if (msg_size < sizeof(*addr_list)) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
I40E_ERR_PARAM);
return;
}

addr_list = msg;
expected_size = sizeof(*addr_list) +
addr_list->num_elements * sizeof(*addr);

if (addr_list->num_elements == 0 ||
addr_list->vsi_id != vsi->vsi_num ||
msg_size != expected_size) {
if (addr_list->vsi_id != vsi->vsi_num) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
I40E_ERR_PARAM);
return;
@ -1152,32 +1071,23 @@ ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
{
struct virtchnl_ether_addr_list *addr_list;
struct virtchnl_ether_addr *addr;
size_t expected_size;
struct ixl_vsi *vsi;
int i;

if (msg_size < sizeof(*addr_list)) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
I40E_ERR_PARAM);
return;
}

vsi = &vf->vsi;
addr_list = msg;
expected_size = sizeof(*addr_list) +
addr_list->num_elements * sizeof(*addr);

if (addr_list->num_elements == 0 ||
addr_list->vsi_id != vf->vsi.vsi_num ||
msg_size != expected_size) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
if (addr_list->vsi_id != vsi->vsi_num) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
I40E_ERR_PARAM);
return;
}

for (i = 0; i < addr_list->num_elements; i++) {
addr = &addr_list->list[i];
if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
if (ETHER_IS_ZERO(addr->addr) || ETHER_IS_BROADCAST(addr->addr)) {
i40e_send_vf_nack(pf, vf,
VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM);
VIRTCHNL_OP_DEL_ETH_ADDR, I40E_ERR_PARAM);
return;
}
}
@ -1210,21 +1120,11 @@ ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
{
struct virtchnl_vlan_filter_list *filter_list;
enum i40e_status_code code;
size_t expected_size;
int i;

if (msg_size < sizeof(*filter_list)) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
I40E_ERR_PARAM);
return;
}

filter_list = msg;
expected_size = sizeof(*filter_list) +
filter_list->num_elements * sizeof(uint16_t);
if (filter_list->num_elements == 0 ||
filter_list->vsi_id != vf->vsi.vsi_num ||
msg_size != expected_size) {

if (filter_list->vsi_id != vf->vsi.vsi_num) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
I40E_ERR_PARAM);
return;
@ -1262,20 +1162,10 @@ ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
{
struct virtchnl_vlan_filter_list *filter_list;
int i;
size_t expected_size;

if (msg_size < sizeof(*filter_list)) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
I40E_ERR_PARAM);
return;
}

filter_list = msg;
expected_size = sizeof(*filter_list) +
filter_list->num_elements * sizeof(uint16_t);
if (filter_list->num_elements == 0 ||
filter_list->vsi_id != vf->vsi.vsi_num ||
msg_size != expected_size) {

if (filter_list->vsi_id != vf->vsi.vsi_num) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
I40E_ERR_PARAM);
return;
@ -1309,12 +1199,6 @@ ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
struct i40e_hw *hw = &pf->hw;
enum i40e_status_code code;

if (msg_size != sizeof(*info)) {
i40e_send_vf_nack(pf, vf,
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
return;
}

if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
/*
* Do the same thing as the Linux PF driver -- lie to the VF
@ -1362,12 +1246,6 @@ ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
{
struct virtchnl_queue_select *queue;

if (msg_size != sizeof(*queue)) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
I40E_ERR_PARAM);
return;
}

queue = msg;
if (queue->vsi_id != vf->vsi.vsi_num) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
@ -1392,12 +1270,6 @@ ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,

hw = &pf->hw;

if (msg_size < sizeof(*key)) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
I40E_ERR_PARAM);
return;
}

key = msg;

if (key->key_len > 52) {
@ -1454,12 +1326,6 @@ ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,

hw = &pf->hw;

if (msg_size < sizeof(*lut)) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
I40E_ERR_PARAM);
return;
}

lut = msg;

if (lut->lut_entries > 64) {
@ -1507,13 +1373,6 @@ ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
struct virtchnl_rss_hena *hena;

hw = &pf->hw;

if (msg_size < sizeof(*hena)) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA,
I40E_ERR_PARAM);
return;
}

hena = msg;

/* Set HENA */
@ -1537,7 +1396,7 @@ ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf)
event.severity = PF_EVENT_SEVERITY_INFO;
event.event_data.link_event.link_status = pf->vsi.link_active;
event.event_data.link_event.link_speed =
(enum virtchnl_link_speed)hw->phy.link_info.link_speed;
i40e_virtchnl_link_speed(hw->phy.link_info.link_speed);

ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, &event,
sizeof(event));
@ -1555,10 +1414,12 @@ ixl_broadcast_link_state(struct ixl_pf *pf)
void
ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
{
device_t dev = pf->dev;
struct ixl_vf *vf;
void *msg;
uint16_t vf_num, msg_size;
uint32_t opcode;
void *msg;
int err;

vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
opcode = le32toh(event->desc.cookie_high);
@ -1578,6 +1439,15 @@ ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
(vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
vf_num, msg_size);

/* Perform basic checks on the msg */
err = virtchnl_vc_validate_vf_msg(&vf->version, opcode, msg, msg_size);
if (err) {
device_printf(dev, "%s: Received invalid msg from VF-%d: opcode %d, len %d, error %d\n",
__func__, vf->vf_num, opcode, msg_size, err);
i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_PARAM);
return;
}

/* This must be a stray msg from a previously destroyed VF. */
if (!(vf->vf_flags & VF_FLAG_ENABLED))
return;
@ -1785,7 +1655,7 @@ ixl_if_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params)
struct i40e_hw *hw;
struct ixl_vsi *pf_vsi;
enum i40e_status_code ret;
int i, error;
int error;

hw = &pf->hw;
pf_vsi = &pf->vsi;
@ -1797,9 +1667,6 @@ ixl_if_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params)
goto fail;
}

for (i = 0; i < num_vfs; i++)
sysctl_ctx_init(&pf->vfs[i].ctx);

/*
* Add the VEB and ...
* - do nothing: VEPA mode
@ -1872,7 +1739,7 @@ ixl_if_iov_uninit(if_ctx_t ctx)

/* sysctl_ctx_free might sleep, but this func is called w/ an sx lock */
for (i = 0; i < num_vfs; i++)
sysctl_ctx_free(&vfs[i].ctx);
sysctl_ctx_free(&vfs[i].vsi.sysctl_ctx);
free(vfs, M_IXL);
}

@ -1911,8 +1778,7 @@ int
ixl_if_iov_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params)
{
struct ixl_pf *pf = iflib_get_softc(ctx);
device_t dev = pf->dev;
char sysctl_name[QUEUE_NAME_LEN];
char sysctl_name[IXL_QUEUE_NAME_LEN];
struct ixl_vf *vf;
const void *mac;
size_t size;
@ -1923,7 +1789,6 @@ ixl_if_iov_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params)
vf->vf_num = vfnum;
vf->vsi.back = pf;
vf->vf_flags = VF_FLAG_ENABLED;
SLIST_INIT(&vf->vsi.ftl);

/* Reserve queue allocation from PF */
vf_num_queues = nvlist_get_number(params, "num-queues");
@ -1961,7 +1826,7 @@ ixl_if_iov_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params)
out:
if (error == 0) {
snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
ixl_add_vsi_sysctls(dev, &vf->vsi, &vf->ctx, sysctl_name);
ixl_vsi_add_sysctls(&vf->vsi, sysctl_name, false);
}

return (error);

File diff suppressed because it is too large
@ -892,12 +892,11 @@ ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
}

void
ixl_add_queues_sysctls(device_t dev, struct ixl_vsi *vsi)
ixl_vsi_add_queues_stats(struct ixl_vsi *vsi, struct sysctl_ctx_list *ctx)
{
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid_list *vsi_list, *queue_list;
struct sysctl_oid *queue_node;
char queue_namebuf[32];
char queue_namebuf[IXL_QUEUE_NAME_LEN];

struct ixl_rx_queue *rx_que;
struct ixl_tx_queue *tx_que;
@ -909,7 +908,7 @@ ixl_add_queues_sysctls(device_t dev, struct ixl_vsi *vsi)
/* Queue statistics */
for (int q = 0; q < vsi->num_rx_queues; q++) {
bzero(queue_namebuf, sizeof(queue_namebuf));
snprintf(queue_namebuf, QUEUE_NAME_LEN, "rxq%02d", q);
snprintf(queue_namebuf, sizeof(queue_namebuf), "rxq%02d", q);
queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
OID_AUTO, queue_namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE,
NULL, "RX Queue #");
@ -937,7 +936,7 @@ ixl_add_queues_sysctls(device_t dev, struct ixl_vsi *vsi)
}
for (int q = 0; q < vsi->num_tx_queues; q++) {
bzero(queue_namebuf, sizeof(queue_namebuf));
snprintf(queue_namebuf, QUEUE_NAME_LEN, "txq%02d", q);
snprintf(queue_namebuf, sizeof(queue_namebuf), "txq%02d", q);
queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
OID_AUTO, queue_namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE,
NULL, "TX Queue #");

@ -5,7 +5,8 @@
KMOD = if_ixl
SRCS = device_if.h bus_if.h pci_if.h ifdi_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h opt_iflib.h
SRCS += if_ixl.c ixl_pf_main.c ixl_pf_qmgr.c ixl_txrx.c ixl_pf_i2c.c i40e_osdep.c
SRCS += ixl_pf_main.c ixl_pf_qmgr.c ixl_txrx.c ixl_pf_i2c.c i40e_osdep.c
SRCS += if_ixl.c ixl_pf_iflib.c
SRCS.PCI_IOV += pci_iov_if.h ixl_pf_iov.c

# Shared source