iavf(4): Split source and update to 3.0.26-k

The iavf(4) driver now uses a different source base from ixl(4), since
it will be the standard VF driver for new Intel Ethernet products going
forward, including those supported by ice(4). It continues to use the
iflib framework for network drivers.

Since the driver now uses a separate source code base, this commit adds
a new sys/dev/iavf directory, but reuses the existing module name so no
configuration changes are necessary.
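
For example, an existing kernel configuration entry or loader setting
keeps working unchanged (a sketch; "device iavf" and the if_iavf module
name are the pre-existing conventions, not new to this commit):

	device iavf			# kernel configuration file

or, when loading the driver as a module at boot:

	if_iavf_load="YES"		# /boot/loader.conf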

Signed-off-by: Eric Joyner <erj@FreeBSD.org>

Reviewed by:		kbowling@
Tested by:		lukasz.szczepaniak@intel.com
Sponsored by:		Intel Corporation
Differential Revision:	https://reviews.freebsd.org/D28636
Eric Joyner 2021-02-12 13:28:18 -08:00
parent 483a226238
commit ca853dee3b
29 changed files with 13536 additions and 16 deletions


@@ -110,6 +110,22 @@ dev/axgbe/xgbe-i2c.c optional axp
 dev/axgbe/xgbe-phy-v2.c optional axp
 dev/hyperv/vmbus/amd64/hyperv_machdep.c optional hyperv
 dev/hyperv/vmbus/amd64/vmbus_vector.S optional hyperv
+dev/iavf/if_iavf_iflib.c optional iavf pci \
+	compile-with "${NORMAL_C} -I$S/dev/iavf"
+dev/iavf/iavf_lib.c optional iavf pci \
+	compile-with "${NORMAL_C} -I$S/dev/iavf"
+dev/iavf/iavf_osdep.c optional iavf pci \
+	compile-with "${NORMAL_C} -I$S/dev/iavf"
+dev/iavf/iavf_txrx_iflib.c optional iavf pci \
+	compile-with "${NORMAL_C} -I$S/dev/iavf"
+dev/iavf/iavf_common.c optional iavf pci \
+	compile-with "${NORMAL_C} -I$S/dev/iavf"
+dev/iavf/iavf_adminq.c optional iavf pci \
+	compile-with "${NORMAL_C} -I$S/dev/iavf"
+dev/iavf/iavf_vc_common.c optional iavf pci \
+	compile-with "${NORMAL_C} -I$S/dev/iavf"
+dev/iavf/iavf_vc_iflib.c optional iavf pci \
+	compile-with "${NORMAL_C} -I$S/dev/iavf"
 dev/ice/if_ice_iflib.c optional ice pci \
 	compile-with "${NORMAL_C} -I$S/dev/ice"
 dev/ice/ice_lib.c optional ice pci \
@@ -172,23 +188,19 @@ dev/ixl/ixl_pf_iov.c optional ixl pci pci_iov \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"
 dev/ixl/ixl_pf_i2c.c optional ixl pci \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/if_iavf.c optional iavf pci \
-	compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/iavf_vc.c optional iavf pci \
-	compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/ixl_txrx.c optional ixl pci | iavf pci \
+dev/ixl/ixl_txrx.c optional ixl pci \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/i40e_osdep.c optional ixl pci | iavf pci \
+dev/ixl/i40e_osdep.c optional ixl pci \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/i40e_lan_hmc.c optional ixl pci | iavf pci \
+dev/ixl/i40e_lan_hmc.c optional ixl pci \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/i40e_hmc.c optional ixl pci | iavf pci \
+dev/ixl/i40e_hmc.c optional ixl pci \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/i40e_common.c optional ixl pci | iavf pci \
+dev/ixl/i40e_common.c optional ixl pci \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/i40e_nvm.c optional ixl pci | iavf pci \
+dev/ixl/i40e_nvm.c optional ixl pci \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/i40e_adminq.c optional ixl pci | iavf pci \
+dev/ixl/i40e_adminq.c optional ixl pci \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"
 dev/ixl/i40e_dcb.c optional ixl pci \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"

sys/dev/iavf/iavf_adminq.c Normal file

@@ -0,0 +1,990 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"
/**
* iavf_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
* This assumes the alloc_asq and alloc_arq functions have already been called
**/
STATIC void iavf_adminq_init_regs(struct iavf_hw *hw)
{
/* set head and tail registers in our local struct */
hw->aq.asq.tail = IAVF_VF_ATQT1;
hw->aq.asq.head = IAVF_VF_ATQH1;
hw->aq.asq.len = IAVF_VF_ATQLEN1;
hw->aq.asq.bal = IAVF_VF_ATQBAL1;
hw->aq.asq.bah = IAVF_VF_ATQBAH1;
hw->aq.arq.tail = IAVF_VF_ARQT1;
hw->aq.arq.head = IAVF_VF_ARQH1;
hw->aq.arq.len = IAVF_VF_ARQLEN1;
hw->aq.arq.bal = IAVF_VF_ARQBAL1;
hw->aq.arq.bah = IAVF_VF_ARQBAH1;
}
/**
* iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
* @hw: pointer to the hardware structure
**/
enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
enum iavf_status ret_code;
ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
iavf_mem_atq_ring,
(hw->aq.num_asq_entries *
sizeof(struct iavf_aq_desc)),
IAVF_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
return ret_code;
ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
(hw->aq.num_asq_entries *
sizeof(struct iavf_asq_cmd_details)));
if (ret_code) {
iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
return ret_code;
}
return ret_code;
}
/**
* iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
* @hw: pointer to the hardware structure
**/
enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
enum iavf_status ret_code;
ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
iavf_mem_arq_ring,
(hw->aq.num_arq_entries *
sizeof(struct iavf_aq_desc)),
IAVF_ADMINQ_DESC_ALIGNMENT);
return ret_code;
}
/**
* iavf_free_adminq_asq - Free Admin Queue send rings
* @hw: pointer to the hardware structure
*
* This assumes the posted send buffers have already been cleaned
* and de-allocated
**/
void iavf_free_adminq_asq(struct iavf_hw *hw)
{
iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
/**
* iavf_free_adminq_arq - Free Admin Queue receive rings
* @hw: pointer to the hardware structure
*
* This assumes the posted receive buffers have already been cleaned
* and de-allocated
**/
void iavf_free_adminq_arq(struct iavf_hw *hw)
{
iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
/**
* iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
* @hw: pointer to the hardware structure
**/
STATIC enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
enum iavf_status ret_code;
struct iavf_aq_desc *desc;
struct iavf_dma_mem *bi;
int i;
/* We'll be allocating the buffer info memory first, then we can
* allocate the mapped buffers for the event processing
*/
/* buffer_info structures do not need alignment */
ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
(hw->aq.num_arq_entries * sizeof(struct iavf_dma_mem)));
if (ret_code)
goto alloc_arq_bufs;
hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;
/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_arq_entries; i++) {
bi = &hw->aq.arq.r.arq_bi[i];
ret_code = iavf_allocate_dma_mem(hw, bi,
iavf_mem_arq_buf,
hw->aq.arq_buf_size,
IAVF_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
goto unwind_alloc_arq_bufs;
/* now configure the descriptors for use */
desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);
desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
desc->opcode = 0;
/* This is in accordance with Admin queue design, there is no
* register for buffer size configuration
*/
desc->datalen = CPU_TO_LE16((u16)bi->size);
desc->retval = 0;
desc->cookie_high = 0;
desc->cookie_low = 0;
desc->params.external.addr_high =
CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
desc->params.external.addr_low =
CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
desc->params.external.param0 = 0;
desc->params.external.param1 = 0;
}
alloc_arq_bufs:
return ret_code;
unwind_alloc_arq_bufs:
/* don't try to free the one that failed... */
i--;
for (; i >= 0; i--)
iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
return ret_code;
}
/**
* iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
* @hw: pointer to the hardware structure
**/
STATIC enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
enum iavf_status ret_code;
struct iavf_dma_mem *bi;
int i;
/* No mapped memory needed yet, just the buffer info structures */
ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
(hw->aq.num_asq_entries * sizeof(struct iavf_dma_mem)));
if (ret_code)
goto alloc_asq_bufs;
hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;
/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_asq_entries; i++) {
bi = &hw->aq.asq.r.asq_bi[i];
ret_code = iavf_allocate_dma_mem(hw, bi,
iavf_mem_asq_buf,
hw->aq.asq_buf_size,
IAVF_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
goto unwind_alloc_asq_bufs;
}
alloc_asq_bufs:
return ret_code;
unwind_alloc_asq_bufs:
/* don't try to free the one that failed... */
i--;
for (; i >= 0; i--)
iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
return ret_code;
}
/**
* iavf_free_arq_bufs - Free receive queue buffer info elements
* @hw: pointer to the hardware structure
**/
STATIC void iavf_free_arq_bufs(struct iavf_hw *hw)
{
int i;
/* free descriptors */
for (i = 0; i < hw->aq.num_arq_entries; i++)
iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
/* free the descriptor memory */
iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
/* free the dma header */
iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}
/**
* iavf_free_asq_bufs - Free send queue buffer info elements
* @hw: pointer to the hardware structure
**/
STATIC void iavf_free_asq_bufs(struct iavf_hw *hw)
{
int i;
/* only unmap if the address is non-NULL */
for (i = 0; i < hw->aq.num_asq_entries; i++)
if (hw->aq.asq.r.asq_bi[i].pa)
iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
/* free the buffer info list */
iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
/* free the descriptor memory */
iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
/* free the dma header */
iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
/**
* iavf_config_asq_regs - configure ASQ registers
* @hw: pointer to the hardware structure
*
* Configure base address and length registers for the transmit queue
**/
STATIC enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
{
enum iavf_status ret_code = IAVF_SUCCESS;
u32 reg = 0;
/* Clear Head and Tail */
wr32(hw, hw->aq.asq.head, 0);
wr32(hw, hw->aq.asq.tail, 0);
/* set starting point */
wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
IAVF_VF_ATQLEN1_ATQENABLE_MASK));
wr32(hw, hw->aq.asq.bal, IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
wr32(hw, hw->aq.asq.bah, IAVF_HI_DWORD(hw->aq.asq.desc_buf.pa));
/* Check one register to verify that config was applied */
reg = rd32(hw, hw->aq.asq.bal);
if (reg != IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
return ret_code;
}
/**
* iavf_config_arq_regs - ARQ register configuration
* @hw: pointer to the hardware structure
*
* Configure base address and length registers for the receive (event queue)
**/
STATIC enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
{
enum iavf_status ret_code = IAVF_SUCCESS;
u32 reg = 0;
/* Clear Head and Tail */
wr32(hw, hw->aq.arq.head, 0);
wr32(hw, hw->aq.arq.tail, 0);
/* set starting point */
wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
IAVF_VF_ARQLEN1_ARQENABLE_MASK));
wr32(hw, hw->aq.arq.bal, IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
wr32(hw, hw->aq.arq.bah, IAVF_HI_DWORD(hw->aq.arq.desc_buf.pa));
/* Update tail in the HW to post pre-allocated buffers */
wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
/* Check one register to verify that config was applied */
reg = rd32(hw, hw->aq.arq.bal);
if (reg != IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
return ret_code;
}
/**
* iavf_init_asq - main initialization routine for ASQ
* @hw: pointer to the hardware structure
*
* This is the main initialization routine for the Admin Send Queue
* Prior to calling this function, drivers *MUST* set the following fields
* in the hw->aq structure:
* - hw->aq.num_asq_entries
* - hw->aq.asq_buf_size
*
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
enum iavf_status ret_code = IAVF_SUCCESS;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
ret_code = IAVF_ERR_NOT_READY;
goto init_adminq_exit;
}
/* verify input for valid configuration */
if ((hw->aq.num_asq_entries == 0) ||
(hw->aq.asq_buf_size == 0)) {
ret_code = IAVF_ERR_CONFIG;
goto init_adminq_exit;
}
hw->aq.asq.next_to_use = 0;
hw->aq.asq.next_to_clean = 0;
/* allocate the ring memory */
ret_code = iavf_alloc_adminq_asq_ring(hw);
if (ret_code != IAVF_SUCCESS)
goto init_adminq_exit;
/* allocate buffers in the rings */
ret_code = iavf_alloc_asq_bufs(hw);
if (ret_code != IAVF_SUCCESS)
goto init_adminq_free_rings;
/* initialize base registers */
ret_code = iavf_config_asq_regs(hw);
if (ret_code != IAVF_SUCCESS)
goto init_config_regs;
/* success! */
hw->aq.asq.count = hw->aq.num_asq_entries;
goto init_adminq_exit;
init_adminq_free_rings:
iavf_free_adminq_asq(hw);
return ret_code;
init_config_regs:
iavf_free_asq_bufs(hw);
init_adminq_exit:
return ret_code;
}
/**
* iavf_init_arq - initialize ARQ
* @hw: pointer to the hardware structure
*
* The main initialization routine for the Admin Receive (Event) Queue.
* Prior to calling this function, drivers *MUST* set the following fields
* in the hw->aq structure:
* - hw->aq.num_arq_entries
* - hw->aq.arq_buf_size
*
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
enum iavf_status ret_code = IAVF_SUCCESS;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
ret_code = IAVF_ERR_NOT_READY;
goto init_adminq_exit;
}
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
(hw->aq.arq_buf_size == 0)) {
ret_code = IAVF_ERR_CONFIG;
goto init_adminq_exit;
}
hw->aq.arq.next_to_use = 0;
hw->aq.arq.next_to_clean = 0;
/* allocate the ring memory */
ret_code = iavf_alloc_adminq_arq_ring(hw);
if (ret_code != IAVF_SUCCESS)
goto init_adminq_exit;
/* allocate buffers in the rings */
ret_code = iavf_alloc_arq_bufs(hw);
if (ret_code != IAVF_SUCCESS)
goto init_adminq_free_rings;
/* initialize base registers */
ret_code = iavf_config_arq_regs(hw);
if (ret_code != IAVF_SUCCESS)
goto init_adminq_free_rings;
/* success! */
hw->aq.arq.count = hw->aq.num_arq_entries;
goto init_adminq_exit;
init_adminq_free_rings:
iavf_free_adminq_arq(hw);
init_adminq_exit:
return ret_code;
}
/**
* iavf_shutdown_asq - shutdown the ASQ
* @hw: pointer to the hardware structure
*
* The main shutdown routine for the Admin Send Queue
**/
enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
{
enum iavf_status ret_code = IAVF_SUCCESS;
iavf_acquire_spinlock(&hw->aq.asq_spinlock);
if (hw->aq.asq.count == 0) {
ret_code = IAVF_ERR_NOT_READY;
goto shutdown_asq_out;
}
/* Stop firmware AdminQ processing */
wr32(hw, hw->aq.asq.head, 0);
wr32(hw, hw->aq.asq.tail, 0);
wr32(hw, hw->aq.asq.len, 0);
wr32(hw, hw->aq.asq.bal, 0);
wr32(hw, hw->aq.asq.bah, 0);
hw->aq.asq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
iavf_free_asq_bufs(hw);
shutdown_asq_out:
iavf_release_spinlock(&hw->aq.asq_spinlock);
return ret_code;
}
/**
* iavf_shutdown_arq - shutdown ARQ
* @hw: pointer to the hardware structure
*
* The main shutdown routine for the Admin Receive Queue
**/
enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
{
enum iavf_status ret_code = IAVF_SUCCESS;
iavf_acquire_spinlock(&hw->aq.arq_spinlock);
if (hw->aq.arq.count == 0) {
ret_code = IAVF_ERR_NOT_READY;
goto shutdown_arq_out;
}
/* Stop firmware AdminQ processing */
wr32(hw, hw->aq.arq.head, 0);
wr32(hw, hw->aq.arq.tail, 0);
wr32(hw, hw->aq.arq.len, 0);
wr32(hw, hw->aq.arq.bal, 0);
wr32(hw, hw->aq.arq.bah, 0);
hw->aq.arq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
iavf_free_arq_bufs(hw);
shutdown_arq_out:
iavf_release_spinlock(&hw->aq.arq_spinlock);
return ret_code;
}
/**
* iavf_init_adminq - main initialization routine for Admin Queue
* @hw: pointer to the hardware structure
*
* Prior to calling this function, drivers *MUST* set the following fields
* in the hw->aq structure:
* - hw->aq.num_asq_entries
* - hw->aq.num_arq_entries
* - hw->aq.arq_buf_size
* - hw->aq.asq_buf_size
**/
enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
enum iavf_status ret_code;
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
(hw->aq.num_asq_entries == 0) ||
(hw->aq.arq_buf_size == 0) ||
(hw->aq.asq_buf_size == 0)) {
ret_code = IAVF_ERR_CONFIG;
goto init_adminq_exit;
}
iavf_init_spinlock(&hw->aq.asq_spinlock);
iavf_init_spinlock(&hw->aq.arq_spinlock);
/* Set up register offsets */
iavf_adminq_init_regs(hw);
/* setup ASQ command write back timeout */
hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;
/* allocate the ASQ */
ret_code = iavf_init_asq(hw);
if (ret_code != IAVF_SUCCESS)
goto init_adminq_destroy_spinlocks;
/* allocate the ARQ */
ret_code = iavf_init_arq(hw);
if (ret_code != IAVF_SUCCESS)
goto init_adminq_free_asq;
/* success! */
goto init_adminq_exit;
init_adminq_free_asq:
iavf_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
iavf_destroy_spinlock(&hw->aq.asq_spinlock);
iavf_destroy_spinlock(&hw->aq.arq_spinlock);
init_adminq_exit:
return ret_code;
}
/**
* iavf_shutdown_adminq - shutdown routine for the Admin Queue
* @hw: pointer to the hardware structure
**/
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
enum iavf_status ret_code = IAVF_SUCCESS;
if (iavf_check_asq_alive(hw))
iavf_aq_queue_shutdown(hw, true);
iavf_shutdown_asq(hw);
iavf_shutdown_arq(hw);
iavf_destroy_spinlock(&hw->aq.asq_spinlock);
iavf_destroy_spinlock(&hw->aq.arq_spinlock);
return ret_code;
}
/**
* iavf_clean_asq - cleans Admin send queue
* @hw: pointer to the hardware structure
*
* returns the number of free desc
**/
u16 iavf_clean_asq(struct iavf_hw *hw)
{
struct iavf_adminq_ring *asq = &(hw->aq.asq);
struct iavf_asq_cmd_details *details;
u16 ntc = asq->next_to_clean;
struct iavf_aq_desc desc_cb;
struct iavf_aq_desc *desc;
desc = IAVF_ADMINQ_DESC(*asq, ntc);
details = IAVF_ADMINQ_DETAILS(*asq, ntc);
while (rd32(hw, hw->aq.asq.head) != ntc) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
if (details->callback) {
IAVF_ADMINQ_CALLBACK cb_func =
(IAVF_ADMINQ_CALLBACK)details->callback;
iavf_memcpy(&desc_cb, desc, sizeof(struct iavf_aq_desc),
IAVF_DMA_TO_DMA);
cb_func(hw, &desc_cb);
}
iavf_memset(desc, 0, sizeof(*desc), IAVF_DMA_MEM);
iavf_memset(details, 0, sizeof(*details), IAVF_NONDMA_MEM);
ntc++;
if (ntc == asq->count)
ntc = 0;
desc = IAVF_ADMINQ_DESC(*asq, ntc);
details = IAVF_ADMINQ_DETAILS(*asq, ntc);
}
asq->next_to_clean = ntc;
return IAVF_DESC_UNUSED(asq);
}
/**
* iavf_asq_done - check if FW has processed the Admin Send Queue
* @hw: pointer to the hw struct
*
* Returns true if the firmware has processed all descriptors on the
* admin send queue. Returns false if there are still requests pending.
**/
bool iavf_asq_done(struct iavf_hw *hw)
{
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}
/**
* iavf_asq_send_command - send command to Admin Queue
* @hw: pointer to the hw struct
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buff: buffer to use for indirect commands
* @buff_size: size of buffer for indirect commands
* @cmd_details: pointer to command details structure
*
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc
**/
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
struct iavf_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct iavf_asq_cmd_details *cmd_details)
{
enum iavf_status status = IAVF_SUCCESS;
struct iavf_dma_mem *dma_buff = NULL;
struct iavf_asq_cmd_details *details;
struct iavf_aq_desc *desc_on_ring;
bool cmd_completed = false;
u16 retval = 0;
u32 val = 0;
iavf_acquire_spinlock(&hw->aq.asq_spinlock);
hw->aq.asq_last_status = IAVF_AQ_RC_OK;
if (hw->aq.asq.count == 0) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Admin queue not initialized.\n");
status = IAVF_ERR_QUEUE_EMPTY;
goto asq_send_command_error;
}
val = rd32(hw, hw->aq.asq.head);
if (val >= hw->aq.num_asq_entries) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
status = IAVF_ERR_QUEUE_EMPTY;
goto asq_send_command_error;
}
details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
iavf_memcpy(details,
cmd_details,
sizeof(struct iavf_asq_cmd_details),
IAVF_NONDMA_TO_NONDMA);
/* If the cmd_details are defined copy the cookie. The
* CPU_TO_LE32 is not needed here because the data is ignored
* by the FW, only used by the driver
*/
if (details->cookie) {
desc->cookie_high =
CPU_TO_LE32(IAVF_HI_DWORD(details->cookie));
desc->cookie_low =
CPU_TO_LE32(IAVF_LO_DWORD(details->cookie));
}
} else {
iavf_memset(details, 0,
sizeof(struct iavf_asq_cmd_details),
IAVF_NONDMA_MEM);
}
/* clear requested flags and then set additional flags if defined */
desc->flags &= ~CPU_TO_LE16(details->flags_dis);
desc->flags |= CPU_TO_LE16(details->flags_ena);
if (buff_size > hw->aq.asq_buf_size) {
iavf_debug(hw,
IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Invalid buffer size: %d.\n",
buff_size);
status = IAVF_ERR_INVALID_SIZE;
goto asq_send_command_error;
}
if (details->postpone && !details->async) {
iavf_debug(hw,
IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Async flag not set along with postpone flag");
status = IAVF_ERR_PARAM;
goto asq_send_command_error;
}
/* call clean and check queue available function to reclaim the
* descriptors that were processed by FW, the function returns the
* number of desc available
*/
/* the clean function called here could be called in a separate thread
* in case of asynchronous completions
*/
if (iavf_clean_asq(hw) == 0) {
iavf_debug(hw,
IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Error queue is full.\n");
status = IAVF_ERR_ADMIN_QUEUE_FULL;
goto asq_send_command_error;
}
/* initialize the temp desc pointer with the right desc */
desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
/* if the desc is available copy the temp desc to the right place */
iavf_memcpy(desc_on_ring, desc, sizeof(struct iavf_aq_desc),
IAVF_NONDMA_TO_DMA);
/* if buff is not NULL assume indirect command */
if (buff != NULL) {
dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
/* copy the user buff into the respective DMA buff */
iavf_memcpy(dma_buff->va, buff, buff_size,
IAVF_NONDMA_TO_DMA);
desc_on_ring->datalen = CPU_TO_LE16(buff_size);
/* Update the address values in the desc with the pa value
* for respective buffer
*/
desc_on_ring->params.external.addr_high =
CPU_TO_LE32(IAVF_HI_DWORD(dma_buff->pa));
desc_on_ring->params.external.addr_low =
CPU_TO_LE32(IAVF_LO_DWORD(dma_buff->pa));
}
/* bump the tail */
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
buff, buff_size);
(hw->aq.asq.next_to_use)++;
if (hw->aq.asq.next_to_use == hw->aq.asq.count)
hw->aq.asq.next_to_use = 0;
if (!details->postpone)
wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
/* if cmd_details are not defined or async flag is not set,
* we need to wait for desc write back
*/
if (!details->async && !details->postpone) {
u32 total_delay = 0;
do {
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
if (iavf_asq_done(hw))
break;
iavf_usec_delay(50);
total_delay += 50;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
/* if ready, copy the desc back to temp */
if (iavf_asq_done(hw)) {
iavf_memcpy(desc, desc_on_ring, sizeof(struct iavf_aq_desc),
IAVF_DMA_TO_NONDMA);
if (buff != NULL)
iavf_memcpy(buff, dma_buff->va, buff_size,
IAVF_DMA_TO_NONDMA);
retval = LE16_TO_CPU(desc->retval);
if (retval != 0) {
iavf_debug(hw,
IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Command completed with error 0x%X.\n",
retval);
/* strip off FW internal code */
retval &= 0xff;
}
cmd_completed = true;
if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
status = IAVF_SUCCESS;
else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
status = IAVF_ERR_NOT_READY;
else
status = IAVF_ERR_ADMIN_QUEUE_ERROR;
hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
}
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: desc and buffer writeback:\n");
iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
/* save writeback aq if requested */
if (details->wb_desc)
iavf_memcpy(details->wb_desc, desc_on_ring,
sizeof(struct iavf_aq_desc), IAVF_DMA_TO_NONDMA);
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: AQ Critical error.\n");
status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
} else {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Writeback timeout.\n");
status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
}
}
asq_send_command_error:
iavf_release_spinlock(&hw->aq.asq_spinlock);
return status;
}
/**
* iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
* @desc: pointer to the temp descriptor (non DMA mem)
* @opcode: the opcode can be used to decide which flags to turn off or on
*
* Fill the desc with default values
**/
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
u16 opcode)
{
/* zero out the desc */
iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc),
IAVF_NONDMA_MEM);
desc->opcode = CPU_TO_LE16(opcode);
desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_SI);
}
/**
* iavf_clean_arq_element
* @hw: pointer to the hw struct
* @e: event info from the receive descriptor, includes any buffers
* @pending: number of events that could be left to process
*
* This function cleans one Admin Receive Queue element and returns
* the contents through e. It can also return how many events are
* left to process through 'pending'
**/
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
struct iavf_arq_event_info *e,
u16 *pending)
{
enum iavf_status ret_code = IAVF_SUCCESS;
u16 ntc = hw->aq.arq.next_to_clean;
struct iavf_aq_desc *desc;
struct iavf_dma_mem *bi;
u16 desc_idx;
u16 datalen;
u16 flags;
u16 ntu;
/* pre-clean the event info */
iavf_memset(&e->desc, 0, sizeof(e->desc), IAVF_NONDMA_MEM);
/* take the lock before we start messing with the ring */
iavf_acquire_spinlock(&hw->aq.arq_spinlock);
if (hw->aq.arq.count == 0) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQRX: Admin queue not initialized.\n");
ret_code = IAVF_ERR_QUEUE_EMPTY;
goto clean_arq_element_err;
}
/* set next_to_use to head */
ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
goto clean_arq_element_out;
}
/* now clean the next descriptor */
desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
hw->aq.arq_last_status =
(enum iavf_admin_queue_err)LE16_TO_CPU(desc->retval);
flags = LE16_TO_CPU(desc->flags);
if (flags & IAVF_AQ_FLAG_ERR) {
ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
iavf_debug(hw,
IAVF_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
hw->aq.arq_last_status);
}
iavf_memcpy(&e->desc, desc, sizeof(struct iavf_aq_desc),
IAVF_DMA_TO_NONDMA);
datalen = LE16_TO_CPU(desc->datalen);
e->msg_len = min(datalen, e->buf_len);
if (e->msg_buf != NULL && (e->msg_len != 0))
iavf_memcpy(e->msg_buf,
hw->aq.arq.r.arq_bi[desc_idx].va,
e->msg_len, IAVF_DMA_TO_NONDMA);
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
hw->aq.arq_buf_size);
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
* size
*/
bi = &hw->aq.arq.r.arq_bi[ntc];
iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc), IAVF_DMA_MEM);
desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
desc->datalen = CPU_TO_LE16((u16)bi->size);
desc->params.external.addr_high = CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
desc->params.external.addr_low = CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
/* set tail = the last cleaned desc index. */
wr32(hw, hw->aq.arq.tail, ntc);
/* ntc is updated to tail + 1 */
ntc++;
if (ntc == hw->aq.num_arq_entries)
ntc = 0;
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)
*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
iavf_release_spinlock(&hw->aq.arq_spinlock);
return ret_code;
}
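
Taken together, these routines define the admin queue lifecycle: size the
queues, initialize, exchange descriptors, shut down. A minimal caller
sketch (illustrative only; my_adminq_example and the sizing constants are
invented for this note, and get_version merely stands in for any direct
command — a real driver keeps struct iavf_hw in its softc):

#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"

static enum iavf_status
my_adminq_example(struct iavf_hw *hw)
{
	struct iavf_arq_event_info event;
	struct iavf_aq_desc desc;
	enum iavf_status status;
	u8 buf[512];
	u16 pending;

	/* iavf_init_adminq() requires all four sizing fields up front. */
	hw->aq.num_asq_entries = 64;
	hw->aq.num_arq_entries = 64;
	hw->aq.asq_buf_size = 512;
	hw->aq.arq_buf_size = 512;

	status = iavf_init_adminq(hw);
	if (status != IAVF_SUCCESS)
		return status;

	/* Direct (buffer-less) command down the ASQ. */
	iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_get_version);
	status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);

	/* Poll one event (e.g. a PF message) off the ARQ. */
	event.buf_len = sizeof(buf);
	event.msg_buf = buf;
	(void)iavf_clean_arq_element(hw, &event, &pending);

	iavf_shutdown_adminq(hw);
	return status;
}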

sys/dev/iavf/iavf_adminq.h Normal file

@@ -0,0 +1,122 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _IAVF_ADMINQ_H_
#define _IAVF_ADMINQ_H_
#include "iavf_osdep.h"
#include "iavf_status.h"
#include "iavf_adminq_cmd.h"
#define IAVF_ADMINQ_DESC(R, i) \
(&(((struct iavf_aq_desc *)((R).desc_buf.va))[i]))
#define IAVF_ADMINQ_DESC_ALIGNMENT 4096
struct iavf_adminq_ring {
struct iavf_virt_mem dma_head; /* space for dma structures */
struct iavf_dma_mem desc_buf; /* descriptor ring memory */
struct iavf_virt_mem cmd_buf; /* command buffer memory */
union {
struct iavf_dma_mem *asq_bi;
struct iavf_dma_mem *arq_bi;
} r;
u16 count; /* Number of descriptors */
u16 rx_buf_len; /* Admin Receive Queue buffer length */
/* used for interrupt processing */
u16 next_to_use;
u16 next_to_clean;
/* used for queue tracking */
u32 head;
u32 tail;
u32 len;
u32 bah;
u32 bal;
};
/* ASQ transaction details */
struct iavf_asq_cmd_details {
void *callback; /* cast from type IAVF_ADMINQ_CALLBACK */
u64 cookie;
u16 flags_ena;
u16 flags_dis;
bool async;
bool postpone;
struct iavf_aq_desc *wb_desc;
};
#define IAVF_ADMINQ_DETAILS(R, i) \
(&(((struct iavf_asq_cmd_details *)((R).cmd_buf.va))[i]))
/* ARQ event information */
struct iavf_arq_event_info {
struct iavf_aq_desc desc;
u16 msg_len;
u16 buf_len;
u8 *msg_buf;
};
/* Admin Queue information */
struct iavf_adminq_info {
struct iavf_adminq_ring arq; /* receive queue */
struct iavf_adminq_ring asq; /* send queue */
u32 asq_cmd_timeout; /* send queue cmd write back timeout*/
u16 num_arq_entries; /* receive queue depth */
u16 num_asq_entries; /* send queue depth */
u16 arq_buf_size; /* receive queue buffer size */
u16 asq_buf_size; /* send queue buffer size */
u16 fw_maj_ver; /* firmware major version */
u16 fw_min_ver; /* firmware minor version */
u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
struct iavf_spinlock asq_spinlock; /* Send queue spinlock */
struct iavf_spinlock arq_spinlock; /* Receive queue spinlock */
/* last status values on send and receive queues */
enum iavf_admin_queue_err asq_last_status;
enum iavf_admin_queue_err arq_last_status;
};
/* general information */
#define IAVF_AQ_LARGE_BUF 512
#define IAVF_ASQ_CMD_TIMEOUT 250000 /* usecs */
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
u16 opcode);
#endif /* _IAVF_ADMINQ_H_ */

sys/dev/iavf/iavf_adminq_cmd.h Normal file

@@ -0,0 +1,678 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _IAVF_ADMINQ_CMD_H_
#define _IAVF_ADMINQ_CMD_H_
/* This header file defines the iavf Admin Queue commands and is shared between
* iavf Firmware and Software. Do not change the names in this file to IAVF
* because this file should be diff-able against the i40e version, even
* though many parts have been removed in this VF version.
*
* This file needs to comply with the Linux Kernel coding style.
*/
#define IAVF_FW_API_VERSION_MAJOR 0x0001
#define IAVF_FW_API_VERSION_MINOR_X722 0x0006
#define IAVF_FW_API_VERSION_MINOR_X710 0x0007
#define IAVF_FW_MINOR_VERSION(_h) ((_h)->mac.type == IAVF_MAC_XL710 ? \
IAVF_FW_API_VERSION_MINOR_X710 : \
IAVF_FW_API_VERSION_MINOR_X722)
/* API version 1.7 implements additional link and PHY-specific APIs */
#define IAVF_MINOR_VER_GET_LINK_INFO_XL710 0x0007
/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
#define IAVF_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
struct iavf_aq_desc {
__le16 flags;
__le16 opcode;
__le16 datalen;
__le16 retval;
__le32 cookie_high;
__le32 cookie_low;
union {
struct {
__le32 param0;
__le32 param1;
__le32 param2;
__le32 param3;
} internal;
struct {
__le32 param0;
__le32 param1;
__le32 addr_high;
__le32 addr_low;
} external;
u8 raw[16];
} params;
};
/* Flags sub-structure
* |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
* |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
*/
/* command flags and offsets*/
#define IAVF_AQ_FLAG_DD_SHIFT 0
#define IAVF_AQ_FLAG_CMP_SHIFT 1
#define IAVF_AQ_FLAG_ERR_SHIFT 2
#define IAVF_AQ_FLAG_VFE_SHIFT 3
#define IAVF_AQ_FLAG_LB_SHIFT 9
#define IAVF_AQ_FLAG_RD_SHIFT 10
#define IAVF_AQ_FLAG_VFC_SHIFT 11
#define IAVF_AQ_FLAG_BUF_SHIFT 12
#define IAVF_AQ_FLAG_SI_SHIFT 13
#define IAVF_AQ_FLAG_EI_SHIFT 14
#define IAVF_AQ_FLAG_FE_SHIFT 15
#define IAVF_AQ_FLAG_DD (1 << IAVF_AQ_FLAG_DD_SHIFT) /* 0x1 */
#define IAVF_AQ_FLAG_CMP (1 << IAVF_AQ_FLAG_CMP_SHIFT) /* 0x2 */
#define IAVF_AQ_FLAG_ERR (1 << IAVF_AQ_FLAG_ERR_SHIFT) /* 0x4 */
#define IAVF_AQ_FLAG_VFE (1 << IAVF_AQ_FLAG_VFE_SHIFT) /* 0x8 */
#define IAVF_AQ_FLAG_LB (1 << IAVF_AQ_FLAG_LB_SHIFT) /* 0x200 */
#define IAVF_AQ_FLAG_RD (1 << IAVF_AQ_FLAG_RD_SHIFT) /* 0x400 */
#define IAVF_AQ_FLAG_VFC (1 << IAVF_AQ_FLAG_VFC_SHIFT) /* 0x800 */
#define IAVF_AQ_FLAG_BUF (1 << IAVF_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
#define IAVF_AQ_FLAG_SI (1 << IAVF_AQ_FLAG_SI_SHIFT) /* 0x2000 */
#define IAVF_AQ_FLAG_EI (1 << IAVF_AQ_FLAG_EI_SHIFT) /* 0x4000 */
#define IAVF_AQ_FLAG_FE (1 << IAVF_AQ_FLAG_FE_SHIFT) /* 0x8000 */
/* error codes */
enum iavf_admin_queue_err {
IAVF_AQ_RC_OK = 0, /* success */
IAVF_AQ_RC_EPERM = 1, /* Operation not permitted */
IAVF_AQ_RC_ENOENT = 2, /* No such element */
IAVF_AQ_RC_ESRCH = 3, /* Bad opcode */
IAVF_AQ_RC_EINTR = 4, /* operation interrupted */
IAVF_AQ_RC_EIO = 5, /* I/O error */
IAVF_AQ_RC_ENXIO = 6, /* No such resource */
IAVF_AQ_RC_E2BIG = 7, /* Arg too long */
IAVF_AQ_RC_EAGAIN = 8, /* Try again */
IAVF_AQ_RC_ENOMEM = 9, /* Out of memory */
IAVF_AQ_RC_EACCES = 10, /* Permission denied */
IAVF_AQ_RC_EFAULT = 11, /* Bad address */
IAVF_AQ_RC_EBUSY = 12, /* Device or resource busy */
IAVF_AQ_RC_EEXIST = 13, /* object already exists */
IAVF_AQ_RC_EINVAL = 14, /* Invalid argument */
IAVF_AQ_RC_ENOTTY = 15, /* Not a typewriter */
IAVF_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
IAVF_AQ_RC_ENOSYS = 17, /* Function not implemented */
IAVF_AQ_RC_ERANGE = 18, /* Parameter out of range */
IAVF_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
IAVF_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
IAVF_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
IAVF_AQ_RC_EFBIG = 22, /* File too large */
};
/* Admin Queue command opcodes */
enum iavf_admin_queue_opc {
/* aq commands */
iavf_aqc_opc_get_version = 0x0001,
iavf_aqc_opc_driver_version = 0x0002,
iavf_aqc_opc_queue_shutdown = 0x0003,
iavf_aqc_opc_set_pf_context = 0x0004,
/* resource ownership */
iavf_aqc_opc_request_resource = 0x0008,
iavf_aqc_opc_release_resource = 0x0009,
iavf_aqc_opc_list_func_capabilities = 0x000A,
iavf_aqc_opc_list_dev_capabilities = 0x000B,
/* Proxy commands */
iavf_aqc_opc_set_proxy_config = 0x0104,
iavf_aqc_opc_set_ns_proxy_table_entry = 0x0105,
/* LAA */
iavf_aqc_opc_mac_address_read = 0x0107,
iavf_aqc_opc_mac_address_write = 0x0108,
/* PXE */
iavf_aqc_opc_clear_pxe_mode = 0x0110,
/* WoL commands */
iavf_aqc_opc_set_wol_filter = 0x0120,
iavf_aqc_opc_get_wake_reason = 0x0121,
iavf_aqc_opc_clear_all_wol_filters = 0x025E,
/* internal switch commands */
iavf_aqc_opc_get_switch_config = 0x0200,
iavf_aqc_opc_add_statistics = 0x0201,
iavf_aqc_opc_remove_statistics = 0x0202,
iavf_aqc_opc_set_port_parameters = 0x0203,
iavf_aqc_opc_get_switch_resource_alloc = 0x0204,
iavf_aqc_opc_set_switch_config = 0x0205,
iavf_aqc_opc_rx_ctl_reg_read = 0x0206,
iavf_aqc_opc_rx_ctl_reg_write = 0x0207,
iavf_aqc_opc_add_vsi = 0x0210,
iavf_aqc_opc_update_vsi_parameters = 0x0211,
iavf_aqc_opc_get_vsi_parameters = 0x0212,
iavf_aqc_opc_add_pv = 0x0220,
iavf_aqc_opc_update_pv_parameters = 0x0221,
iavf_aqc_opc_get_pv_parameters = 0x0222,
iavf_aqc_opc_add_veb = 0x0230,
iavf_aqc_opc_update_veb_parameters = 0x0231,
iavf_aqc_opc_get_veb_parameters = 0x0232,
iavf_aqc_opc_delete_element = 0x0243,
iavf_aqc_opc_add_macvlan = 0x0250,
iavf_aqc_opc_remove_macvlan = 0x0251,
iavf_aqc_opc_add_vlan = 0x0252,
iavf_aqc_opc_remove_vlan = 0x0253,
iavf_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
iavf_aqc_opc_add_tag = 0x0255,
iavf_aqc_opc_remove_tag = 0x0256,
iavf_aqc_opc_add_multicast_etag = 0x0257,
iavf_aqc_opc_remove_multicast_etag = 0x0258,
iavf_aqc_opc_update_tag = 0x0259,
iavf_aqc_opc_add_control_packet_filter = 0x025A,
iavf_aqc_opc_remove_control_packet_filter = 0x025B,
iavf_aqc_opc_add_cloud_filters = 0x025C,
iavf_aqc_opc_remove_cloud_filters = 0x025D,
iavf_aqc_opc_clear_wol_switch_filters = 0x025E,
iavf_aqc_opc_replace_cloud_filters = 0x025F,
iavf_aqc_opc_add_mirror_rule = 0x0260,
iavf_aqc_opc_delete_mirror_rule = 0x0261,
/* Dynamic Device Personalization */
iavf_aqc_opc_write_personalization_profile = 0x0270,
iavf_aqc_opc_get_personalization_profile_list = 0x0271,
/* DCB commands */
iavf_aqc_opc_dcb_ignore_pfc = 0x0301,
iavf_aqc_opc_dcb_updated = 0x0302,
iavf_aqc_opc_set_dcb_parameters = 0x0303,
/* TX scheduler */
iavf_aqc_opc_configure_vsi_bw_limit = 0x0400,
iavf_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
iavf_aqc_opc_configure_vsi_tc_bw = 0x0407,
iavf_aqc_opc_query_vsi_bw_config = 0x0408,
iavf_aqc_opc_query_vsi_ets_sla_config = 0x040A,
iavf_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
iavf_aqc_opc_enable_switching_comp_ets = 0x0413,
iavf_aqc_opc_modify_switching_comp_ets = 0x0414,
iavf_aqc_opc_disable_switching_comp_ets = 0x0415,
iavf_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
iavf_aqc_opc_configure_switching_comp_bw_config = 0x0417,
iavf_aqc_opc_query_switching_comp_ets_config = 0x0418,
iavf_aqc_opc_query_port_ets_config = 0x0419,
iavf_aqc_opc_query_switching_comp_bw_config = 0x041A,
iavf_aqc_opc_suspend_port_tx = 0x041B,
iavf_aqc_opc_resume_port_tx = 0x041C,
iavf_aqc_opc_configure_partition_bw = 0x041D,
/* hmc */
iavf_aqc_opc_query_hmc_resource_profile = 0x0500,
iavf_aqc_opc_set_hmc_resource_profile = 0x0501,
/* phy commands*/
iavf_aqc_opc_get_phy_abilities = 0x0600,
iavf_aqc_opc_set_phy_config = 0x0601,
iavf_aqc_opc_set_mac_config = 0x0603,
iavf_aqc_opc_set_link_restart_an = 0x0605,
iavf_aqc_opc_get_link_status = 0x0607,
iavf_aqc_opc_set_phy_int_mask = 0x0613,
iavf_aqc_opc_get_local_advt_reg = 0x0614,
iavf_aqc_opc_set_local_advt_reg = 0x0615,
iavf_aqc_opc_get_partner_advt = 0x0616,
iavf_aqc_opc_set_lb_modes = 0x0618,
iavf_aqc_opc_get_phy_wol_caps = 0x0621,
iavf_aqc_opc_set_phy_debug = 0x0622,
iavf_aqc_opc_upload_ext_phy_fm = 0x0625,
iavf_aqc_opc_run_phy_activity = 0x0626,
iavf_aqc_opc_set_phy_register = 0x0628,
iavf_aqc_opc_get_phy_register = 0x0629,
/* NVM commands */
iavf_aqc_opc_nvm_read = 0x0701,
iavf_aqc_opc_nvm_erase = 0x0702,
iavf_aqc_opc_nvm_update = 0x0703,
iavf_aqc_opc_nvm_config_read = 0x0704,
iavf_aqc_opc_nvm_config_write = 0x0705,
iavf_aqc_opc_nvm_progress = 0x0706,
iavf_aqc_opc_oem_post_update = 0x0720,
iavf_aqc_opc_thermal_sensor = 0x0721,
/* virtualization commands */
iavf_aqc_opc_send_msg_to_pf = 0x0801,
iavf_aqc_opc_send_msg_to_vf = 0x0802,
iavf_aqc_opc_send_msg_to_peer = 0x0803,
/* alternate structure */
iavf_aqc_opc_alternate_write = 0x0900,
iavf_aqc_opc_alternate_write_indirect = 0x0901,
iavf_aqc_opc_alternate_read = 0x0902,
iavf_aqc_opc_alternate_read_indirect = 0x0903,
iavf_aqc_opc_alternate_write_done = 0x0904,
iavf_aqc_opc_alternate_set_mode = 0x0905,
iavf_aqc_opc_alternate_clear_port = 0x0906,
/* LLDP commands */
iavf_aqc_opc_lldp_get_mib = 0x0A00,
iavf_aqc_opc_lldp_update_mib = 0x0A01,
iavf_aqc_opc_lldp_add_tlv = 0x0A02,
iavf_aqc_opc_lldp_update_tlv = 0x0A03,
iavf_aqc_opc_lldp_delete_tlv = 0x0A04,
iavf_aqc_opc_lldp_stop = 0x0A05,
iavf_aqc_opc_lldp_start = 0x0A06,
iavf_aqc_opc_get_cee_dcb_cfg = 0x0A07,
iavf_aqc_opc_lldp_set_local_mib = 0x0A08,
iavf_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
/* Tunnel commands */
iavf_aqc_opc_add_udp_tunnel = 0x0B00,
iavf_aqc_opc_del_udp_tunnel = 0x0B01,
iavf_aqc_opc_set_rss_key = 0x0B02,
iavf_aqc_opc_set_rss_lut = 0x0B03,
iavf_aqc_opc_get_rss_key = 0x0B04,
iavf_aqc_opc_get_rss_lut = 0x0B05,
/* Async Events */
iavf_aqc_opc_event_lan_overflow = 0x1001,
/* OEM commands */
iavf_aqc_opc_oem_parameter_change = 0xFE00,
iavf_aqc_opc_oem_device_status_change = 0xFE01,
iavf_aqc_opc_oem_ocsd_initialize = 0xFE02,
iavf_aqc_opc_oem_ocbb_initialize = 0xFE03,
/* debug commands */
iavf_aqc_opc_debug_read_reg = 0xFF03,
iavf_aqc_opc_debug_write_reg = 0xFF04,
iavf_aqc_opc_debug_modify_reg = 0xFF07,
iavf_aqc_opc_debug_dump_internals = 0xFF08,
};
/* command structures and indirect data structures */
/* Structure naming conventions:
* - no suffix for direct command descriptor structures
* - _data for indirect sent data
* - _resp for indirect return data (data which is both will use _data)
* - _completion for direct return data
* - _element_ for repeated elements (may also be _data or _resp)
*
* Command structures are expected to overlay the params.raw member of the basic
* descriptor, and as such cannot exceed 16 bytes in length.
*/
/* This macro is used to generate a compilation error if a structure
* is not exactly the correct length. It gives a divide by zero error if the
* structure is not of the correct size, otherwise it creates an enum that is
* never used.
*/
#define IAVF_CHECK_STRUCT_LEN(n, X) enum iavf_static_assert_enum_##X \
{ iavf_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
/* This macro is used extensively to ensure that command structures are 16
* bytes in length as they have to map to the raw array of that size.
*/
#define IAVF_CHECK_CMD_LENGTH(X) IAVF_CHECK_STRUCT_LEN(16, X)
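/* For example, IAVF_CHECK_CMD_LENGTH(iavf_aqc_queue_shutdown) below expands
 * (with n == 16) to:
 *
 *   enum iavf_static_assert_enum_iavf_aqc_queue_shutdown {
 *       iavf_static_assert_iavf_aqc_queue_shutdown =
 *           16 / ((sizeof(struct iavf_aqc_queue_shutdown) == 16) ? 1 : 0)
 *   };
 *
 * If the struct ever drifts from 16 bytes the divisor becomes zero and the
 * compiler rejects the division in a constant expression.
 */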
/* Queue Shutdown (direct 0x0003) */
struct iavf_aqc_queue_shutdown {
__le32 driver_unloading;
#define IAVF_AQ_DRIVER_UNLOADING 0x1
u8 reserved[12];
};
IAVF_CHECK_CMD_LENGTH(iavf_aqc_queue_shutdown);
#define IAVF_AQC_WOL_PRESERVE_STATUS 0x200
#define IAVF_AQC_MC_MAG_EN 0x0100
#define IAVF_AQC_WOL_PRESERVE_ON_PFR 0x0200
struct iavf_aqc_vsi_properties_data {
/* first 96 byte are written by SW */
__le16 valid_sections;
#define IAVF_AQ_VSI_PROP_SWITCH_VALID 0x0001
#define IAVF_AQ_VSI_PROP_SECURITY_VALID 0x0002
#define IAVF_AQ_VSI_PROP_VLAN_VALID 0x0004
#define IAVF_AQ_VSI_PROP_CAS_PV_VALID 0x0008
#define IAVF_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
#define IAVF_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
#define IAVF_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
#define IAVF_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
#define IAVF_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
#define IAVF_AQ_VSI_PROP_SCHED_VALID 0x0200
/* switch section */
__le16 switch_id; /* 12bit id combined with flags below */
#define IAVF_AQ_VSI_SW_ID_SHIFT 0x0000
#define IAVF_AQ_VSI_SW_ID_MASK (0xFFF << IAVF_AQ_VSI_SW_ID_SHIFT)
#define IAVF_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
#define IAVF_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
#define IAVF_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
u8 sw_reserved[2];
/* security section */
u8 sec_flags;
#define IAVF_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
#define IAVF_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
#define IAVF_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
u8 sec_reserved;
/* VLAN section */
__le16 pvid; /* VLANS include priority bits */
__le16 fcoe_pvid;
u8 port_vlan_flags;
#define IAVF_AQ_VSI_PVLAN_MODE_SHIFT 0x00
#define IAVF_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
IAVF_AQ_VSI_PVLAN_MODE_SHIFT)
#define IAVF_AQ_VSI_PVLAN_MODE_TAGGED 0x01
#define IAVF_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
#define IAVF_AQ_VSI_PVLAN_MODE_ALL 0x03
#define IAVF_AQ_VSI_PVLAN_INSERT_PVID 0x04
#define IAVF_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
#define IAVF_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
IAVF_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IAVF_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
#define IAVF_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
#define IAVF_AQ_VSI_PVLAN_EMOD_STR 0x10
#define IAVF_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
u8 pvlan_reserved[3];
/* ingress egress up sections */
__le32 ingress_table; /* bitmap, 3 bits per up */
#define IAVF_AQ_VSI_UP_TABLE_UP0_SHIFT 0
#define IAVF_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
IAVF_AQ_VSI_UP_TABLE_UP0_SHIFT)
#define IAVF_AQ_VSI_UP_TABLE_UP1_SHIFT 3
#define IAVF_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
IAVF_AQ_VSI_UP_TABLE_UP1_SHIFT)
#define IAVF_AQ_VSI_UP_TABLE_UP2_SHIFT 6
#define IAVF_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
IAVF_AQ_VSI_UP_TABLE_UP2_SHIFT)
#define IAVF_AQ_VSI_UP_TABLE_UP3_SHIFT 9
#define IAVF_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
IAVF_AQ_VSI_UP_TABLE_UP3_SHIFT)
#define IAVF_AQ_VSI_UP_TABLE_UP4_SHIFT 12
#define IAVF_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
IAVF_AQ_VSI_UP_TABLE_UP4_SHIFT)
#define IAVF_AQ_VSI_UP_TABLE_UP5_SHIFT 15
#define IAVF_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
IAVF_AQ_VSI_UP_TABLE_UP5_SHIFT)
#define IAVF_AQ_VSI_UP_TABLE_UP6_SHIFT 18
#define IAVF_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
IAVF_AQ_VSI_UP_TABLE_UP6_SHIFT)
#define IAVF_AQ_VSI_UP_TABLE_UP7_SHIFT 21
#define IAVF_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
IAVF_AQ_VSI_UP_TABLE_UP7_SHIFT)
__le32 egress_table; /* same defines as for ingress table */
/* cascaded PV section */
__le16 cas_pv_tag;
u8 cas_pv_flags;
#define IAVF_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
#define IAVF_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
IAVF_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IAVF_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
#define IAVF_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
#define IAVF_AQ_VSI_CAS_PV_TAGX_COPY 0x02
#define IAVF_AQ_VSI_CAS_PV_INSERT_TAG 0x10
#define IAVF_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
#define IAVF_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
u8 cas_pv_reserved;
/* queue mapping section */
__le16 mapping_flags;
#define IAVF_AQ_VSI_QUE_MAP_CONTIG 0x0
#define IAVF_AQ_VSI_QUE_MAP_NONCONTIG 0x1
__le16 queue_mapping[16];
#define IAVF_AQ_VSI_QUEUE_SHIFT 0x0
#define IAVF_AQ_VSI_QUEUE_MASK (0x7FF << IAVF_AQ_VSI_QUEUE_SHIFT)
__le16 tc_mapping[8];
#define IAVF_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
#define IAVF_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
IAVF_AQ_VSI_TC_QUE_OFFSET_SHIFT)
#define IAVF_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
#define IAVF_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
IAVF_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
u8 queueing_opt_flags;
#define IAVF_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
#define IAVF_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
#define IAVF_AQ_VSI_QUE_OPT_TCP_ENA 0x10
#define IAVF_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
#define IAVF_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
#define IAVF_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
u8 queueing_opt_reserved[3];
/* scheduler section */
u8 up_enable_bits;
u8 sched_reserved;
/* outer up section */
__le32 outer_up_table; /* same structure and defines as ingress tbl */
u8 cmd_reserved[8];
/* last 32 bytes are written by FW */
__le16 qs_handle[8];
#define IAVF_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
__le16 stat_counter_idx;
__le16 sched_id;
u8 resp_reserved[12];
};
IAVF_CHECK_STRUCT_LEN(128, iavf_aqc_vsi_properties_data);
/* Get VEB Parameters (direct 0x0232)
* uses iavf_aqc_switch_seid for the descriptor
*/
struct iavf_aqc_get_veb_parameters_completion {
__le16 seid;
__le16 switch_id;
__le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
__le16 statistic_index;
__le16 vebs_used;
__le16 vebs_free;
u8 reserved[4];
};
IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_veb_parameters_completion);
#define IAVF_LINK_SPEED_100MB_SHIFT 0x1
#define IAVF_LINK_SPEED_1000MB_SHIFT 0x2
#define IAVF_LINK_SPEED_10GB_SHIFT 0x3
#define IAVF_LINK_SPEED_40GB_SHIFT 0x4
#define IAVF_LINK_SPEED_20GB_SHIFT 0x5
#define IAVF_LINK_SPEED_25GB_SHIFT 0x6
enum iavf_aq_link_speed {
IAVF_LINK_SPEED_UNKNOWN = 0,
IAVF_LINK_SPEED_100MB = (1 << IAVF_LINK_SPEED_100MB_SHIFT),
IAVF_LINK_SPEED_1GB = (1 << IAVF_LINK_SPEED_1000MB_SHIFT),
IAVF_LINK_SPEED_10GB = (1 << IAVF_LINK_SPEED_10GB_SHIFT),
IAVF_LINK_SPEED_40GB = (1 << IAVF_LINK_SPEED_40GB_SHIFT),
IAVF_LINK_SPEED_20GB = (1 << IAVF_LINK_SPEED_20GB_SHIFT),
IAVF_LINK_SPEED_25GB = (1 << IAVF_LINK_SPEED_25GB_SHIFT),
};
#define IAVF_AQ_LINK_UP_FUNCTION 0x01
/* Send to PF command (indirect 0x0801) id is only used by PF
* Send to VF command (indirect 0x0802) id is only used by PF
* Send to Peer PF command (indirect 0x0803)
*/
struct iavf_aqc_pf_vf_message {
__le32 id;
u8 reserved[4];
__le32 addr_high;
__le32 addr_low;
};
IAVF_CHECK_CMD_LENGTH(iavf_aqc_pf_vf_message);
/* Get CEE DCBX Oper Config (0x0A07)
* uses the generic descriptor struct
* returns below as indirect response
*/
#define IAVF_AQC_CEE_APP_FCOE_SHIFT 0x0
#define IAVF_AQC_CEE_APP_FCOE_MASK (0x7 << IAVF_AQC_CEE_APP_FCOE_SHIFT)
#define IAVF_AQC_CEE_APP_ISCSI_SHIFT 0x3
#define IAVF_AQC_CEE_APP_ISCSI_MASK (0x7 << IAVF_AQC_CEE_APP_ISCSI_SHIFT)
#define IAVF_AQC_CEE_APP_FIP_SHIFT 0x8
#define IAVF_AQC_CEE_APP_FIP_MASK (0x7 << IAVF_AQC_CEE_APP_FIP_SHIFT)
#define IAVF_AQC_CEE_PG_STATUS_SHIFT 0x0
#define IAVF_AQC_CEE_PG_STATUS_MASK (0x7 << IAVF_AQC_CEE_PG_STATUS_SHIFT)
#define IAVF_AQC_CEE_PFC_STATUS_SHIFT 0x3
#define IAVF_AQC_CEE_PFC_STATUS_MASK (0x7 << IAVF_AQC_CEE_PFC_STATUS_SHIFT)
#define IAVF_AQC_CEE_APP_STATUS_SHIFT 0x8
#define IAVF_AQC_CEE_APP_STATUS_MASK (0x7 << IAVF_AQC_CEE_APP_STATUS_SHIFT)
#define IAVF_AQC_CEE_FCOE_STATUS_SHIFT 0x8
#define IAVF_AQC_CEE_FCOE_STATUS_MASK (0x7 << IAVF_AQC_CEE_FCOE_STATUS_SHIFT)
#define IAVF_AQC_CEE_ISCSI_STATUS_SHIFT 0xB
#define IAVF_AQC_CEE_ISCSI_STATUS_MASK (0x7 << IAVF_AQC_CEE_ISCSI_STATUS_SHIFT)
#define IAVF_AQC_CEE_FIP_STATUS_SHIFT 0x10
#define IAVF_AQC_CEE_FIP_STATUS_MASK (0x7 << IAVF_AQC_CEE_FIP_STATUS_SHIFT)
/* struct iavf_aqc_get_cee_dcb_cfg_v1_resp was originally defined with
* word boundary layout issues, which the Linux compilers silently deal
* with by adding padding, making the actual struct larger than designed.
* However, the FW compiler for the NIC is less lenient and complains
* about the struct. Hence, the struct defined here has an extra byte in
* fields reserved3 and reserved4 to directly acknowledge that padding,
* and the new length is used in the length check macro.
*/
struct iavf_aqc_get_cee_dcb_cfg_v1_resp {
u8 reserved1;
u8 oper_num_tc;
u8 oper_prio_tc[4];
u8 reserved2;
u8 oper_tc_bw[8];
u8 oper_pfc_en;
u8 reserved3[2];
__le16 oper_app_prio;
u8 reserved4[2];
__le16 tlv_status;
};
IAVF_CHECK_STRUCT_LEN(0x18, iavf_aqc_get_cee_dcb_cfg_v1_resp);
struct iavf_aqc_get_cee_dcb_cfg_resp {
u8 oper_num_tc;
u8 oper_prio_tc[4];
u8 oper_tc_bw[8];
u8 oper_pfc_en;
__le16 oper_app_prio;
__le32 tlv_status;
u8 reserved[12];
};
IAVF_CHECK_STRUCT_LEN(0x20, iavf_aqc_get_cee_dcb_cfg_resp);
/* Set Local LLDP MIB (indirect 0x0A08)
* Used to replace the local MIB of a given LLDP agent. e.g. DCBx
*/
struct iavf_aqc_lldp_set_local_mib {
#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \
SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0
#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1)
#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \
SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1
u8 type;
u8 reserved0;
__le16 length;
u8 reserved1[4];
__le32 address_high;
__le32 address_low;
};
IAVF_CHECK_CMD_LENGTH(iavf_aqc_lldp_set_local_mib);
struct iavf_aqc_lldp_set_local_mib_resp {
#define SET_LOCAL_MIB_RESP_EVENT_TRIGGERED_MASK 0x01
u8 status;
u8 reserved[15];
};
IAVF_CHECK_STRUCT_LEN(0x10, iavf_aqc_lldp_set_local_mib_resp);
/* Stop/Start LLDP Agent (direct 0x0A09)
* Used for stopping/starting specific LLDP agent. e.g. DCBx
*/
struct iavf_aqc_lldp_stop_start_specific_agent {
#define IAVF_AQC_START_SPECIFIC_AGENT_SHIFT 0
#define IAVF_AQC_START_SPECIFIC_AGENT_MASK \
(1 << IAVF_AQC_START_SPECIFIC_AGENT_SHIFT)
u8 command;
u8 reserved[15];
};
IAVF_CHECK_CMD_LENGTH(iavf_aqc_lldp_stop_start_specific_agent);
struct iavf_aqc_get_set_rss_key {
#define IAVF_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
#define IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
#define IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
__le16 vsi_id;
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
};
IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_set_rss_key);
struct iavf_aqc_get_set_rss_key_data {
u8 standard_rss_key[0x28];
u8 extended_hash_key[0xc];
};
IAVF_CHECK_STRUCT_LEN(0x34, iavf_aqc_get_set_rss_key_data);
struct iavf_aqc_get_set_rss_lut {
#define IAVF_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
#define IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
#define IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
__le16 vsi_id;
#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
__le16 flags;
u8 reserved[4];
__le32 addr_high;
__le32 addr_low;
};
IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_set_rss_lut);
#endif /* _IAVF_ADMINQ_CMD_H_ */

sys/dev/iavf/iavf_alloc.h Normal file

@@ -0,0 +1,64 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _IAVF_ALLOC_H_
#define _IAVF_ALLOC_H_
struct iavf_hw;
/* Memory allocation types */
enum iavf_memory_type {
iavf_mem_arq_buf = 0, /* ARQ indirect command buffer */
iavf_mem_asq_buf = 1,
iavf_mem_atq_buf = 2, /* ATQ indirect command buffer */
iavf_mem_arq_ring = 3, /* ARQ descriptor ring */
iavf_mem_atq_ring = 4, /* ATQ descriptor ring */
iavf_mem_pd = 5, /* Page Descriptor */
iavf_mem_bp = 6, /* Backing Page - 4KB */
iavf_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
iavf_mem_reserved
};
/* prototype for functions used for dynamic memory allocation */
enum iavf_status iavf_allocate_dma_mem(struct iavf_hw *hw,
struct iavf_dma_mem *mem,
enum iavf_memory_type type,
u64 size, u32 alignment);
enum iavf_status iavf_free_dma_mem(struct iavf_hw *hw,
struct iavf_dma_mem *mem);
enum iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,
struct iavf_virt_mem *mem,
u32 size);
enum iavf_status iavf_free_virt_mem(struct iavf_hw *hw,
struct iavf_virt_mem *mem);
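/*
 * Usage sketch (illustrative, not part of the commit): a caller pairs the
 * allocate and free routines as below. The function name and the 4 KB
 * alignment are assumptions made for this example only.
 */
static inline enum iavf_status
iavf_example_ring_alloc(struct iavf_hw *hw, struct iavf_dma_mem *mem, u64 size)
{
	enum iavf_status status;

	/* Back an ARQ descriptor ring with DMA-safe, 4 KB-aligned memory. */
	status = iavf_allocate_dma_mem(hw, mem, iavf_mem_arq_ring, size, 4096);
	if (status)
		return (status);

	/* ... hardware sees mem->pa; the driver writes through mem->va ... */

	return (iavf_free_dma_mem(hw, mem));
}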
#endif /* _IAVF_ALLOC_H_ */

sys/dev/iavf/iavf_common.c Normal file

File diff suppressed because it is too large

sys/dev/iavf/iavf_debug.h Normal file

@@ -0,0 +1,131 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file iavf_debug.h
* @brief Debug macros
*
* Contains definitions for useful debug macros which can be enabled by
* building with IAVF_DEBUG defined.
*/
#ifndef _IAVF_DEBUG_H_
#define _IAVF_DEBUG_H_
#define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x"
#define MAC_FORMAT_ARGS(mac_addr) \
(mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \
(mac_addr)[4], (mac_addr)[5]
#ifdef IAVF_DEBUG
#define _DBG_PRINTF(S, ...) printf("%s: " S "\n", __func__, ##__VA_ARGS__)
#define _DEV_DBG_PRINTF(dev, S, ...) device_printf(dev, "%s: " S "\n", __func__, ##__VA_ARGS__)
#define _IF_DBG_PRINTF(ifp, S, ...) if_printf(ifp, "%s: " S "\n", __func__, ##__VA_ARGS__)
/* Defines for printing generic debug information */
#define DPRINTF(...) _DBG_PRINTF(__VA_ARGS__)
#define DDPRINTF(...) _DEV_DBG_PRINTF(__VA_ARGS__)
#define IDPRINTF(...) _IF_DBG_PRINTF(__VA_ARGS__)
/* Defines for printing specific debug information */
#define DEBUG_INIT 1
#define DEBUG_IOCTL 1
#define DEBUG_HW 1
#define INIT_DEBUGOUT(...) if (DEBUG_INIT) _DBG_PRINTF(__VA_ARGS__)
#define INIT_DBG_DEV(...) if (DEBUG_INIT) _DEV_DBG_PRINTF(__VA_ARGS__)
#define INIT_DBG_IF(...) if (DEBUG_INIT) _IF_DBG_PRINTF(__VA_ARGS__)
#define IOCTL_DEBUGOUT(...) if (DEBUG_IOCTL) _DBG_PRINTF(__VA_ARGS__)
#define IOCTL_DBG_IF2(ifp, S, ...) if (DEBUG_IOCTL) \
if_printf(ifp, S "\n", ##__VA_ARGS__)
#define IOCTL_DBG_IF(...) if (DEBUG_IOCTL) _IF_DBG_PRINTF(__VA_ARGS__)
#define HW_DEBUGOUT(...) if (DEBUG_HW) _DBG_PRINTF(__VA_ARGS__)
#else /* no IAVF_DEBUG */
#define DEBUG_INIT 0
#define DEBUG_IOCTL 0
#define DEBUG_HW 0
#define DPRINTF(...)
#define DDPRINTF(...)
#define IDPRINTF(...)
#define INIT_DEBUGOUT(...)
#define INIT_DBG_DEV(...)
#define INIT_DBG_IF(...)
#define IOCTL_DEBUGOUT(...)
#define IOCTL_DBG_IF2(...)
#define IOCTL_DBG_IF(...)
#define HW_DEBUGOUT(...)
#endif /* IAVF_DEBUG */
/**
* @enum iavf_dbg_mask
* @brief Bitmask values for various debug messages
*
* Enumeration of possible debug message categories, represented as a bitmask.
*
* Bits are set in the softc dbg_mask field indicating which messages are
* enabled.
*
* Used by debug print macros in order to compare the message type with the
* enabled bits in the dbg_mask to decide whether to print the message or not.
*/
enum iavf_dbg_mask {
IAVF_DBG_INFO = 0x00000001,
IAVF_DBG_EN_DIS = 0x00000002,
IAVF_DBG_AQ = 0x00000004,
IAVF_DBG_INIT = 0x00000008,
IAVF_DBG_FILTER = 0x00000010,
IAVF_DBG_RSS = 0x00000100,
IAVF_DBG_VC = 0x00001000,
IAVF_DBG_SWITCH_INFO = 0x00010000,
IAVF_DBG_ALL = 0xFFFFFFFF
};
/* Debug printing */
void iavf_debug_core(device_t dev, uint32_t enabled_mask, uint32_t mask, char *fmt, ...) __printflike(4,5);
#define iavf_dbg(sc, m, s, ...) iavf_debug_core(sc->dev, sc->dbg_mask, m, s, ##__VA_ARGS__)
#define iavf_dbg_init(sc, s, ...) iavf_debug_core(sc->dev, sc->dbg_mask, IAVF_DBG_INIT, s, ##__VA_ARGS__)
#define iavf_dbg_info(sc, s, ...) iavf_debug_core(sc->dev, sc->dbg_mask, IAVF_DBG_INFO, s, ##__VA_ARGS__)
#define iavf_dbg_vc(sc, s, ...) iavf_debug_core(sc->dev, sc->dbg_mask, IAVF_DBG_VC, s, ##__VA_ARGS__)
#define iavf_dbg_filter(sc, s, ...) iavf_debug_core(sc->dev, sc->dbg_mask, IAVF_DBG_FILTER, s, ##__VA_ARGS__)
#define iavf_dbg_rss(sc, s, ...) iavf_debug_core(sc->dev, sc->dbg_mask, IAVF_DBG_RSS, s, ##__VA_ARGS__)
#endif /* _IAVF_DEBUG_H_ */
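/*
 * Usage sketch (illustrative): with a category bit set in the softc's
 * dbg_mask, the matching macro prints and the others are suppressed:
 *
 *	sc->dbg_mask = IAVF_DBG_FILTER | IAVF_DBG_VC;
 *	iavf_dbg_filter(sc, "add filter " MAC_FORMAT "\n",
 *	    MAC_FORMAT_ARGS(macaddr));		(printed)
 *	iavf_dbg_rss(sc, "RSS configured\n");	(suppressed)
 */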

sys/dev/iavf/iavf_devids.h Normal file

@@ -0,0 +1,45 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _IAVF_DEVIDS_H_
#define _IAVF_DEVIDS_H_
/* Vendor ID */
#define IAVF_INTEL_VENDOR_ID 0x8086
/* Device IDs for the VF driver */
#define IAVF_DEV_ID_VF 0x154C
#define IAVF_DEV_ID_VF_HV 0x1571
#define IAVF_DEV_ID_ADAPTIVE_VF 0x1889
#define IAVF_DEV_ID_X722_VF 0x37CD
#endif /* _IAVF_DEVIDS_H_ */

sys/dev/iavf/iavf_drv_info.h Normal file

@@ -0,0 +1,78 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file iavf_drv_info.h
* @brief device IDs and driver version
*
* Contains the device IDs tables and the driver version string.
*
* It must be included after iavf_legacy.h or iavf_iflib.h, and is expected to
* be included exactly once in the associated if_iavf file. Thus, it does not
* have the standard header guard.
*/
/**
* @var iavf_driver_version
* @brief driver version string
*
* Driver version information, used for display as part of an informational
* sysctl.
*/
const char iavf_driver_version[] = "3.0.26-k";
#define PVIDV(vendor, devid, name) \
PVID(vendor, devid, name " - 3.0.26-k")
#define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \
PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 3.0.26-k")
/**
* @var iavf_vendor_info_array
* @brief array of PCI devices supported by this driver
*
* Array of PCI devices which are supported by this driver. Used to determine
* whether a given device should be loaded by this driver. This information is
* also exported as part of the module information for other tools to analyze.
*
* @remark Each type of device ID needs to be listed from most-specific entry
* to most-generic entry; e.g. PVIDV_OEM()s for a device ID must come before
* the PVIDV() for it.
*/
static pci_vendor_info_t iavf_vendor_info_array[] = {
PVIDV(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF,
"Intel(R) Ethernet Virtual Function 700 Series"),
PVIDV(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_VF,
"Intel(R) Ethernet Virtual Function 700 Series (X722)"),
PVIDV(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF,
"Intel(R) Ethernet Adaptive Virtual Function"),
PVID_END
};

sys/dev/iavf/iavf_iflib.h Normal file

@@ -0,0 +1,407 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file iavf_iflib.h
* @brief main header for the iflib driver
*
* Contains definitions for various driver structures used throughout the
* driver code. This header is used by the iflib implementation.
*/
#ifndef _IAVF_IFLIB_H_
#define _IAVF_IFLIB_H_
#include "iavf_opts.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf_ring.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sockio.h>
#include <sys/eventhandler.h>
#include <sys/syslog.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/bpf.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>
#include <netinet/sctp.h>
#include <machine/in_cksum.h>
#include <sys/bus.h>
#include <sys/pciio.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/clock.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <sys/proc.h>
#include <sys/endian.h>
#include <sys/taskqueue.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sbuf.h>
#include <machine/smp.h>
#include <machine/stdarg.h>
#include <net/ethernet.h>
#include <net/iflib.h>
#include "ifdi_if.h"
#include "iavf_lib.h"
#define IAVF_CSUM_TCP \
(CSUM_IP_TCP|CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP6_TCP)
#define IAVF_CSUM_UDP \
(CSUM_IP_UDP|CSUM_IP6_UDP)
#define IAVF_CSUM_SCTP \
(CSUM_IP_SCTP|CSUM_IP6_SCTP)
#define IAVF_CSUM_IPV4 \
(CSUM_IP|CSUM_IP_TSO)
#define IAVF_CAPS \
(IFCAP_TSO4 | IFCAP_TSO6 | \
IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | \
IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | \
IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO | \
IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | \
IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU | IFCAP_LRO)
#define iavf_sc_from_ctx(_ctx) \
((struct iavf_sc *)iflib_get_softc(_ctx))
/* Use the correct assert function for each lock type */
#define IFLIB_CTX_ASSERT(_ctx) \
sx_assert(iflib_ctx_lock_get(_ctx), SA_XLOCKED)
#define IAVF_VC_LOCK(_sc) mtx_lock(&(_sc)->vc_mtx)
#define IAVF_VC_UNLOCK(_sc) mtx_unlock(&(_sc)->vc_mtx)
#define IAVF_VC_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->vc_mtx)
#define IAVF_VC_TRYLOCK(_sc) mtx_trylock(&(_sc)->vc_mtx)
#define IAVF_VC_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->vc_mtx, MA_OWNED)
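/*
 * Usage sketch (illustrative): these wrappers serialize access to the
 * virtchnl command state kept in the softc, e.g.:
 *
 *	IAVF_VC_LOCK(sc);
 *	... update virtchnl bookkeeping in sc ...
 *	IAVF_VC_UNLOCK(sc);
 */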
/**
* @struct tx_ring
* @brief Transmit ring control struct
*
* Structure used to track the hardware Tx ring data.
*/
struct tx_ring {
struct iavf_tx_queue *que;
u32 tail;
struct iavf_tx_desc *tx_base;
u64 tx_paddr;
u32 packets;
u32 me;
/*
* For reporting completed packet status
* in descriptor writeback mode
*/
qidx_t *tx_rsq;
qidx_t tx_rs_cidx;
qidx_t tx_rs_pidx;
qidx_t tx_cidx_processed;
/* Used for Dynamic ITR calculation */
u32 bytes;
u32 itr;
u32 latency;
/* Soft Stats */
u64 tx_bytes;
u64 tx_packets;
u64 mss_too_small;
};
/**
* @struct rx_ring
* @brief Receive ring control struct
*
* Structure used to track the hardware Rx ring data.
*/
struct rx_ring {
struct iavf_rx_queue *que;
union iavf_rx_desc *rx_base;
uint64_t rx_paddr;
bool discard;
u32 itr;
u32 latency;
u32 mbuf_sz;
u32 tail;
u32 me;
/* Used for Dynamic ITR calculation */
u32 packets;
u32 bytes;
/* Soft stats */
u64 rx_packets;
u64 rx_bytes;
u64 desc_errs;
};
/**
* @struct iavf_tx_queue
* @brief Driver Tx queue structure
*
* Structure to track the Tx ring, IRQ, MSI-X vector, and some software stats
* for a Tx queue.
*/
struct iavf_tx_queue {
struct iavf_vsi *vsi;
struct tx_ring txr;
struct if_irq que_irq;
u32 msix;
/* Stats */
u64 irqs;
u64 tso;
u32 pkt_too_small;
};
/**
* @struct iavf_rx_queue
* @brief Driver Rx queue structure
*
* Structure to track the Rx ring, IRQ, MSI-X vector, and some software stats
* for an Rx queue.
*/
struct iavf_rx_queue {
struct iavf_vsi *vsi;
struct rx_ring rxr;
struct if_irq que_irq;
u32 msix;
/* Stats */
u64 irqs;
};
/**
* @struct iavf_vsi
* @brief Virtual Station Interface
*
* Data tracking a VSI for an iavf device.
*/
struct iavf_vsi {
if_ctx_t ctx;
if_softc_ctx_t shared;
struct ifnet *ifp;
struct iavf_sc *back;
device_t dev;
struct iavf_hw *hw;
int id;
u16 num_rx_queues;
u16 num_tx_queues;
u32 rx_itr_setting;
u32 tx_itr_setting;
u16 max_frame_size;
bool enable_head_writeback;
bool link_active;
struct iavf_tx_queue *tx_queues;
struct iavf_rx_queue *rx_queues;
struct if_irq irq;
u16 num_vlans;
u16 num_macs;
/* Per-VSI stats from hardware */
struct iavf_eth_stats eth_stats;
struct iavf_eth_stats eth_stats_offsets;
bool stat_offsets_loaded;
/* VSI stat counters */
u64 ipackets;
u64 ierrors;
u64 opackets;
u64 oerrors;
u64 ibytes;
u64 obytes;
u64 imcasts;
u64 omcasts;
u64 iqdrops;
u64 oqdrops;
u64 noproto;
/* Misc. */
u64 flags;
struct sysctl_oid *vsi_node;
struct sysctl_ctx_list sysctl_ctx;
};
/**
* @struct iavf_mac_filter
* @brief MAC Address filter data
*
* Entry in the MAC filter list describing a MAC address filter used to
* program hardware to filter a specific MAC address.
*/
struct iavf_mac_filter {
SLIST_ENTRY(iavf_mac_filter) next;
u8 macaddr[ETHER_ADDR_LEN];
u16 flags;
};
/**
* @struct mac_list
* @brief MAC filter list head
*
* List head type for a singly-linked list of MAC address filters.
*/
SLIST_HEAD(mac_list, iavf_mac_filter);
/**
* @struct iavf_vlan_filter
* @brief VLAN filter data
*
* Entry in the VLAN filter list describing a VLAN filter used to
* program hardware to filter traffic on a specific VLAN.
*/
struct iavf_vlan_filter {
SLIST_ENTRY(iavf_vlan_filter) next;
u16 vlan;
u16 flags;
};
/**
* @struct vlan_list
* @brief VLAN filter list head
*
* List head type for a singly-linked list of VLAN filters.
*/
SLIST_HEAD(vlan_list, iavf_vlan_filter);
/**
* @struct iavf_sc
* @brief Main context structure for the iavf driver
*
* Software context structure used to store information about a single device
* that is loaded by the iavf driver.
*/
struct iavf_sc {
struct iavf_vsi vsi;
struct iavf_hw hw;
struct iavf_osdep osdep;
device_t dev;
struct resource *pci_mem;
/* driver state flags, only access using atomic functions */
u32 state;
struct ifmedia *media;
struct virtchnl_version_info version;
enum iavf_dbg_mask dbg_mask;
u16 promisc_flags;
bool link_up;
union {
enum virtchnl_link_speed link_speed;
u32 link_speed_adv;
};
/* Tunable settings */
int tx_itr;
int rx_itr;
int dynamic_tx_itr;
int dynamic_rx_itr;
/* Filter lists */
struct mac_list *mac_filters;
struct vlan_list *vlan_filters;
/* Virtual comm channel */
struct virtchnl_vf_resource *vf_res;
struct virtchnl_vsi_resource *vsi_res;
/* Misc stats maintained by the driver */
u64 admin_irq;
/* Buffer used for reading AQ responses */
u8 aq_buffer[IAVF_AQ_BUF_SZ];
/* State flag used in init/stop */
u32 queues_enabled;
u8 enable_queues_chan;
u8 disable_queues_chan;
/* For virtchnl message processing task */
struct task vc_task;
struct taskqueue *vc_tq;
char vc_mtx_name[16];
struct mtx vc_mtx;
};
/* Function prototypes */
void iavf_init_tx_ring(struct iavf_vsi *vsi, struct iavf_tx_queue *que);
void iavf_get_default_rss_key(u32 *);
const char * iavf_vc_stat_str(struct iavf_hw *hw,
enum virtchnl_status_code stat_err);
void iavf_init_tx_rsqs(struct iavf_vsi *vsi);
void iavf_init_tx_cidx(struct iavf_vsi *vsi);
u64 iavf_max_vc_speed_to_value(u8 link_speeds);
void iavf_add_vsi_sysctls(device_t dev, struct iavf_vsi *vsi,
struct sysctl_ctx_list *ctx, const char *sysctl_name);
void iavf_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child,
struct iavf_eth_stats *eth_stats);
void iavf_add_queues_sysctls(device_t dev, struct iavf_vsi *vsi);
void iavf_enable_intr(struct iavf_vsi *);
void iavf_disable_intr(struct iavf_vsi *);
#endif /* _IAVF_IFLIB_H_ */

sys/dev/iavf/iavf_lib.c Normal file

File diff suppressed because it is too large

sys/dev/iavf/iavf_lib.h Normal file

@@ -0,0 +1,512 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file iavf_lib.h
* @brief header for structures and functions common to legacy and iflib
*
* Contains definitions and function declarations which are shared between the
* legacy and iflib driver implementation.
*/
#ifndef _IAVF_LIB_H_
#define _IAVF_LIB_H_
#include <sys/malloc.h>
#include <machine/stdarg.h>
#include <sys/sysctl.h>
#include "iavf_debug.h"
#include "iavf_osdep.h"
#include "iavf_type.h"
#include "iavf_prototype.h"
MALLOC_DECLARE(M_IAVF);
/*
* Ring Descriptors Valid Range: 32-4096. Default Value: 1024. This value is
* the number of Tx/Rx descriptors allocated by the driver. Increasing this
* value allows the driver to queue more operations.
*
* Tx descriptors are always 16 bytes, but Rx descriptors can be 32 bytes.
* The driver currently always uses 32 byte Rx descriptors.
*/
#define IAVF_DEFAULT_RING 1024
#define IAVF_MAX_RING 4096
#define IAVF_MIN_RING 64
#define IAVF_RING_INCREMENT 32
#define IAVF_AQ_LEN 256
#define IAVF_AQ_LEN_MAX 1024
/*
** Default number of entries in Tx queue buf_ring.
*/
#define DEFAULT_TXBRSZ 4096
/* Alignment for rings */
#define DBA_ALIGN 128
/*
* Max number of multicast MAC addrs added to the driver's
* internal lists before converting to promiscuous mode
*/
#define MAX_MULTICAST_ADDR 128
#define IAVF_MSIX_BAR 3
#define IAVF_ADM_LIMIT 2
#define IAVF_TSO_SIZE ((255*1024)-1)
#define IAVF_AQ_BUF_SZ ((u32) 4096)
#define IAVF_RX_HDR 128
#define IAVF_RX_LIMIT 512
#define IAVF_RX_ITR 0
#define IAVF_TX_ITR 1
/**
* The maximum packet length allowed to be sent or received by the adapter.
*/
#define IAVF_MAX_FRAME 9728
/**
* The minimum packet length allowed to be sent by the adapter.
*/
#define IAVF_MIN_FRAME 17
#define IAVF_MAX_TX_SEGS 8
#define IAVF_MAX_RX_SEGS 5
#define IAVF_MAX_TSO_SEGS 128
#define IAVF_SPARSE_CHAIN 7
#define IAVF_MIN_TSO_MSS 64
#define IAVF_MAX_TSO_MSS 9668
#define IAVF_MAX_DMA_SEG_SIZE ((16 * 1024) - 1)
#define IAVF_AQ_MAX_ERR 30
#define IAVF_MAX_INIT_WAIT 120
#define IAVF_AQ_TIMEOUT (1 * hz)
#define IAVF_ADV_LINK_SPEED_SCALE ((u64)1000000)
#define IAVF_MAX_DIS_Q_RETRY 10
#define IAVF_RSS_KEY_SIZE_REG 13
#define IAVF_RSS_KEY_SIZE (IAVF_RSS_KEY_SIZE_REG * 4)
#define IAVF_RSS_VSI_LUT_SIZE 64 /* X722 -> VSI, X710 -> VF */
#define IAVF_RSS_VSI_LUT_ENTRY_MASK 0x3F
#define IAVF_RSS_VF_LUT_ENTRY_MASK 0xF
/* Maximum MTU size */
#define IAVF_MAX_MTU (IAVF_MAX_FRAME - \
ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)
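/* With ETHER_HDR_LEN (14), ETHER_CRC_LEN (4) and ETHER_VLAN_ENCAP_LEN (4),
 * this works out to 9728 - 14 - 4 - 4 = 9706 bytes.
 */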
/*
* Hardware requires that TSO packets have a segment size of at least 64
* bytes. To avoid sending bad frames to the hardware, the driver forces the
* MSS for all TSO packets to have a segment size of at least 64 bytes.
*
* However, if the MTU is reduced below a certain size, then the resulting
* larger MSS can result in transmitting segmented frames with a packet size
* larger than the MTU.
*
* Avoid this by preventing the MTU from being lowered below this limit.
* Alternative solutions require changing the TCP stack to disable offloading
* the segmentation when the requested segment size goes below 64 bytes.
*/
#define IAVF_MIN_MTU 112
/*
* Interrupt Moderation parameters
* Multiply ITR values by 2 for real ITR value
*/
#define IAVF_MAX_ITR 0x0FF0
#define IAVF_ITR_100K 0x0005
#define IAVF_ITR_20K 0x0019
#define IAVF_ITR_8K 0x003E
#define IAVF_ITR_4K 0x007A
#define IAVF_ITR_1K 0x01F4
#define IAVF_ITR_DYNAMIC 0x8000
#define IAVF_LOW_LATENCY 0
#define IAVF_AVE_LATENCY 1
#define IAVF_BULK_LATENCY 2
/* MacVlan Flags */
#define IAVF_FILTER_USED (u16)(1 << 0)
#define IAVF_FILTER_VLAN (u16)(1 << 1)
#define IAVF_FILTER_ADD (u16)(1 << 2)
#define IAVF_FILTER_DEL (u16)(1 << 3)
#define IAVF_FILTER_MC (u16)(1 << 4)
/* used in the vlan field of the filter when not a vlan */
#define IAVF_VLAN_ANY -1
#define CSUM_OFFLOAD_IPV4 (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
#define CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6|CSUM_UDP_IPV6|CSUM_SCTP_IPV6)
#define CSUM_OFFLOAD (CSUM_OFFLOAD_IPV4|CSUM_OFFLOAD_IPV6|CSUM_TSO)
/* Misc flags for iavf_vsi.flags */
#define IAVF_FLAGS_KEEP_TSO4 (1 << 0)
#define IAVF_FLAGS_KEEP_TSO6 (1 << 1)
#define IAVF_DEFAULT_RSS_HENA_BASE (\
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6))
#define IAVF_DEFAULT_ADV_RSS_HENA (\
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
#define IAVF_DEFAULT_RSS_HENA_XL710 (\
IAVF_DEFAULT_RSS_HENA_BASE | \
BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD))
#define IAVF_DEFAULT_RSS_HENA_X722 (\
IAVF_DEFAULT_RSS_HENA_XL710 | \
IAVF_DEFAULT_ADV_RSS_HENA)
#define IAVF_DEFAULT_RSS_HENA_AVF (\
IAVF_DEFAULT_RSS_HENA_BASE | \
IAVF_DEFAULT_ADV_RSS_HENA)
/* Pre-11 counter(9) compatibility */
#if __FreeBSD_version >= 1100036
#define IAVF_SET_IPACKETS(vsi, count) (vsi)->ipackets = (count)
#define IAVF_SET_IERRORS(vsi, count) (vsi)->ierrors = (count)
#define IAVF_SET_OPACKETS(vsi, count) (vsi)->opackets = (count)
#define IAVF_SET_OERRORS(vsi, count) (vsi)->oerrors = (count)
#define IAVF_SET_COLLISIONS(vsi, count) /* Do nothing; collisions is always 0. */
#define IAVF_SET_IBYTES(vsi, count) (vsi)->ibytes = (count)
#define IAVF_SET_OBYTES(vsi, count) (vsi)->obytes = (count)
#define IAVF_SET_IMCASTS(vsi, count) (vsi)->imcasts = (count)
#define IAVF_SET_OMCASTS(vsi, count) (vsi)->omcasts = (count)
#define IAVF_SET_IQDROPS(vsi, count) (vsi)->iqdrops = (count)
#define IAVF_SET_OQDROPS(vsi, count) (vsi)->oqdrops = (count)
#define IAVF_SET_NOPROTO(vsi, count) (vsi)->noproto = (count)
#else
#define IAVF_SET_IPACKETS(vsi, count) (vsi)->ifp->if_ipackets = (count)
#define IAVF_SET_IERRORS(vsi, count) (vsi)->ifp->if_ierrors = (count)
#define IAVF_SET_OPACKETS(vsi, count) (vsi)->ifp->if_opackets = (count)
#define IAVF_SET_OERRORS(vsi, count) (vsi)->ifp->if_oerrors = (count)
#define IAVF_SET_COLLISIONS(vsi, count) (vsi)->ifp->if_collisions = (count)
#define IAVF_SET_IBYTES(vsi, count) (vsi)->ifp->if_ibytes = (count)
#define IAVF_SET_OBYTES(vsi, count) (vsi)->ifp->if_obytes = (count)
#define IAVF_SET_IMCASTS(vsi, count) (vsi)->ifp->if_imcasts = (count)
#define IAVF_SET_OMCASTS(vsi, count) (vsi)->ifp->if_omcasts = (count)
#define IAVF_SET_IQDROPS(vsi, count) (vsi)->ifp->if_iqdrops = (count)
#define IAVF_SET_OQDROPS(vsi, odrops) (vsi)->ifp->if_snd.ifq_drops = (odrops)
#define IAVF_SET_NOPROTO(vsi, count) (vsi)->noproto = (count)
#endif
/* For stats sysctl naming */
#define IAVF_QUEUE_NAME_LEN 32
#define IAVF_FLAG_AQ_ENABLE_QUEUES (u32)(1 << 0)
#define IAVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
#define IAVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2)
#define IAVF_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3)
#define IAVF_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4)
#define IAVF_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5)
#define IAVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
#define IAVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
#define IAVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
#define IAVF_FLAG_AQ_CONFIGURE_PROMISC (u32)(1 << 9)
#define IAVF_FLAG_AQ_GET_STATS (u32)(1 << 10)
#define IAVF_FLAG_AQ_CONFIG_RSS_KEY (u32)(1 << 11)
#define IAVF_FLAG_AQ_SET_RSS_HENA (u32)(1 << 12)
#define IAVF_FLAG_AQ_GET_RSS_HENA_CAPS (u32)(1 << 13)
#define IAVF_FLAG_AQ_CONFIG_RSS_LUT (u32)(1 << 14)
#define IAVF_CAP_ADV_LINK_SPEED(_sc) \
((_sc)->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
#define IAVF_NRXQS(_vsi) ((_vsi)->num_rx_queues)
#define IAVF_NTXQS(_vsi) ((_vsi)->num_tx_queues)
/**
* printf %b flag args
*/
#define IAVF_FLAGS \
"\20\1ENABLE_QUEUES\2DISABLE_QUEUES\3ADD_MAC_FILTER" \
"\4ADD_VLAN_FILTER\5DEL_MAC_FILTER\6DEL_VLAN_FILTER" \
"\7CONFIGURE_QUEUES\10MAP_VECTORS\11HANDLE_RESET" \
"\12CONFIGURE_PROMISC\13GET_STATS\14CONFIG_RSS_KEY" \
"\15SET_RSS_HENA\16GET_RSS_HENA_CAPS\17CONFIG_RSS_LUT"
/**
* printf %b flag args for offloads from virtchnl.h
*/
#define IAVF_PRINTF_VF_OFFLOAD_FLAGS \
"\20\1L2" \
"\2IWARP" \
"\3FCOE" \
"\4RSS_AQ" \
"\5RSS_REG" \
"\6WB_ON_ITR" \
"\7REQ_QUEUES" \
"\10ADV_LINK_SPEED" \
"\21VLAN" \
"\22RX_POLLING" \
"\23RSS_PCTYPE_V2" \
"\24RSS_PF" \
"\25ENCAP" \
"\26ENCAP_CSUM" \
"\27RX_ENCAP_CSUM" \
"\30ADQ"
/**
* @enum iavf_ext_link_speed
* @brief Extended link speed enumeration
*
* Enumeration of possible link speeds that the device could be operating in.
* Contains an extended list compared to the virtchnl_link_speed, including
* additional higher speeds such as 50GB and 100GB.
*
* The enumeration is used to convert between the old virtchnl_link_speed, the
* newer advanced speed reporting value specified in Mb/s, and the ifmedia
* link speeds reported to the operating system.
*/
enum iavf_ext_link_speed {
IAVF_EXT_LINK_SPEED_UNKNOWN,
IAVF_EXT_LINK_SPEED_10MB,
IAVF_EXT_LINK_SPEED_100MB,
IAVF_EXT_LINK_SPEED_1000MB,
IAVF_EXT_LINK_SPEED_2500MB,
IAVF_EXT_LINK_SPEED_5GB,
IAVF_EXT_LINK_SPEED_10GB,
IAVF_EXT_LINK_SPEED_20GB,
IAVF_EXT_LINK_SPEED_25GB,
IAVF_EXT_LINK_SPEED_40GB,
IAVF_EXT_LINK_SPEED_50GB,
IAVF_EXT_LINK_SPEED_100GB,
};
/**
* @struct iavf_sysctl_info
* @brief sysctl statistic info
*
* Structure describing a single statistics sysctl, used for reporting
* specific hardware and software statistics via the sysctl interface.
*/
struct iavf_sysctl_info {
u64 *stat;
char *name;
char *description;
};
/* Forward struct declarations */
struct iavf_sc;
struct iavf_vsi;
/**
* @enum iavf_state
* @brief Driver state flags
*
* Used to indicate the status of various driver events. Intended to be
* modified only using atomic operations, so that we can use it even in places
* which aren't locked.
*/
enum iavf_state {
IAVF_STATE_INITIALIZED,
IAVF_STATE_RESET_REQUIRED,
IAVF_STATE_RESET_PENDING,
IAVF_STATE_RUNNING,
/* This entry must be last */
IAVF_STATE_LAST,
};
/* Functions for setting and checking driver state. Note the functions take
* bit positions, not bitmasks. The atomic_testandset_32 and
* atomic_testandclear_32 operations require bit positions, while the
* atomic_set_32 and atomic_clear_32 require bitmasks. This can easily lead to
* programming errors, so we provide wrapper functions to avoid this.
*/
/**
* iavf_set_state - Set the specified state
* @s: the state bitmap
* @bit: the state to set
*
* Atomically update the state bitmap with the specified bit set.
*/
static inline void
iavf_set_state(volatile u32 *s, enum iavf_state bit)
{
/* atomic_set_32 expects a bitmask */
atomic_set_32(s, BIT(bit));
}
/**
* iavf_clear_state - Clear the specified state
* @s: the state bitmap
* @bit: the state to clear
*
* Atomically update the state bitmap with the specified bit cleared.
*/
static inline void
iavf_clear_state(volatile u32 *s, enum iavf_state bit)
{
/* atomic_clear_32 expects a bitmask */
atomic_clear_32(s, BIT(bit));
}
/**
* iavf_testandset_state - Test and set the specified state
* @s: the state bitmap
* @bit: the bit to test
*
* Atomically update the state bitmap, setting the specified bit.
*
* @returns the previous value of the bit.
*/
static inline u32
iavf_testandset_state(volatile u32 *s, enum iavf_state bit)
{
/* atomic_testandset_32 expects a bit position */
return atomic_testandset_32(s, bit);
}
/**
* iavf_testandclear_state - Test and clear the specified state
* @s: the state bitmap
* @bit: the bit to test
*
* Atomically update the state bitmap, clearing the specified bit.
*
* @returns the previous value of the bit.
*/
static inline u32
iavf_testandclear_state(volatile u32 *s, enum iavf_state bit)
{
/* atomic_testandclear_32 expects a bit position */
return atomic_testandclear_32(s, bit);
}
/**
* iavf_test_state - Test the specified state
* @s: the state bitmap
* @bit: the bit to test
*
* @returns true if the state is set, false otherwise.
*
* @remark Use this only if the flow does not need to update the state. If you
* must update the state as well, prefer iavf_testandset_state or
* iavf_testandclear_state.
*/
static inline u32
iavf_test_state(volatile u32 *s, enum iavf_state bit)
{
return (*s & BIT(bit)) ? true : false;
}
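/*
 * Usage sketch (illustrative): the test-and-set form lets exactly one
 * caller win a state transition without additional locking:
 *
 *	if (iavf_testandset_state(&sc->state, IAVF_STATE_RESET_PENDING))
 *		return;
 *	... handle the reset, then ...
 *	iavf_clear_state(&sc->state, IAVF_STATE_RESET_PENDING);
 */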
/**
* cmp_etheraddr - Compare two ethernet addresses
* @ea1: first ethernet address
* @ea2: second ethernet address
*
* Compares two ethernet addresses.
*
* @returns true if the addresses are equal, false otherwise.
*/
static inline bool
cmp_etheraddr(const u8 *ea1, const u8 *ea2)
{
bool cmp = FALSE;
if ((ea1[0] == ea2[0]) && (ea1[1] == ea2[1]) &&
(ea1[2] == ea2[2]) && (ea1[3] == ea2[3]) &&
(ea1[4] == ea2[4]) && (ea1[5] == ea2[5]))
cmp = TRUE;
return (cmp);
}
int iavf_send_vc_msg(struct iavf_sc *sc, u32 op);
int iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op);
void iavf_update_link_status(struct iavf_sc *);
bool iavf_driver_is_detaching(struct iavf_sc *sc);
void iavf_msec_pause(int msecs);
void iavf_get_default_rss_key(u32 *key);
int iavf_allocate_pci_resources_common(struct iavf_sc *sc);
int iavf_reset_complete(struct iavf_hw *hw);
int iavf_setup_vc(struct iavf_sc *sc);
int iavf_reset(struct iavf_sc *sc);
void iavf_enable_adminq_irq(struct iavf_hw *hw);
void iavf_disable_adminq_irq(struct iavf_hw *hw);
int iavf_vf_config(struct iavf_sc *sc);
void iavf_print_device_info(struct iavf_sc *sc);
int iavf_get_vsi_res_from_vf_res(struct iavf_sc *sc);
void iavf_set_mac_addresses(struct iavf_sc *sc);
void iavf_init_filters(struct iavf_sc *sc);
void iavf_free_filters(struct iavf_sc *sc);
void iavf_add_device_sysctls_common(struct iavf_sc *sc);
void iavf_configure_tx_itr(struct iavf_sc *sc);
void iavf_configure_rx_itr(struct iavf_sc *sc);
struct sysctl_oid_list *
iavf_create_debug_sysctl_tree(struct iavf_sc *sc);
void iavf_add_debug_sysctls_common(struct iavf_sc *sc,
struct sysctl_oid_list *debug_list);
void iavf_add_vsi_sysctls(device_t dev, struct iavf_vsi *vsi,
struct sysctl_ctx_list *ctx, const char *sysctl_name);
void iavf_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child, struct iavf_eth_stats *eth_stats);
void iavf_media_status_common(struct iavf_sc *sc,
struct ifmediareq *ifmr);
int iavf_media_change_common(struct ifnet *ifp);
void iavf_set_initial_baudrate(struct ifnet *ifp);
u64 iavf_max_vc_speed_to_value(u8 link_speeds);
void iavf_config_rss_reg(struct iavf_sc *sc);
void iavf_config_rss_pf(struct iavf_sc *sc);
void iavf_config_rss(struct iavf_sc *sc);
int iavf_config_promisc(struct iavf_sc *sc, int flags);
void iavf_init_multi(struct iavf_sc *sc);
void iavf_multi_set(struct iavf_sc *sc);
int iavf_add_mac_filter(struct iavf_sc *sc, u8 *macaddr, u16 flags);
struct iavf_mac_filter *
iavf_find_mac_filter(struct iavf_sc *sc, u8 *macaddr);
struct iavf_mac_filter *
iavf_get_mac_filter(struct iavf_sc *sc);
u64 iavf_baudrate_from_link_speed(struct iavf_sc *sc);
void iavf_add_vlan_filter(struct iavf_sc *sc, u16 vtag);
int iavf_mark_del_vlan_filter(struct iavf_sc *sc, u16 vtag);
void iavf_update_msix_devinfo(device_t dev);
void iavf_disable_queues_with_retries(struct iavf_sc *);
int iavf_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
int iavf_sysctl_tx_itr(SYSCTL_HANDLER_ARGS);
int iavf_sysctl_rx_itr(SYSCTL_HANDLER_ARGS);
int iavf_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
#endif /* _IAVF_LIB_H_ */

sys/dev/iavf/iavf_opts.h Normal file

@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file iavf_opts.h
* @brief header including the kernel option files
*
* Contains includes for the opt_*.h header files which define macros
* indicating whether certain kernel functionality is enabled based on kernel
* configuration.
*/
#ifndef _IAVF_OPTS_H_
#define _IAVF_OPTS_H_
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

sys/dev/iavf/iavf_osdep.c Normal file

@@ -0,0 +1,405 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file iavf_osdep.c
* @brief OS compatibility layer
*
* Contains definitions for various functions used to provide an OS
* independent layer for sharing code between drivers on different operating
* systems.
*/
#include <machine/stdarg.h>
#include "iavf_iflib.h"
/********************************************************************
* Manage DMA'able memory.
*******************************************************************/
/**
* iavf_dmamap_cb - DMA mapping callback function
* @arg: pointer to return the segment address
* @segs: the segments array
* @nseg: number of segments in the array
* @error: error code
*
* Callback used by the bus DMA code to obtain the segment address.
*/
static void
iavf_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg __unused,
int error)
{
if (error)
return;
*(bus_addr_t *) arg = segs->ds_addr;
return;
}
/**
* iavf_allocate_virt_mem - Allocate virtual memory
* @hw: hardware structure
* @mem: structure describing the memory allocation
* @size: size of the allocation
*
* OS compatibility function to allocate virtual memory.
*
* @returns zero on success, or a status code on failure.
*/
enum iavf_status
iavf_allocate_virt_mem(struct iavf_hw *hw __unused, struct iavf_virt_mem *mem,
u32 size)
{
mem->va = malloc(size, M_IAVF, M_NOWAIT | M_ZERO);
return(mem->va == NULL);
}
/**
* iavf_free_virt_mem - Free virtual memory
* @hw: hardware structure
* @mem: structure describing the memory to free
*
* OS compatibility function to free virtual memory
*
* @returns zero.
*/
enum iavf_status
iavf_free_virt_mem(struct iavf_hw *hw __unused, struct iavf_virt_mem *mem)
{
free(mem->va, M_IAVF);
mem->va = NULL;
return(0);
}
/**
* iavf_allocate_dma_mem - Allocate DMA memory
* @hw: hardware structure
* @mem: structure describing the memory allocation
* @type: unused type parameter specifying the type of allocation
* @size: size of the allocation
* @alignment: alignment requirements for the allocation
*
* Allocates DMA memory by using bus_dma_tag_create to create a DMA tag, and
* then bus_dmamem_alloc to allocate the associated memory.
*
* @returns zero on success, or a status code on failure.
*/
enum iavf_status
iavf_allocate_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem,
enum iavf_memory_type type __unused, u64 size, u32 alignment)
{
device_t dev = ((struct iavf_osdep *)hw->back)->dev;
int err;
err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
alignment, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
size, /* maxsize */
1, /* nsegments */
size, /* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
&mem->tag);
if (err != 0) {
device_printf(dev,
"iavf_allocate_dma: bus_dma_tag_create failed, "
"error %u\n", err);
goto fail_0;
}
err = bus_dmamem_alloc(mem->tag, (void **)&mem->va,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
if (err != 0) {
device_printf(dev,
"iavf_allocate_dma: bus_dmamem_alloc failed, "
"error %u\n", err);
goto fail_1;
}
err = bus_dmamap_load(mem->tag, mem->map, mem->va,
size,
iavf_dmamap_cb,
&mem->pa,
BUS_DMA_NOWAIT);
if (err != 0) {
device_printf(dev,
"iavf_allocate_dma: bus_dmamap_load failed, "
"error %u\n", err);
goto fail_2;
}
mem->nseg = 1;
mem->size = size;
bus_dmamap_sync(mem->tag, mem->map,
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
return (0);
fail_2:
bus_dmamem_free(mem->tag, mem->va, mem->map);
fail_1:
bus_dma_tag_destroy(mem->tag);
fail_0:
mem->map = NULL;
mem->tag = NULL;
return (err);
}
/**
* iavf_free_dma_mem - Free DMA memory allocation
* @hw: hardware structure
* @mem: pointer to memory structure previously allocated
*
* Releases DMA memory that was previously allocated by iavf_allocate_dma_mem.
*
* @returns zero.
*/
enum iavf_status
iavf_free_dma_mem(struct iavf_hw *hw __unused, struct iavf_dma_mem *mem)
{
bus_dmamap_sync(mem->tag, mem->map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(mem->tag, mem->map);
bus_dmamem_free(mem->tag, mem->va, mem->map);
bus_dma_tag_destroy(mem->tag);
return (0);
}
/**
* iavf_init_spinlock - Initialize a spinlock
* @lock: OS compatibility lock structure
*
* Use the mutex layer to initialize a spin lock that can be used via the OS
* compatibility layer accessors.
*
* @remark we pass MTX_DUPOK because the mutex name will not be unique. An
* alternative would be to somehow generate a name, such as by passing in the
* __FILE__ and __LINE__ values from a macro.
*/
void
iavf_init_spinlock(struct iavf_spinlock *lock)
{
mtx_init(&lock->mutex, "mutex",
"iavf spinlock", MTX_DEF | MTX_DUPOK);
}
/**
* iavf_acquire_spinlock - Acquire a spin lock
* @lock: OS compatibility lock structure
*
* Acquire a spin lock using mtx_lock.
*/
void
iavf_acquire_spinlock(struct iavf_spinlock *lock)
{
mtx_lock(&lock->mutex);
}
/**
* iavf_release_spinlock - Release a spin lock
* @lock: OS compatibility lock structure
*
* Release a spin lock using mtx_unlock.
*/
void
iavf_release_spinlock(struct iavf_spinlock *lock)
{
mtx_unlock(&lock->mutex);
}
/**
* iavf_destroy_spinlock - Destroy a spin lock
* @lock: OS compatibility lock structure
*
* Destroy (deinitialize) a spin lock by calling mtx_destroy.
*
* @remark we only destroy the lock if it was initialized. This means that
* calling iavf_destroy_spinlock on a lock that was already destroyed or was
* never initialized is not considered a bug.
*/
void
iavf_destroy_spinlock(struct iavf_spinlock *lock)
{
if (mtx_initialized(&lock->mutex))
mtx_destroy(&lock->mutex);
}
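/*
 * Usage sketch (illustrative): the expected lifecycle for these wrappers is
 *
 *	iavf_init_spinlock(&lock);
 *	iavf_acquire_spinlock(&lock);
 *	... critical section ...
 *	iavf_release_spinlock(&lock);
 *	iavf_destroy_spinlock(&lock);
 */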
/**
* iavf_debug_shared - Log a debug message if enabled
* @hw: device hardware structure
* @mask: bit indicating the type of the message
* @fmt: printf format string
*
* Checks if the mask is enabled in the hw->debug_mask. If so, prints
* a message to the console using vprintf().
*/
void
iavf_debug_shared(struct iavf_hw *hw, uint64_t mask, char *fmt, ...)
{
va_list args;
device_t dev;
if (!(mask & hw->debug_mask))
return;
dev = ((struct iavf_osdep *)hw->back)->dev;
/* Re-implement device_printf() */
device_print_prettyname(dev);
va_start(args, fmt);
vprintf(fmt, args);
va_end(args);
}
/**
* iavf_read_pci_cfg - Read a PCI config register
* @hw: device hardware structure
* @reg: the PCI register to read
*
* Calls pci_read_config to read the given PCI register from the PCI config
* space.
*
* @returns the value of the register.
*/
u16
iavf_read_pci_cfg(struct iavf_hw *hw, u32 reg)
{
u16 value;
value = pci_read_config(((struct iavf_osdep *)hw->back)->dev,
reg, 2);
return (value);
}
/**
* iavf_write_pci_cfg - Write a PCI config register
* @hw: device hardware structure
* @reg: the PCI register to write
* @value: the value to write
*
* Calls pci_write_config to write to a given PCI register in the PCI config
* space.
*/
void
iavf_write_pci_cfg(struct iavf_hw *hw, u32 reg, u16 value)
{
pci_write_config(((struct iavf_osdep *)hw->back)->dev,
reg, value, 2);
return;
}
/**
* iavf_rd32 - Read a 32bit hardware register value
* @hw: the private hardware structure
* @reg: register address to read
*
* Read the specified 32bit register value from BAR0 and return its contents.
*
* @returns the value of the 32bit register.
*/
inline uint32_t
iavf_rd32(struct iavf_hw *hw, uint32_t reg)
{
struct iavf_osdep *osdep = (struct iavf_osdep *)hw->back;
KASSERT(reg < osdep->mem_bus_space_size,
("iavf: register offset %#jx too large (max is %#jx)",
(uintmax_t)reg, (uintmax_t)osdep->mem_bus_space_size));
return (bus_space_read_4(osdep->mem_bus_space_tag,
osdep->mem_bus_space_handle, reg));
}
/**
* iavf_wr32 - Write a 32bit hardware register
* @hw: the private hardware structure
* @reg: the register address to write to
* @val: the 32bit value to write
*
* Write the specified 32bit value to a register address in BAR0.
*/
inline void
iavf_wr32(struct iavf_hw *hw, uint32_t reg, uint32_t val)
{
struct iavf_osdep *osdep = (struct iavf_osdep *)hw->back;
KASSERT(reg < osdep->mem_bus_space_size,
("iavf: register offset %#jx too large (max is %#jx)",
(uintmax_t)reg, (uintmax_t)osdep->mem_bus_space_size));
bus_space_write_4(osdep->mem_bus_space_tag,
osdep->mem_bus_space_handle, reg, val);
}
/**
* iavf_flush - Flush register writes
* @hw: private hardware structure
*
* Forces the completion of outstanding PCI register writes by reading from
* a specific hardware register.
*/
inline void
iavf_flush(struct iavf_hw *hw)
{
struct iavf_osdep *osdep = (struct iavf_osdep *)hw->back;
rd32(hw, osdep->flush_reg);
}
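/*
 * Usage sketch (illustrative): a flush is typically paired with a register
 * write to force the posted write to complete before the driver continues.
 * The register name below is an example only:
 *
 *	wr32(hw, IAVF_VFINT_DYN_CTL01, reg_val);
 *	iavf_flush(hw);
 */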
/**
* iavf_debug_core - Debug printf for core driver code
* @dev: the device_t to log under
* @enabled_mask: the mask of enabled messages
* @mask: the mask of the requested message to print
* @fmt: printf format string
*
* If enabled_mask has the bit from the mask set, print a message to the
* console using the specified format. This is used to conditionally enable
* log messages at run time by toggling the enabled_mask in the device
* structure.
*/
void
iavf_debug_core(device_t dev, u32 enabled_mask, u32 mask, char *fmt, ...)
{
va_list args;
if (!(mask & enabled_mask))
return;
/* Re-implement device_printf() */
device_print_prettyname(dev);
va_start(args, fmt);
vprintf(fmt, args);
va_end(args);
}

sys/dev/iavf/iavf_osdep.h Normal file

@@ -0,0 +1,250 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file iavf_osdep.h
* @brief OS compatibility layer definitions
*
* Contains macros and definitions used to implement an OS compatibility layer
* used by some of the hardware files.
*/
#ifndef _IAVF_OSDEP_H_
#define _IAVF_OSDEP_H_
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/clock.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include "iavf_status.h"
#include "iavf_debug.h"
#define iavf_usec_delay(x) DELAY(x)
#define iavf_msec_delay(x) DELAY(1000 * (x))
#define DBG 0
#define DEBUGFUNC(F) DEBUGOUT(F);
#if DBG
#define DEBUGOUT(S) printf(S "\n")
#define DEBUGOUT1(S,A) printf(S "\n",A)
#define DEBUGOUT2(S,A,B) printf(S "\n",A,B)
#define DEBUGOUT3(S,A,B,C) printf(S "\n",A,B,C)
#define DEBUGOUT6(S,A,B,C,D,E,F) printf(S "\n",A,B,C,D,E,F)
#define DEBUGOUT7(S,A,B,C,D,E,F,G) printf(S "\n",A,B,C,D,E,F,G)
#else
#define DEBUGOUT(S)
#define DEBUGOUT1(S,A)
#define DEBUGOUT2(S,A,B)
#define DEBUGOUT3(S,A,B,C)
#define DEBUGOUT6(S,A,B,C,D,E,F)
#define DEBUGOUT7(S,A,B,C,D,E,F,G)
#endif
#define UNREFERENCED_PARAMETER(_p) _p = _p
#define UNREFERENCED_1PARAMETER(_p) do { \
UNREFERENCED_PARAMETER(_p); \
} while (0)
#define UNREFERENCED_2PARAMETER(_p, _q) do { \
UNREFERENCED_PARAMETER(_p); \
UNREFERENCED_PARAMETER(_q); \
} while (0)
#define UNREFERENCED_3PARAMETER(_p, _q, _r) do { \
UNREFERENCED_PARAMETER(_p); \
UNREFERENCED_PARAMETER(_q); \
UNREFERENCED_PARAMETER(_r); \
} while (0)
#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) do { \
UNREFERENCED_PARAMETER(_p); \
UNREFERENCED_PARAMETER(_q); \
UNREFERENCED_PARAMETER(_r); \
UNREFERENCED_PARAMETER(_s); \
} while (0)
#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) do { \
UNREFERENCED_PARAMETER(_p); \
UNREFERENCED_PARAMETER(_q); \
UNREFERENCED_PARAMETER(_r); \
UNREFERENCED_PARAMETER(_s); \
UNREFERENCED_PARAMETER(_t); \
} while (0)
#define STATIC static
#define INLINE inline
#define iavf_memset(a, b, c, d) memset((a), (b), (c))
#define iavf_memcpy(a, b, c, d) memcpy((a), (b), (c))
#define CPU_TO_LE16(o) htole16(o)
#define CPU_TO_LE32(s) htole32(s)
#define CPU_TO_LE64(h) htole64(h)
#define LE16_TO_CPU(a) le16toh(a)
#define LE32_TO_CPU(c) le32toh(c)
#define LE64_TO_CPU(k) le64toh(k)
/**
* @typedef u8
* @brief compatibility typedef for uint8_t
*/
typedef uint8_t u8;
/**
* @typedef s8
* @brief compatibility typedef for int8_t
*/
typedef int8_t s8;
/**
* @typedef u16
* @brief compatibility typedef for uint16_t
*/
typedef uint16_t u16;
/**
* @typedef s16
* @brief compatibility typedef for int16_t
*/
typedef int16_t s16;
/**
* @typedef u32
* @brief compatibility typedef for uint32_t
*/
typedef uint32_t u32;
/**
* @typedef s32
* @brief compatibility typedef for int32_t
*/
typedef int32_t s32;
/**
* @typedef u64
* @brief compatibility typedef for uint64_t
*/
typedef uint64_t u64;
#define __le16 u16
#define __le32 u32
#define __le64 u64
#define __be16 u16
#define __be32 u32
#define __be64 u64
/**
* @struct iavf_spinlock
* @brief OS wrapper for a non-sleeping lock
*
* Wrapper used to provide an implementation of a non-sleeping lock.
*/
struct iavf_spinlock {
struct mtx mutex;
};
/**
* @struct iavf_osdep
* @brief Storage for data used by the osdep interface
*
* Contains data used by the osdep layer. Accessed via the hw->back pointer.
*/
struct iavf_osdep {
bus_space_tag_t mem_bus_space_tag;
bus_space_handle_t mem_bus_space_handle;
bus_size_t mem_bus_space_size;
uint32_t flush_reg;
int i2c_intfc_num;
device_t dev;
};
/**
* @struct iavf_dma_mem
* @brief DMA memory map
*
* Structure representing a DMA memory mapping.
*/
struct iavf_dma_mem {
void *va;
u64 pa;
bus_dma_tag_t tag;
bus_dmamap_t map;
bus_dma_segment_t seg;
bus_size_t size;
int nseg;
int flags;
};
/**
* @struct iavf_virt_mem
* @brief Virtual memory
*
* Structure representing some virtual memory.
*/
struct iavf_virt_mem {
void *va;
u32 size;
};
struct iavf_hw; /* forward decl */
u16 iavf_read_pci_cfg(struct iavf_hw *, u32);
void iavf_write_pci_cfg(struct iavf_hw *, u32, u16);
/*
** iavf_debug - OS dependent version of shared code debug printing
*/
#define iavf_debug(h, m, s, ...) iavf_debug_shared(h, m, s, ##__VA_ARGS__)
void iavf_debug_shared(struct iavf_hw *hw, uint64_t mask,
char *fmt_str, ...) __printflike(3, 4);
/*
** This hardware supports either 16 or 32 byte rx descriptors;
** the driver only uses the 32 byte kind.
*/
#define iavf_rx_desc iavf_32byte_rx_desc
uint32_t iavf_rd32(struct iavf_hw *hw, uint32_t reg);
void iavf_wr32(struct iavf_hw *hw, uint32_t reg, uint32_t val);
void iavf_flush(struct iavf_hw *hw);
#define rd32(hw, reg) iavf_rd32(hw, reg)
#define wr32(hw, reg, val) iavf_wr32(hw, reg, val)
#endif /* _IAVF_OSDEP_H_ */
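
As a usage illustration for the register wrappers declared above, here is a
minimal sketch; the helper name is hypothetical, while rd32() and the
IAVF_VFGEN_RSTAT macros (from iavf_register.h, below) are real:

	/* Hypothetical helper: read the VF reset state field through the
	 * osdep register wrappers. */
	static u8
	iavf_example_read_vfr_state(struct iavf_hw *hw)
	{
		u32 reg = rd32(hw, IAVF_VFGEN_RSTAT);

		return ((reg & IAVF_VFGEN_RSTAT_VFR_STATE_MASK) >>
		    IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT);
	}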


@@ -0,0 +1,122 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _IAVF_PROTOTYPE_H_
#define _IAVF_PROTOTYPE_H_
#include "iavf_type.h"
#include "iavf_alloc.h"
#include "virtchnl.h"
/* Prototypes for shared code functions that are not in
 * the standard function pointer structures. They are kept
 * separate mostly because they are needed even before init
 * has completed, and they assist in the early SW and FW
 * setup.
 */
/* adminq functions */
enum iavf_status iavf_init_adminq(struct iavf_hw *hw);
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw);
enum iavf_status iavf_init_asq(struct iavf_hw *hw);
enum iavf_status iavf_init_arq(struct iavf_hw *hw);
enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw);
enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw);
enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw);
enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw);
u16 iavf_clean_asq(struct iavf_hw *hw);
void iavf_free_adminq_asq(struct iavf_hw *hw);
void iavf_free_adminq_arq(struct iavf_hw *hw);
enum iavf_status iavf_validate_mac_addr(u8 *mac_addr);
void iavf_adminq_init_ring_data(struct iavf_hw *hw);
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
struct iavf_arq_event_info *e,
u16 *events_pending);
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
struct iavf_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct iavf_asq_cmd_details *cmd_details);
bool iavf_asq_done(struct iavf_hw *hw);
/* debug function for adminq */
void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask,
void *desc, void *buffer, u16 buf_len);
void iavf_idle_aq(struct iavf_hw *hw);
bool iavf_check_asq_alive(struct iavf_hw *hw);
enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading);
enum iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw,
u16 seid,
struct iavf_aqc_get_set_rss_key_data *key);
enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw,
u16 seid,
struct iavf_aqc_get_set_rss_key_data *key);
const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err);
const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err);
enum iavf_status iavf_set_mac_type(struct iavf_hw *hw);
extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[];
STATIC INLINE struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
{
return iavf_ptype_lookup[ptype];
}
/* prototype for functions used for SW spinlocks */
void iavf_init_spinlock(struct iavf_spinlock *sp);
void iavf_acquire_spinlock(struct iavf_spinlock *sp);
void iavf_release_spinlock(struct iavf_spinlock *sp);
void iavf_destroy_spinlock(struct iavf_spinlock *sp);
void iavf_vf_parse_hw_config(struct iavf_hw *hw,
struct virtchnl_vf_resource *msg);
enum iavf_status iavf_vf_reset(struct iavf_hw *hw);
enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
enum virtchnl_ops v_opcode,
enum iavf_status v_retval,
u8 *msg, u16 msglen,
struct iavf_asq_cmd_details *cmd_details);
enum iavf_status iavf_aq_debug_dump(struct iavf_hw *hw, u8 cluster_id,
u8 table_id, u32 start_index, u16 buff_size,
void *buff, u16 *ret_buff_size,
u8 *ret_next_table, u32 *ret_next_index,
struct iavf_asq_cmd_details *cmd_details);
enum iavf_status iavf_aq_clear_all_wol_filters(struct iavf_hw *hw,
struct iavf_asq_cmd_details *cmd_details);
#endif /* _IAVF_PROTOTYPE_H_ */
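
To show how the admin queue prototypes above fit together, a minimal sketch of
draining pending ARQ events follows; the helper and the 4096-byte buffer size
are assumptions, not the driver's actual event loop:

	/* Hypothetical ARQ drain loop built on iavf_clean_arq_element(). */
	static void
	iavf_example_drain_arq(struct iavf_hw *hw)
	{
		struct iavf_arq_event_info event;
		u8 buf[4096];	/* assumed maximum AQ buffer size */
		u16 pending = 0;

		event.buf_len = sizeof(buf);
		event.msg_buf = buf;
		do {
			if (iavf_clean_arq_element(hw, &event, &pending))
				break;	/* no more events, or an error */
			/* ... dispatch event.desc / event.msg_buf here ... */
		} while (pending);
	}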


@@ -0,0 +1,121 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _IAVF_REGISTER_H_
#define _IAVF_REGISTER_H_
#define IAVF_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
#define IAVF_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
#define IAVF_VF_ARQH1 0x00007400 /* Reset: EMPR */
#define IAVF_VF_ARQH1_ARQH_SHIFT 0
#define IAVF_VF_ARQH1_ARQH_MASK IAVF_MASK(0x3FF, IAVF_VF_ARQH1_ARQH_SHIFT)
#define IAVF_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
#define IAVF_VF_ARQLEN1_ARQVFE_SHIFT 28
#define IAVF_VF_ARQLEN1_ARQVFE_MASK IAVF_MASK(1UL, IAVF_VF_ARQLEN1_ARQVFE_SHIFT)
#define IAVF_VF_ARQLEN1_ARQOVFL_SHIFT 29
#define IAVF_VF_ARQLEN1_ARQOVFL_MASK IAVF_MASK(1UL, IAVF_VF_ARQLEN1_ARQOVFL_SHIFT)
#define IAVF_VF_ARQLEN1_ARQCRIT_SHIFT 30
#define IAVF_VF_ARQLEN1_ARQCRIT_MASK IAVF_MASK(1UL, IAVF_VF_ARQLEN1_ARQCRIT_SHIFT)
#define IAVF_VF_ARQLEN1_ARQENABLE_SHIFT 31
#define IAVF_VF_ARQLEN1_ARQENABLE_MASK IAVF_MASK(1UL, IAVF_VF_ARQLEN1_ARQENABLE_SHIFT)
#define IAVF_VF_ARQT1 0x00007000 /* Reset: EMPR */
#define IAVF_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
#define IAVF_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
#define IAVF_VF_ATQH1 0x00006400 /* Reset: EMPR */
#define IAVF_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
#define IAVF_VF_ATQLEN1_ATQVFE_SHIFT 28
#define IAVF_VF_ATQLEN1_ATQVFE_MASK IAVF_MASK(1UL, IAVF_VF_ATQLEN1_ATQVFE_SHIFT)
#define IAVF_VF_ATQLEN1_ATQOVFL_SHIFT 29
#define IAVF_VF_ATQLEN1_ATQOVFL_MASK IAVF_MASK(1UL, IAVF_VF_ATQLEN1_ATQOVFL_SHIFT)
#define IAVF_VF_ATQLEN1_ATQCRIT_SHIFT 30
#define IAVF_VF_ATQLEN1_ATQCRIT_MASK IAVF_MASK(1UL, IAVF_VF_ATQLEN1_ATQCRIT_SHIFT)
#define IAVF_VF_ATQLEN1_ATQENABLE_SHIFT 31
#define IAVF_VF_ATQLEN1_ATQENABLE_MASK IAVF_MASK(1UL, IAVF_VF_ATQLEN1_ATQENABLE_SHIFT)
#define IAVF_VF_ATQT1 0x00008400 /* Reset: EMPR */
#define IAVF_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
#define IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT 0
#define IAVF_VFGEN_RSTAT_VFR_STATE_MASK IAVF_MASK(0x3, IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT)
#define IAVF_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
#define IAVF_VFINT_DYN_CTL01_INTENA_SHIFT 0
#define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTL01_INTENA_SHIFT)
#define IAVF_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
#define IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
#define IAVF_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
#define IAVF_VFINT_DYN_CTL01_SWINT_TRIG_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
#define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
#define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
#define IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
#define IAVF_VFINT_DYN_CTL01_INTERVAL_MASK IAVF_MASK(0xFFF, IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT)
#define IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
#define IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
#define IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
#define IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
#define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0
#define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT)
#define IAVF_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
#define IAVF_VFINT_DYN_CTLN1_CLEARPBA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
#define IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
#define IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
#define IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
#define IAVF_VFINT_DYN_CTLN1_INTERVAL_MASK IAVF_MASK(0xFFF, IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
#define IAVF_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
#define IAVF_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
#define IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK IAVF_MASK(1UL, IAVF_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
#define IAVF_VFINT_ICR0_ENA1_RSVD_SHIFT 31
#define IAVF_VFINT_ICR01 0x00004800 /* Reset: CORER */
#define IAVF_VFINT_ICR01_QUEUE_0_SHIFT 1
#define IAVF_VFINT_ICR01_QUEUE_0_MASK IAVF_MASK(1UL, IAVF_VFINT_ICR01_QUEUE_0_SHIFT)
#define IAVF_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
#define IAVF_VFINT_ICR01_LINK_STAT_CHANGE_MASK IAVF_MASK(1UL, IAVF_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
#define IAVF_VFINT_ICR01_ADMINQ_SHIFT 30
#define IAVF_VFINT_ICR01_ADMINQ_MASK IAVF_MASK(1UL, IAVF_VFINT_ICR01_ADMINQ_SHIFT)
#define IAVF_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
#define IAVF_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
#define IAVF_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
#define IAVF_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
#define IAVF_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
#define IAVF_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
#define IAVF_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
#define IAVF_VFQF_HKEY_MAX_INDEX 12
#define IAVF_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
#define IAVF_VFQF_HLUT_MAX_INDEX 15
#define IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
#define IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
#endif /* _IAVF_REGISTER_H_ */
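
The IAVF_MASK() macro used throughout this header is defined in iavf_type.h
(whose diff is suppressed further below); its conventional form and use are
sketched here as an assumption, not quoted from that file:

	/* Assumed definition, matching how the masks above are consumed: */
	#define IAVF_MASK(mask, shift)	((mask) << (shift))

	/* Hypothetical helper: field extraction is then mask-and-shift. */
	static u32
	iavf_example_arq_head(struct iavf_hw *hw)
	{
		return ((rd32(hw, IAVF_VF_ARQH1) & IAVF_VF_ARQH1_ARQH_MASK) >>
		    IAVF_VF_ARQH1_ARQH_SHIFT);
	}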

107
sys/dev/iavf/iavf_status.h Normal file

@@ -0,0 +1,107 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _IAVF_STATUS_H_
#define _IAVF_STATUS_H_
/* Error Codes */
enum iavf_status {
IAVF_SUCCESS = 0,
IAVF_ERR_NVM = -1,
IAVF_ERR_NVM_CHECKSUM = -2,
IAVF_ERR_PHY = -3,
IAVF_ERR_CONFIG = -4,
IAVF_ERR_PARAM = -5,
IAVF_ERR_MAC_TYPE = -6,
IAVF_ERR_UNKNOWN_PHY = -7,
IAVF_ERR_LINK_SETUP = -8,
IAVF_ERR_ADAPTER_STOPPED = -9,
IAVF_ERR_INVALID_MAC_ADDR = -10,
IAVF_ERR_DEVICE_NOT_SUPPORTED = -11,
IAVF_ERR_MASTER_REQUESTS_PENDING = -12,
IAVF_ERR_INVALID_LINK_SETTINGS = -13,
IAVF_ERR_AUTONEG_NOT_COMPLETE = -14,
IAVF_ERR_RESET_FAILED = -15,
IAVF_ERR_SWFW_SYNC = -16,
IAVF_ERR_NO_AVAILABLE_VSI = -17,
IAVF_ERR_NO_MEMORY = -18,
IAVF_ERR_BAD_PTR = -19,
IAVF_ERR_RING_FULL = -20,
IAVF_ERR_INVALID_PD_ID = -21,
IAVF_ERR_INVALID_QP_ID = -22,
IAVF_ERR_INVALID_CQ_ID = -23,
IAVF_ERR_INVALID_CEQ_ID = -24,
IAVF_ERR_INVALID_AEQ_ID = -25,
IAVF_ERR_INVALID_SIZE = -26,
IAVF_ERR_INVALID_ARP_INDEX = -27,
IAVF_ERR_INVALID_FPM_FUNC_ID = -28,
IAVF_ERR_QP_INVALID_MSG_SIZE = -29,
IAVF_ERR_QP_TOOMANY_WRS_POSTED = -30,
IAVF_ERR_INVALID_FRAG_COUNT = -31,
IAVF_ERR_QUEUE_EMPTY = -32,
IAVF_ERR_INVALID_ALIGNMENT = -33,
IAVF_ERR_FLUSHED_QUEUE = -34,
IAVF_ERR_INVALID_PUSH_PAGE_INDEX = -35,
IAVF_ERR_INVALID_IMM_DATA_SIZE = -36,
IAVF_ERR_TIMEOUT = -37,
IAVF_ERR_OPCODE_MISMATCH = -38,
IAVF_ERR_CQP_COMPL_ERROR = -39,
IAVF_ERR_INVALID_VF_ID = -40,
IAVF_ERR_INVALID_HMCFN_ID = -41,
IAVF_ERR_BACKING_PAGE_ERROR = -42,
IAVF_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
IAVF_ERR_INVALID_PBLE_INDEX = -44,
IAVF_ERR_INVALID_SD_INDEX = -45,
IAVF_ERR_INVALID_PAGE_DESC_INDEX = -46,
IAVF_ERR_INVALID_SD_TYPE = -47,
IAVF_ERR_MEMCPY_FAILED = -48,
IAVF_ERR_INVALID_HMC_OBJ_INDEX = -49,
IAVF_ERR_INVALID_HMC_OBJ_COUNT = -50,
IAVF_ERR_INVALID_SRQ_ARM_LIMIT = -51,
IAVF_ERR_SRQ_ENABLED = -52,
IAVF_ERR_ADMIN_QUEUE_ERROR = -53,
IAVF_ERR_ADMIN_QUEUE_TIMEOUT = -54,
IAVF_ERR_BUF_TOO_SHORT = -55,
IAVF_ERR_ADMIN_QUEUE_FULL = -56,
IAVF_ERR_ADMIN_QUEUE_NO_WORK = -57,
IAVF_ERR_BAD_IWARP_CQE = -58,
IAVF_ERR_NVM_BLANK_MODE = -59,
IAVF_ERR_NOT_IMPLEMENTED = -60,
IAVF_ERR_PE_DOORBELL_NOT_ENABLED = -61,
IAVF_ERR_DIAG_TEST_FAILED = -62,
IAVF_ERR_NOT_READY = -63,
IAVF_NOT_SUPPORTED = -64,
IAVF_ERR_FIRMWARE_API_VERSION = -65,
IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
};
#endif /* _IAVF_STATUS_H_ */
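
These codes are usually rendered human-readable through iavf_stat_str(),
declared in iavf_prototype.h; a hypothetical sketch of the reporting pattern:

	/* Hypothetical example: report an adminq init failure by name. */
	static void
	iavf_example_report_status(device_t dev, struct iavf_hw *hw)
	{
		enum iavf_status status = iavf_init_adminq(hw);

		if (status != IAVF_SUCCESS)
			device_printf(dev, "init_adminq failed: %s\n",
			    iavf_stat_str(hw, status));
	}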


@@ -0,0 +1,158 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file iavf_sysctls_common.h
* @brief Sysctls common to the legacy and iflib drivers
*
* Contains global sysctl definitions which are shared between the legacy and
* iflib driver implementations.
*/
#ifndef _IAVF_SYSCTLS_COMMON_H_
#define _IAVF_SYSCTLS_COMMON_H_
#include <sys/sysctl.h>
/* Root node for tunables */
static SYSCTL_NODE(_hw, OID_AUTO, iavf, CTLFLAG_RD, 0,
"IAVF driver parameters");
/**
* @var iavf_enable_head_writeback
* @brief Sysctl to control Tx descriptor completion method
*
* Global sysctl value indicating whether to enable the head writeback method
* of Tx descriptor completion notification.
*
 * @remark Head writeback has been deprecated and only works on 700-series
 * virtual functions.
*/
static int iavf_enable_head_writeback = 0;
SYSCTL_INT(_hw_iavf, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
&iavf_enable_head_writeback, 0,
"For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors. For 700 series VFs only.");
/**
* @var iavf_core_debug_mask
* @brief Debug mask for driver messages
*
* Global sysctl value used to control what set of debug messages are printed.
* Used by messages in core driver code.
*/
static int iavf_core_debug_mask = 0;
SYSCTL_INT(_hw_iavf, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
&iavf_core_debug_mask, 0,
"Display debug statements that are printed in non-shared code");
/**
* @var iavf_shared_debug_mask
* @brief Debug mask for shared code messages
*
* Global sysctl value used to control what set of debug messages are printed.
* Used by messages in shared device logic code.
*/
static int iavf_shared_debug_mask = 0;
SYSCTL_INT(_hw_iavf, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
&iavf_shared_debug_mask, 0,
"Display debug statements that are printed in shared code");
/**
* @var iavf_rx_itr
* @brief Rx interrupt throttling rate
*
* Controls the default interrupt throttling rate for receive interrupts.
*/
int iavf_rx_itr = IAVF_ITR_8K;
SYSCTL_INT(_hw_iavf, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
&iavf_rx_itr, 0, "RX Interrupt Rate");
/**
* @var iavf_tx_itr
* @brief Tx interrupt throttling rate
*
* Controls the default interrupt throttling rate for transmit interrupts.
*/
int iavf_tx_itr = IAVF_ITR_4K;
SYSCTL_INT(_hw_iavf, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
&iavf_tx_itr, 0, "TX Interrupt Rate");
/**
* iavf_save_tunables - Sanity check and save off tunable values
* @sc: device softc
*
* @pre "iavf_drv_info.h" is included before this file
* @pre dev pointer in sc is valid
*/
static void
iavf_save_tunables(struct iavf_sc *sc)
{
device_t dev = sc->dev;
u16 pci_device_id = pci_get_device(dev);
/* Save tunable information */
sc->dbg_mask = (enum iavf_dbg_mask)iavf_core_debug_mask;
sc->hw.debug_mask = iavf_shared_debug_mask;
if (pci_device_id == IAVF_DEV_ID_VF ||
pci_device_id == IAVF_DEV_ID_X722_VF)
sc->vsi.enable_head_writeback = !!(iavf_enable_head_writeback);
else if (iavf_enable_head_writeback) {
device_printf(dev, "Head writeback can only be enabled on 700 series Virtual Functions\n");
device_printf(dev, "Using descriptor writeback instead...\n");
sc->vsi.enable_head_writeback = 0;
}
if (iavf_tx_itr < 0 || iavf_tx_itr > IAVF_MAX_ITR) {
device_printf(dev, "Invalid tx_itr value of %d set!\n",
iavf_tx_itr);
device_printf(dev, "tx_itr must be between %d and %d, "
"inclusive\n",
0, IAVF_MAX_ITR);
device_printf(dev, "Using default value of %d instead\n",
IAVF_ITR_4K);
sc->tx_itr = IAVF_ITR_4K;
} else
sc->tx_itr = iavf_tx_itr;
if (iavf_rx_itr < 0 || iavf_rx_itr > IAVF_MAX_ITR) {
device_printf(dev, "Invalid rx_itr value of %d set!\n",
iavf_rx_itr);
device_printf(dev, "rx_itr must be between %d and %d, "
"inclusive\n",
0, IAVF_MAX_ITR);
device_printf(dev, "Using default value of %d instead\n",
IAVF_ITR_8K);
sc->rx_itr = IAVF_ITR_8K;
} else
sc->rx_itr = iavf_rx_itr;
}
#endif /* _IAVF_SYSCTLS_COMMON_H_ */


@@ -0,0 +1,46 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file iavf_sysctls_iflib.h
* @brief global sysctls for the iflib driver
*
* Contains sysctl definitions which are used by the iflib driver
* implementation. Sysctls which are unique to the iflib driver should be
* declared in this file.
*/
#ifndef _IAVF_SYSCTLS_IFLIB_H_
#define _IAVF_SYSCTLS_IFLIB_H_
#include "iavf_sysctls_common.h"
#endif /* _IAVF_SYSCTLS_IFLIB_H_ */


@@ -0,0 +1,93 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file iavf_txrx_common.h
* @brief Tx/Rx hotpath functions common to legacy and iflib
*
* Contains implementations for functions used in the hotpath for both the
* legacy and iflib driver implementations.
*/
#ifndef _IAVF_TXRX_COMMON_H_
#define _IAVF_TXRX_COMMON_H_
#include "iavf_iflib.h"
static inline int iavf_ptype_to_hash(u8 ptype);
/**
* iavf_ptype_to_hash - parse the packet type
* @ptype: packet type
*
* Determine the appropriate hash for a given packet type
*
* @returns the M_HASHTYPE_* value for the given packet type.
*/
static inline int
iavf_ptype_to_hash(u8 ptype)
{
struct iavf_rx_ptype_decoded decoded;
decoded = decode_rx_desc_ptype(ptype);
if (!decoded.known)
return M_HASHTYPE_OPAQUE;
if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_L2)
return M_HASHTYPE_OPAQUE;
/* Note: anything that gets to this point is IP */
if (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6) {
switch (decoded.inner_prot) {
case IAVF_RX_PTYPE_INNER_PROT_TCP:
return M_HASHTYPE_RSS_TCP_IPV6;
case IAVF_RX_PTYPE_INNER_PROT_UDP:
return M_HASHTYPE_RSS_UDP_IPV6;
default:
return M_HASHTYPE_RSS_IPV6;
}
}
if (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4) {
switch (decoded.inner_prot) {
case IAVF_RX_PTYPE_INNER_PROT_TCP:
return M_HASHTYPE_RSS_TCP_IPV4;
case IAVF_RX_PTYPE_INNER_PROT_UDP:
return M_HASHTYPE_RSS_UDP_IPV4;
default:
return M_HASHTYPE_RSS_IPV4;
}
}
/* We should never get here! */
return M_HASHTYPE_OPAQUE;
}
#endif /* _IAVF_TXRX_COMMON_H_ */


@@ -0,0 +1,789 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file iavf_txrx_iflib.c
* @brief Tx/Rx hotpath implementation for the iflib driver
*
* Contains functions used to implement the Tx and Rx hotpaths of the iflib
* driver implementation.
*/
#include "iavf_iflib.h"
#include "iavf_txrx_common.h"
#ifdef RSS
#include <net/rss_config.h>
#endif
/* Local Prototypes */
static void iavf_rx_checksum(if_rxd_info_t ri, u32 status, u32 error, u8 ptype);
static int iavf_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void iavf_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int iavf_isc_txd_credits_update_hwb(void *arg, uint16_t txqid, bool clear);
static int iavf_isc_txd_credits_update_dwb(void *arg, uint16_t txqid, bool clear);
static void iavf_isc_rxd_refill(void *arg, if_rxd_update_t iru);
static void iavf_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
qidx_t pidx);
static int iavf_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx,
qidx_t budget);
static int iavf_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);
/**
* @var iavf_txrx_hwb
* @brief iflib Tx/Rx operations for head write back
*
* iflib ops structure for when operating the device in head write back mode.
*/
struct if_txrx iavf_txrx_hwb = {
iavf_isc_txd_encap,
iavf_isc_txd_flush,
iavf_isc_txd_credits_update_hwb,
iavf_isc_rxd_available,
iavf_isc_rxd_pkt_get,
iavf_isc_rxd_refill,
iavf_isc_rxd_flush,
NULL
};
/**
* @var iavf_txrx_dwb
* @brief iflib Tx/Rx operations for descriptor write back
*
* iflib ops structure for when operating the device in descriptor write back
* mode.
*/
struct if_txrx iavf_txrx_dwb = {
iavf_isc_txd_encap,
iavf_isc_txd_flush,
iavf_isc_txd_credits_update_dwb,
iavf_isc_rxd_available,
iavf_isc_rxd_pkt_get,
iavf_isc_rxd_refill,
iavf_isc_rxd_flush,
NULL
};
/**
* iavf_is_tx_desc_done - Check if a Tx descriptor is ready
* @txr: the Tx ring to check in
* @idx: ring index to check
*
* @returns true if the descriptor has been written back by hardware, and
* false otherwise.
*/
static bool
iavf_is_tx_desc_done(struct tx_ring *txr, int idx)
{
return (((txr->tx_base[idx].cmd_type_offset_bsz >> IAVF_TXD_QW1_DTYPE_SHIFT)
& IAVF_TXD_QW1_DTYPE_MASK) == IAVF_TX_DESC_DTYPE_DESC_DONE);
}
/**
* iavf_tso_detect_sparse - detect TSO packets with too many segments
* @segs: packet segments array
* @nsegs: number of packet segments
* @pi: packet information
*
* Hardware only transmits packets with a maximum of 8 descriptors. For TSO
* packets, hardware needs to be able to build the split packets using 8 or
* fewer descriptors. Additionally, the header must be contained within at
* most 3 descriptors.
*
* To verify this, we walk the headers to find out how many descriptors the
* headers require (usually 1). Then we ensure that, for each TSO segment, its
* data plus the headers are contained within 8 or fewer descriptors.
*
* @returns zero if the packet is valid, one otherwise.
*/
static int
iavf_tso_detect_sparse(bus_dma_segment_t *segs, int nsegs, if_pkt_info_t pi)
{
int count, curseg, i, hlen, segsz, seglen, tsolen;
if (nsegs <= IAVF_MAX_TX_SEGS-2)
return (0);
segsz = pi->ipi_tso_segsz;
curseg = count = 0;
hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
tsolen = pi->ipi_len - hlen;
i = 0;
curseg = segs[0].ds_len;
while (hlen > 0) {
count++;
if (count > IAVF_MAX_TX_SEGS - 2)
return (1);
if (curseg == 0) {
i++;
if (__predict_false(i == nsegs))
return (1);
curseg = segs[i].ds_len;
}
seglen = min(curseg, hlen);
curseg -= seglen;
hlen -= seglen;
}
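	/*
	 * Now walk the TSO payload an MSS at a time; the descriptor
	 * count restarts for each segment that hardware will build.
	 */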
while (tsolen > 0) {
segsz = pi->ipi_tso_segsz;
while (segsz > 0 && tsolen != 0) {
count++;
if (count > IAVF_MAX_TX_SEGS - 2) {
return (1);
}
if (curseg == 0) {
i++;
if (__predict_false(i == nsegs)) {
return (1);
}
curseg = segs[i].ds_len;
}
seglen = min(curseg, segsz);
segsz -= seglen;
curseg -= seglen;
tsolen -= seglen;
}
count = 0;
}
return (0);
}
/**
* iavf_tx_setup_offload - Setup Tx offload parameters
* @que: pointer to the Tx queue
* @pi: Tx packet info
* @cmd: pointer to command descriptor value
* @off: pointer to offset descriptor value
*
* Based on packet type and Tx offloads requested, sets up the command and
* offset values for a Tx descriptor to enable the requested offloads.
*/
static void
iavf_tx_setup_offload(struct iavf_tx_queue *que __unused,
if_pkt_info_t pi, u32 *cmd, u32 *off)
{
switch (pi->ipi_etype) {
#ifdef INET
case ETHERTYPE_IP:
if (pi->ipi_csum_flags & IAVF_CSUM_IPV4)
*cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
else
*cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
break;
#endif
#ifdef INET6
case ETHERTYPE_IPV6:
*cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
break;
#endif
default:
break;
}
*off |= (pi->ipi_ehdrlen >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
*off |= (pi->ipi_ip_hlen >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
switch (pi->ipi_ipproto) {
case IPPROTO_TCP:
if (pi->ipi_csum_flags & IAVF_CSUM_TCP) {
*cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
*off |= (pi->ipi_tcp_hlen >> 2) <<
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
/* Check for NO_HEAD MDD event */
MPASS(pi->ipi_tcp_hlen != 0);
}
break;
case IPPROTO_UDP:
if (pi->ipi_csum_flags & IAVF_CSUM_UDP) {
*cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
*off |= (sizeof(struct udphdr) >> 2) <<
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
}
break;
case IPPROTO_SCTP:
if (pi->ipi_csum_flags & IAVF_CSUM_SCTP) {
*cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
*off |= (sizeof(struct sctphdr) >> 2) <<
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
}
/* Fall Thru */
default:
break;
}
}
/**
* iavf_tso_setup - Setup TSO context descriptor
* @txr: the Tx ring to process
* @pi: packet info structure
*
* Enable hardware segmentation offload (TSO) for a given packet by creating
* a context descriptor with the necessary details for offloading.
*
* @returns the new ring index to use for the data descriptor.
*/
static int
iavf_tso_setup(struct tx_ring *txr, if_pkt_info_t pi)
{
if_softc_ctx_t scctx;
struct iavf_tx_context_desc *TXD;
u32 cmd, mss, type, tsolen;
int idx, total_hdr_len;
u64 type_cmd_tso_mss;
idx = pi->ipi_pidx;
TXD = (struct iavf_tx_context_desc *) &txr->tx_base[idx];
total_hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
tsolen = pi->ipi_len - total_hdr_len;
scctx = txr->que->vsi->shared;
type = IAVF_TX_DESC_DTYPE_CONTEXT;
cmd = IAVF_TX_CTX_DESC_TSO;
/*
* TSO MSS must not be less than 64; this prevents a
* BAD_LSO_MSS MDD event when the MSS is too small.
*/
if (pi->ipi_tso_segsz < IAVF_MIN_TSO_MSS) {
txr->mss_too_small++;
pi->ipi_tso_segsz = IAVF_MIN_TSO_MSS;
}
mss = pi->ipi_tso_segsz;
/* Check for BAD_LSO_MSS MDD event (mss too large) */
MPASS(mss <= IAVF_MAX_TSO_MSS);
/* Check for NO_HEAD MDD event (header lengths are 0) */
MPASS(pi->ipi_ehdrlen != 0);
MPASS(pi->ipi_ip_hlen != 0);
/* Partial check for BAD_LSO_LEN MDD event */
MPASS(tsolen != 0);
/* Partial check for WRONG_SIZE MDD event (during TSO) */
MPASS(total_hdr_len + mss <= IAVF_MAX_FRAME);
type_cmd_tso_mss = ((u64)type << IAVF_TXD_CTX_QW1_DTYPE_SHIFT) |
((u64)cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
((u64)tsolen << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
((u64)mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);
TXD->type_cmd_tso_mss = htole64(type_cmd_tso_mss);
TXD->tunneling_params = htole32(0);
txr->que->tso++;
return ((idx + 1) & (scctx->isc_ntxd[0]-1));
}
#define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)
/**
* iavf_isc_txd_encap - Encapsulate a Tx packet into descriptors
* @arg: void pointer to the VSI structure
* @pi: packet info to encapsulate
*
* This routine maps the mbufs to tx descriptors, allowing the
* TX engine to transmit the packets.
*
* @returns 0 on success, positive on failure
*/
static int
iavf_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
struct iavf_vsi *vsi = arg;
if_softc_ctx_t scctx = vsi->shared;
struct iavf_tx_queue *que = &vsi->tx_queues[pi->ipi_qsidx];
struct tx_ring *txr = &que->txr;
int nsegs = pi->ipi_nsegs;
bus_dma_segment_t *segs = pi->ipi_segs;
struct iavf_tx_desc *txd = NULL;
int i, j, mask, pidx_last;
u32 cmd, off, tx_intr;
if (__predict_false(pi->ipi_len < IAVF_MIN_FRAME)) {
que->pkt_too_small++;
return (EINVAL);
}
cmd = off = 0;
i = pi->ipi_pidx;
tx_intr = (pi->ipi_flags & IPI_TX_INTR);
/* Set up the TSO/CSUM offload */
if (pi->ipi_csum_flags & CSUM_OFFLOAD) {
/* Set up the TSO context descriptor if required */
if (pi->ipi_csum_flags & CSUM_TSO) {
/* Prevent MAX_BUFF MDD event (for TSO) */
if (iavf_tso_detect_sparse(segs, nsegs, pi))
return (EFBIG);
i = iavf_tso_setup(txr, pi);
}
iavf_tx_setup_offload(que, pi, &cmd, &off);
}
if (pi->ipi_mflags & M_VLANTAG)
cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
cmd |= IAVF_TX_DESC_CMD_ICRC;
mask = scctx->isc_ntxd[0] - 1;
/* Check for WRONG_SIZE MDD event */
MPASS(pi->ipi_len >= IAVF_MIN_FRAME);
#ifdef INVARIANTS
if (!(pi->ipi_csum_flags & CSUM_TSO))
MPASS(pi->ipi_len <= IAVF_MAX_FRAME);
#endif
for (j = 0; j < nsegs; j++) {
bus_size_t seglen;
txd = &txr->tx_base[i];
seglen = segs[j].ds_len;
/* Check for ZERO_BSIZE MDD event */
MPASS(seglen != 0);
txd->buffer_addr = htole64(segs[j].ds_addr);
txd->cmd_type_offset_bsz =
htole64(IAVF_TX_DESC_DTYPE_DATA
| ((u64)cmd << IAVF_TXD_QW1_CMD_SHIFT)
| ((u64)off << IAVF_TXD_QW1_OFFSET_SHIFT)
| ((u64)seglen << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT)
| ((u64)htole16(pi->ipi_vtag) << IAVF_TXD_QW1_L2TAG1_SHIFT));
txr->tx_bytes += seglen;
pidx_last = i;
i = (i+1) & mask;
}
/* Set the last descriptor for report */
txd->cmd_type_offset_bsz |=
htole64(((u64)IAVF_TXD_CMD << IAVF_TXD_QW1_CMD_SHIFT));
/* Add to report status array (if using TX interrupts) */
if (!vsi->enable_head_writeback && tx_intr) {
txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
txr->tx_rs_pidx = (txr->tx_rs_pidx+1) & mask;
MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
}
pi->ipi_new_pidx = i;
++txr->tx_packets;
return (0);
}
/**
* iavf_isc_txd_flush - Flush Tx ring
* @arg: void pointer to the VSI
* @txqid: the Tx queue to flush
* @pidx: the ring index to flush to
*
 * Advance the Transmit Descriptor Tail (TDT); this tells the
 * hardware that this frame is available to transmit.
*/
static void
iavf_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
struct iavf_vsi *vsi = arg;
struct tx_ring *txr = &vsi->tx_queues[txqid].txr;
/* Check for ENDLESS_TX MDD event */
MPASS(pidx < vsi->shared->isc_ntxd[0]);
wr32(vsi->hw, txr->tail, pidx);
}
/**
* iavf_init_tx_ring - Initialize queue Tx ring
* @vsi: pointer to the VSI
* @que: pointer to queue to initialize
*
* (Re)Initialize a queue transmit ring by clearing its memory.
*/
void
iavf_init_tx_ring(struct iavf_vsi *vsi, struct iavf_tx_queue *que)
{
struct tx_ring *txr = &que->txr;
/* Clear the old ring contents */
bzero((void *)txr->tx_base,
(sizeof(struct iavf_tx_desc)) *
(vsi->shared->isc_ntxd[0] + (vsi->enable_head_writeback ? 1 : 0)));
wr32(vsi->hw, txr->tail, 0);
}
/**
* iavf_get_tx_head - Get the index of the head of a ring
* @que: queue to read
*
* Retrieve the value from the location the HW records its HEAD index
*
* @returns the index of the HW head of the Tx queue
*/
static inline u32
iavf_get_tx_head(struct iavf_tx_queue *que)
{
if_softc_ctx_t scctx = que->vsi->shared;
struct tx_ring *txr = &que->txr;
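	/* In head writeback mode, HW DMAs its head index into the slot just
	 * past the ring; see dma_headwb_addr in iavf_configure_queues(). */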
void *head = &txr->tx_base[scctx->isc_ntxd[0]];
return LE32_TO_CPU(*(volatile __le32 *)head);
}
/**
* iavf_isc_txd_credits_update_hwb - Update Tx ring credits
* @arg: void pointer to the VSI
* @qid: the queue id to update
* @clear: whether to update or only report current status
*
* Checks the number of packets in the queue that could be cleaned up.
*
 * If clear is true, the iflib stack has cleaned the packets and is
* notifying the driver to update its processed ring pointer.
*
* @returns the number of packets in the ring that can be cleaned.
*
* @remark this function is intended for the head write back mode.
*/
static int
iavf_isc_txd_credits_update_hwb(void *arg, uint16_t qid, bool clear)
{
struct iavf_vsi *vsi = arg;
if_softc_ctx_t scctx = vsi->shared;
struct iavf_tx_queue *que = &vsi->tx_queues[qid];
struct tx_ring *txr = &que->txr;
int head, credits;
/* Get the Head WB value */
head = iavf_get_tx_head(que);
credits = head - txr->tx_cidx_processed;
if (credits < 0)
credits += scctx->isc_ntxd[0];
if (clear)
txr->tx_cidx_processed = head;
return (credits);
}
/**
* iavf_isc_txd_credits_update_dwb - Update Tx ring credits
* @arg: void pointer to the VSI
* @txqid: the queue id to update
* @clear: whether to update or only report current status
*
* Checks the number of packets in the queue that could be cleaned up.
*
 * If clear is true, the iflib stack has cleaned the packets and is
* notifying the driver to update its processed ring pointer.
*
* @returns the number of packets in the ring that can be cleaned.
*
* @remark this function is intended for the descriptor write back mode.
*/
static int
iavf_isc_txd_credits_update_dwb(void *arg, uint16_t txqid, bool clear)
{
struct iavf_vsi *vsi = arg;
struct iavf_tx_queue *tx_que = &vsi->tx_queues[txqid];
if_softc_ctx_t scctx = vsi->shared;
struct tx_ring *txr = &tx_que->txr;
qidx_t processed = 0;
qidx_t cur, prev, ntxd, rs_cidx;
int32_t delta;
bool is_done;
rs_cidx = txr->tx_rs_cidx;
if (rs_cidx == txr->tx_rs_pidx)
return (0);
cur = txr->tx_rsq[rs_cidx];
MPASS(cur != QIDX_INVALID);
is_done = iavf_is_tx_desc_done(txr, cur);
if (!is_done)
return (0);
/* If clear is false just let caller know that there
* are descriptors to reclaim */
if (!clear)
return (1);
prev = txr->tx_cidx_processed;
ntxd = scctx->isc_ntxd[0];
do {
MPASS(prev != cur);
delta = (int32_t)cur - (int32_t)prev;
if (delta < 0)
delta += ntxd;
MPASS(delta > 0);
processed += delta;
prev = cur;
rs_cidx = (rs_cidx + 1) & (ntxd-1);
if (rs_cidx == txr->tx_rs_pidx)
break;
cur = txr->tx_rsq[rs_cidx];
MPASS(cur != QIDX_INVALID);
is_done = iavf_is_tx_desc_done(txr, cur);
} while (is_done);
txr->tx_rs_cidx = rs_cidx;
txr->tx_cidx_processed = prev;
return (processed);
}
/**
* iavf_isc_rxd_refill - Prepare descriptors for re-use
* @arg: void pointer to the VSI
* @iru: the Rx descriptor update structure
*
* Update Rx descriptors for a given queue so that they can be re-used by
* hardware for future packets.
*/
static void
iavf_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
struct iavf_vsi *vsi = arg;
if_softc_ctx_t scctx = vsi->shared;
struct rx_ring *rxr = &((vsi->rx_queues[iru->iru_qsidx]).rxr);
uint64_t *paddrs;
uint16_t next_pidx, pidx;
uint16_t count;
int i;
paddrs = iru->iru_paddrs;
pidx = iru->iru_pidx;
count = iru->iru_count;
for (i = 0, next_pidx = pidx; i < count; i++) {
rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
if (++next_pidx == scctx->isc_nrxd[0])
next_pidx = 0;
}
}
/**
* iavf_isc_rxd_flush - Notify hardware of new Rx descriptors
* @arg: void pointer to the VSI
* @rxqid: Rx queue to update
* @flid: unused parameter
* @pidx: ring index to update to
*
* Updates the tail pointer of the Rx ring, notifying hardware of new
* descriptors available for receiving packets.
*/
static void
iavf_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
{
struct iavf_vsi *vsi = arg;
struct rx_ring *rxr = &vsi->rx_queues[rxqid].rxr;
wr32(vsi->hw, rxr->tail, pidx);
}
/**
* iavf_isc_rxd_available - Calculate number of available Rx descriptors
* @arg: void pointer to the VSI
* @rxqid: Rx queue to check
* @idx: starting index to check from
* @budget: maximum Rx budget
*
* Determines how many packets are ready to be processed in the Rx queue, up
* to the specified budget.
*
* @returns the number of packets ready to be processed, up to the budget.
*/
static int
iavf_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
struct iavf_vsi *vsi = arg;
struct rx_ring *rxr = &vsi->rx_queues[rxqid].rxr;
union iavf_rx_desc *rxd;
u64 qword;
uint32_t status;
int cnt, i, nrxd;
nrxd = vsi->shared->isc_nrxd[0];
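	/* Count complete (EOP) packets until the first descriptor that
	 * hardware has not yet marked done (DD clear). */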
for (cnt = 0, i = idx; cnt < nrxd - 1 && cnt <= budget;) {
rxd = &rxr->rx_base[i];
qword = le64toh(rxd->wb.qword1.status_error_len);
status = (qword & IAVF_RXD_QW1_STATUS_MASK)
>> IAVF_RXD_QW1_STATUS_SHIFT;
if ((status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) == 0)
break;
if (++i == nrxd)
i = 0;
if (status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))
cnt++;
}
return (cnt);
}
/**
* iavf_isc_rxd_pkt_get - Decapsulate packet from Rx descriptors
* @arg: void pointer to the VSI
* @ri: packet info structure
*
* Read packet data from the Rx ring descriptors and fill in the packet info
* structure so that the iflib stack can process the packet.
*
* @remark this routine executes in ithread context.
*
 * @returns zero on success, or EBADMSG if the packet is corrupted.
*/
static int
iavf_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
struct iavf_vsi *vsi = arg;
struct iavf_rx_queue *que = &vsi->rx_queues[ri->iri_qsidx];
struct rx_ring *rxr = &que->rxr;
union iavf_rx_desc *cur;
u32 status, error;
u16 plen, vtag;
u64 qword;
u8 ptype;
bool eop;
int i, cidx;
cidx = ri->iri_cidx;
i = 0;
do {
/* 5 descriptor receive limit */
MPASS(i < IAVF_MAX_RX_SEGS);
cur = &rxr->rx_base[cidx];
qword = le64toh(cur->wb.qword1.status_error_len);
status = (qword & IAVF_RXD_QW1_STATUS_MASK)
>> IAVF_RXD_QW1_STATUS_SHIFT;
error = (qword & IAVF_RXD_QW1_ERROR_MASK)
>> IAVF_RXD_QW1_ERROR_SHIFT;
plen = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK)
>> IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK)
>> IAVF_RXD_QW1_PTYPE_SHIFT;
/* we should never be called without a valid descriptor */
MPASS((status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) != 0);
ri->iri_len += plen;
rxr->rx_bytes += plen;
cur->wb.qword1.status_error_len = 0;
eop = (status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT));
if (status & (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT))
vtag = le16toh(cur->wb.qword0.lo_dword.l2tag1);
else
vtag = 0;
/*
		** Make sure bad packets are discarded; note that
		** only the EOP descriptor has valid error results.
*/
if (eop && (error & (1 << IAVF_RX_DESC_ERROR_RXE_SHIFT))) {
rxr->desc_errs++;
return (EBADMSG);
}
ri->iri_frags[i].irf_flid = 0;
ri->iri_frags[i].irf_idx = cidx;
ri->iri_frags[i].irf_len = plen;
if (++cidx == vsi->shared->isc_nrxd[0])
cidx = 0;
i++;
} while (!eop);
/* capture data for dynamic ITR adjustment */
rxr->packets++;
rxr->rx_packets++;
if ((if_getcapenable(vsi->ifp) & IFCAP_RXCSUM) != 0)
iavf_rx_checksum(ri, status, error, ptype);
ri->iri_flowid = le32toh(cur->wb.qword0.hi_dword.rss);
ri->iri_rsstype = iavf_ptype_to_hash(ptype);
ri->iri_vtag = vtag;
ri->iri_nfrags = i;
if (vtag)
ri->iri_flags |= M_VLANTAG;
return (0);
}
/**
* iavf_rx_checksum - Handle Rx hardware checksum indication
* @ri: Rx packet info structure
* @status: status from Rx descriptor
* @error: error from Rx descriptor
* @ptype: packet type
*
 * Verify that the hardware indicated that the checksum is valid.
 * Inform the stack of the checksum status so that it does not
 * spend time verifying the checksum again.
*/
static void
iavf_rx_checksum(if_rxd_info_t ri, u32 status, u32 error, u8 ptype)
{
struct iavf_rx_ptype_decoded decoded;
ri->iri_csum_flags = 0;
/* No L3 or L4 checksum was calculated */
if (!(status & (1 << IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
decoded = decode_rx_desc_ptype(ptype);
/* IPv6 with extension headers likely have bad csum */
if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6) {
if (status &
(1 << IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT)) {
ri->iri_csum_flags = 0;
return;
}
}
ri->iri_csum_flags |= CSUM_L3_CALC;
/* IPv4 checksum error */
if (error & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT))
return;
ri->iri_csum_flags |= CSUM_L3_VALID;
ri->iri_csum_flags |= CSUM_L4_CALC;
/* L4 checksum error */
if (error & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT))
return;
ri->iri_csum_flags |= CSUM_L4_VALID;
ri->iri_csum_data |= htons(0xffff);
}

1037
sys/dev/iavf/iavf_type.h Normal file

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,83 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file iavf_vc_common.h
* @brief header for the virtchnl interface
*
* Contains function declarations for the virtchnl PF to VF communication
* interface.
*/
#ifndef _IAVF_VC_COMMON_H_
#define _IAVF_VC_COMMON_H_
#include "iavf_iflib.h"
int iavf_send_pf_msg(struct iavf_sc *sc,
enum virtchnl_ops op, u8 *msg, u16 len);
int iavf_verify_api_ver(struct iavf_sc *sc);
int iavf_send_api_ver(struct iavf_sc *sc);
int iavf_enable_queues(struct iavf_sc *sc);
int iavf_disable_queues(struct iavf_sc *sc);
int iavf_add_vlans(struct iavf_sc *sc);
int iavf_send_vf_config_msg(struct iavf_sc *sc);
int iavf_get_vf_config(struct iavf_sc *sc);
int iavf_del_vlans(struct iavf_sc *sc);
int iavf_add_ether_filters(struct iavf_sc *sc);
int iavf_del_ether_filters(struct iavf_sc *sc);
int iavf_request_reset(struct iavf_sc *sc);
int iavf_request_stats(struct iavf_sc *sc);
void iavf_update_stats_counters(struct iavf_sc *sc, struct iavf_eth_stats *es);
int iavf_config_rss_key(struct iavf_sc *sc);
int iavf_set_rss_hena(struct iavf_sc *sc);
int iavf_config_rss_lut(struct iavf_sc *sc);
int iavf_config_promisc_mode(struct iavf_sc *sc);
void *iavf_vc_get_op_chan(struct iavf_sc *sc, uint32_t request);
int iavf_vc_send_cmd(struct iavf_sc *sc, uint32_t request);
const char *iavf_vc_stat_str(struct iavf_hw *hw,
    enum virtchnl_status_code stat_err);
const char *iavf_vc_speed_to_string(enum virtchnl_link_speed link_speed);
const char *iavf_vc_opcode_str(uint16_t op);
void iavf_vc_completion(struct iavf_sc *sc, enum virtchnl_ops v_opcode,
    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen);
enum iavf_ext_link_speed iavf_adv_speed_to_ext_speed(u32 adv_link_speed);
u32 iavf_ext_speed_to_ifmedia(enum iavf_ext_link_speed link_speed);
enum iavf_ext_link_speed iavf_vc_speed_to_ext_speed(enum virtchnl_link_speed link_speed);
const char * iavf_ext_speed_to_str(enum iavf_ext_link_speed link_speed);
int iavf_configure_queues(struct iavf_sc *sc);
int iavf_map_queues(struct iavf_sc *sc);
#endif /* _IAVF_VC_COMMON_H_ */
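
A minimal sketch of how a virtchnl command is issued through
iavf_send_pf_msg(); this hypothetical wrapper mirrors the pattern used by
iavf_request_stats() and is not the driver's actual code:

	/* Hypothetical wrapper: ask the PF for current statistics; the PF
	 * answers asynchronously on the ARQ. */
	static int
	iavf_example_request_stats(struct iavf_sc *sc)
	{
		struct virtchnl_queue_select vqs;

		memset(&vqs, 0, sizeof(vqs));
		vqs.vsi_id = sc->vsi_res->vsi_id;
		return (iavf_send_pf_msg(sc, VIRTCHNL_OP_GET_STATS,
		    (u8 *)&vqs, sizeof(vqs)));
	}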


@@ -0,0 +1,178 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file iavf_vc_iflib.c
* @brief iflib-specific Virtchnl interface functions
*
* Contains functions implementing the virtchnl interface for communicating
* with the PF driver. This file contains definitions specific to the iflib
* driver implementation.
*/
#include "iavf_iflib.h"
#include "iavf_vc_common.h"
/**
* iavf_configure_queues - Configure queues
* @sc: device softc
*
* Request that the PF set up our queues.
*
* @returns zero on success, or an error code on failure.
*/
int
iavf_configure_queues(struct iavf_sc *sc)
{
device_t dev = sc->dev;
struct iavf_vsi *vsi = &sc->vsi;
if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx);
struct iavf_tx_queue *tx_que = vsi->tx_queues;
struct iavf_rx_queue *rx_que = vsi->rx_queues;
struct tx_ring *txr;
struct rx_ring *rxr;
int len, pairs;
struct virtchnl_vsi_queue_config_info *vqci;
struct virtchnl_queue_pair_info *vqpi;
/* XXX: Linux PF driver wants matching ids in each tx/rx struct, so both TX/RX
* queues of a pair need to be configured */
pairs = max(vsi->num_tx_queues, vsi->num_rx_queues);
len = sizeof(struct virtchnl_vsi_queue_config_info) +
(sizeof(struct virtchnl_queue_pair_info) * pairs);
vqci = malloc(len, M_IAVF, M_NOWAIT | M_ZERO);
if (!vqci) {
device_printf(dev, "%s: unable to allocate memory\n", __func__);
return (ENOMEM);
}
vqci->vsi_id = sc->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs;
vqpi = vqci->qpair;
	/*
	 * Size check: older hardware exposed at most 16 queue pairs, and
	 * info for about 31 pairs fits into the AQ buffer before it
	 * overflows; however, X722 VFs can have up to 256 queues, so
	 * larger configurations would need to be split across messages.
	 */
for (int i = 0; i < pairs; i++, tx_que++, rx_que++, vqpi++) {
txr = &tx_que->txr;
rxr = &rx_que->rxr;
vqpi->txq.vsi_id = vqci->vsi_id;
vqpi->txq.queue_id = i;
vqpi->txq.ring_len = scctx->isc_ntxd[0];
vqpi->txq.dma_ring_addr = txr->tx_paddr;
		/* Configure head writeback, if enabled */
if (!vsi->enable_head_writeback) {
vqpi->txq.headwb_enabled = 0;
vqpi->txq.dma_headwb_addr = 0;
} else {
vqpi->txq.headwb_enabled = 1;
vqpi->txq.dma_headwb_addr = txr->tx_paddr +
sizeof(struct iavf_tx_desc) * scctx->isc_ntxd[0];
}
vqpi->rxq.vsi_id = vqci->vsi_id;
vqpi->rxq.queue_id = i;
vqpi->rxq.ring_len = scctx->isc_nrxd[0];
vqpi->rxq.dma_ring_addr = rxr->rx_paddr;
vqpi->rxq.max_pkt_size = scctx->isc_max_frame_size;
vqpi->rxq.databuffer_size = rxr->mbuf_sz;
vqpi->rxq.splithdr_enabled = 0;
}
iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
(u8 *)vqci, len);
free(vqci, M_IAVF);
return (0);
}
/**
* iavf_map_queues - Map queues to interrupt vectors
* @sc: device softc
*
* Request that the PF map queues to interrupt vectors. Misc causes, including
* admin queue, are always mapped to vector 0.
*
* @returns zero on success, or an error code on failure.
*/
int
iavf_map_queues(struct iavf_sc *sc)
{
struct virtchnl_irq_map_info *vm;
int i, q, len;
struct iavf_vsi *vsi = &sc->vsi;
struct iavf_rx_queue *rx_que = vsi->rx_queues;
if_softc_ctx_t scctx = vsi->shared;
device_t dev = sc->dev;
// XXX: What happens if we only get 1 MSI-X vector?
MPASS(scctx->isc_vectors > 1);
	/* One vector is reserved for the adminq; the rest serve the queues. */
	q = scctx->isc_vectors - 1;
len = sizeof(struct virtchnl_irq_map_info) +
(scctx->isc_vectors * sizeof(struct virtchnl_vector_map));
vm = malloc(len, M_IAVF, M_NOWAIT);
if (!vm) {
device_printf(dev, "%s: unable to allocate memory\n", __func__);
return (ENOMEM);
}
vm->num_vectors = scctx->isc_vectors;
/* Queue vectors first */
for (i = 0; i < q; i++, rx_que++) {
vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
vm->vecmap[i].vector_id = i + 1; /* first is adminq */
// TODO: Re-examine this
vm->vecmap[i].txq_map = (1 << rx_que->rxr.me);
vm->vecmap[i].rxq_map = (1 << rx_que->rxr.me);
vm->vecmap[i].rxitr_idx = 0;
vm->vecmap[i].txitr_idx = 1;
}
/* Misc vector last - this is only for AdminQ messages */
vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
vm->vecmap[i].vector_id = 0;
vm->vecmap[i].txq_map = 0;
vm->vecmap[i].rxq_map = 0;
vm->vecmap[i].rxitr_idx = 0;
vm->vecmap[i].txitr_idx = 0;
iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_IRQ_MAP,
(u8 *)vm, len);
free(vm, M_IAVF);
return (0);
}

2138
sys/dev/iavf/if_iavf_iflib.c Normal file

File diff suppressed because it is too large

991
sys/dev/iavf/virtchnl.h Normal file

@ -0,0 +1,991 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _VIRTCHNL_H_
#define _VIRTCHNL_H_
/* Description:
* This header file describes the VF-PF communication protocol used
* by the drivers for all devices starting from our 40G product line
*
* Admin queue buffer usage:
* desc->opcode is always aqc_opc_send_msg_to_pf
* flags, retval, datalen, and data addr are all used normally.
* The Firmware copies the cookie fields when sending messages between the
* PF and VF, but uses all other fields internally. Due to this limitation,
* we must send all messages as "indirect", i.e. using an external buffer.
*
 * All the VSI indexes are relative to the VF. Each VF can have a maximum of
* three VSIs. All the queue indexes are relative to the VSI. Each VF can
* have a maximum of sixteen queues for all of its VSIs.
*
* The PF is required to return a status code in v_retval for all messages
* except RESET_VF, which does not require any response. The return value
* is of status_code type, defined in the shared type.h.
*
* In general, VF driver initialization should roughly follow the order of
* these opcodes. The VF driver must first validate the API version of the
* PF driver, then request a reset, then get resources, then configure
* queues and interrupts. After these operations are complete, the VF
* driver may start its queues, optionally add MAC and VLAN filters, and
* process traffic.
*/
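/* As an illustrative (not normative) sketch, that order maps onto the
 * following opcode sequence during VF bring-up:
 *
 *	VIRTCHNL_OP_VERSION           negotiate the API version
 *	VIRTCHNL_OP_RESET_VF          request reset; poll VFGEN_RSTAT
 *	VIRTCHNL_OP_GET_VF_RESOURCES  learn VSIs, queues, and vectors
 *	VIRTCHNL_OP_CONFIG_VSI_QUEUES configure TX/RX rings
 *	VIRTCHNL_OP_CONFIG_IRQ_MAP    map queues to interrupt vectors
 *	VIRTCHNL_OP_ENABLE_QUEUES     start the queues
 *	VIRTCHNL_OP_ADD_ETH_ADDR      optionally add MAC/VLAN filters
 */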
/* START GENERIC DEFINES
* Need to ensure the following enums and defines hold the same meaning and
* value in current and future projects
*/
/* Error Codes */
enum virtchnl_status_code {
VIRTCHNL_STATUS_SUCCESS = 0,
VIRTCHNL_STATUS_ERR_PARAM = -5,
VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
};
/* Backward compatibility */
#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0
#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7
enum virtchnl_link_speed {
VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
};
/* for hsplit_0 field of Rx HMC context */
/* deprecated with AVF 1.0 */
enum virtchnl_rx_hsplit {
VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
};
#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6
/* END GENERIC DEFINES */
/* Opcodes for VF-PF communication. These are placed in the v_opcode field
* of the virtchnl_msg structure.
*/
enum virtchnl_ops {
/* The PF sends status change events to VFs using
* the VIRTCHNL_OP_EVENT opcode.
* VFs send requests to the PF using the other ops.
* Use of "advanced opcode" features must be negotiated as part of capabilities
 * exchange and is not considered part of the base mode feature set.
*/
VIRTCHNL_OP_UNKNOWN = 0,
VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
VIRTCHNL_OP_RESET_VF = 2,
VIRTCHNL_OP_GET_VF_RESOURCES = 3,
VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
VIRTCHNL_OP_ENABLE_QUEUES = 8,
VIRTCHNL_OP_DISABLE_QUEUES = 9,
VIRTCHNL_OP_ADD_ETH_ADDR = 10,
VIRTCHNL_OP_DEL_ETH_ADDR = 11,
VIRTCHNL_OP_ADD_VLAN = 12,
VIRTCHNL_OP_DEL_VLAN = 13,
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
VIRTCHNL_OP_GET_STATS = 15,
VIRTCHNL_OP_RSVD = 16,
VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
/* opcode 19 is reserved */
VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
VIRTCHNL_OP_SET_RSS_HENA = 26,
VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
VIRTCHNL_OP_REQUEST_QUEUES = 29,
VIRTCHNL_OP_ENABLE_CHANNELS = 30,
VIRTCHNL_OP_DISABLE_CHANNELS = 31,
VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
/* opcode 34 is reserved */
/* opcodes 39, 40, 41, 42 and 43 are reserved */
	/* opcodes 44, 45, 46, 47, 48 and 49 are reserved */
};
/* These macros are used to generate compilation errors if a structure/union
* is not exactly the correct length. It gives a divide by zero error if the
* structure/union is not of the correct size, otherwise it creates an enum
* that is never used.
*/
#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \
{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
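/* For example (hypothetical misuse), if a field were added to struct
 * virtchnl_msg without updating its checked wire size, then
 *
 *	VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
 *
 * would expand to a division by zero and the build would fail,
 * catching the ABI breakage at compile time instead of on the wire.
 */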
/* Virtual channel message descriptor. This overlays the admin queue
* descriptor. All other data is passed in external buffers.
*/
struct virtchnl_msg {
u8 pad[8]; /* AQ flags/opcode/len/retval fields */
enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
enum virtchnl_status_code v_retval; /* ditto for desc->retval */
u32 vfid; /* used by PF when sending to VF */
};
VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
/* Message descriptions and data structures. */
/* VIRTCHNL_OP_VERSION
* VF posts its version number to the PF. PF responds with its version number
* in the same format, along with a return code.
* Reply from PF has its major/minor versions also in param0 and param1.
* If there is a major version mismatch, then the VF cannot operate.
* If there is a minor version mismatch, then the VF can operate but should
* add a warning to the system log.
*
* This enum element MUST always be specified as == 1, regardless of other
* changes in the API. The PF must always respond to this message without
* error regardless of version mismatch.
*/
#define VIRTCHNL_VERSION_MAJOR 1
#define VIRTCHNL_VERSION_MINOR 1
#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
struct virtchnl_version_info {
u32 major;
u32 minor;
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
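/* A minimal sketch of the VF side of the handshake; "reply" stands in
 * for the PF's virtchnl_version_info response and "warn" for whatever
 * logging the driver uses (both names are illustrative):
 *
 *	if (reply->major != VIRTCHNL_VERSION_MAJOR)
 *		return (ENOTSUP);	// major mismatch: cannot operate
 *	if (reply->minor != VIRTCHNL_VERSION_MINOR)
 *		warn("API minor version mismatch");	// operate anyway
 */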
/* VIRTCHNL_OP_RESET_VF
* VF sends this request to PF with no parameters
* PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
* until reset completion is indicated. The admin queue must be reinitialized
* after this operation.
*
* When reset is complete, PF must ensure that all queues in all VSIs associated
* with the VF are stopped, all queue configurations in the HMC are set to 0,
* and all MAC and VLAN filters (except the default MAC address) on all VSIs
* are cleared.
*/
/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
* vsi_type should always be 6 for backward compatibility. Add other fields
* as needed.
*/
enum virtchnl_vsi_type {
VIRTCHNL_VSI_TYPE_INVALID = 0,
VIRTCHNL_VSI_SRIOV = 6,
};
/* VIRTCHNL_OP_GET_VF_RESOURCES
* Version 1.0 VF sends this request to PF with no parameters
* Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
* PF responds with an indirect message containing
* virtchnl_vf_resource and one or more
* virtchnl_vsi_resource structures.
*/
struct virtchnl_vsi_resource {
u16 vsi_id;
u16 num_queue_pairs;
enum virtchnl_vsi_type vsi_type;
u16 qset_handle;
u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
/* VF capability flags
* VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
* TX/RX Checksum offloading and TSO for non-tunnelled packets.
*/
#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
#define VIRTCHNL_VF_OFFLOAD_CRC 0x00000080
#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
#define VIRTCHNL_VF_OFFLOAD_ADQ_V2 0X01000000
#define VIRTCHNL_VF_OFFLOAD_USO 0X02000000
	/* 0X04000000, 0X08000000 and 0X10000000 are reserved */
	/* 0X40000000 is reserved */
/* 0X80000000 is reserved */
/* Define below the capability flags that are not offloads */
#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
VIRTCHNL_VF_OFFLOAD_VLAN | \
VIRTCHNL_VF_OFFLOAD_RSS_PF)
struct virtchnl_vf_resource {
u16 num_vsis;
u16 num_queue_pairs;
u16 max_vectors;
u16 max_mtu;
u32 vf_cap_flags;
u32 rss_key_size;
u32 rss_lut_size;
struct virtchnl_vsi_resource vsi_res[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
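/* Illustrative sketch of consuming the reply: although vsi_res is
 * declared with one element, the buffer carries num_vsis entries
 * inline ("use_vsi" is a hypothetical consumer):
 *
 *	struct virtchnl_vf_resource *res =
 *	    (struct virtchnl_vf_resource *)msg;
 *	for (u16 i = 0; i < res->num_vsis; i++)
 *		use_vsi(&res->vsi_res[i]);
 */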
/* VIRTCHNL_OP_CONFIG_TX_QUEUE
* VF sends this message to set up parameters for one TX queue.
* External data buffer contains one instance of virtchnl_txq_info.
* PF configures requested queue and returns a status code.
*/
/* Tx queue config info */
struct virtchnl_txq_info {
u16 vsi_id;
u16 queue_id;
u16 ring_len; /* number of descriptors, multiple of 8 */
u16 headwb_enabled; /* deprecated with AVF 1.0 */
u64 dma_ring_addr;
u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
};
VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
/* VIRTCHNL_OP_CONFIG_RX_QUEUE
* VF sends this message to set up parameters for one RX queue.
* External data buffer contains one instance of virtchnl_rxq_info.
* PF configures requested queue and returns a status code. The
* crc_disable flag disables CRC stripping on the VF. Setting
* the crc_disable flag to 1 will disable CRC stripping for each
* queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
* offload must have been set prior to sending this info or the PF
* will ignore the request. This flag should be set the same for
* all of the queues for a VF.
*/
/* Rx queue config info */
struct virtchnl_rxq_info {
u16 vsi_id;
u16 queue_id;
u32 ring_len; /* number of descriptors, multiple of 32 */
u16 hdr_size;
u16 splithdr_enabled; /* deprecated with AVF 1.0 */
u32 databuffer_size;
u32 max_pkt_size;
u8 crc_disable;
u8 pad1[3];
u64 dma_ring_addr;
enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
u32 pad2;
};
VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
* VF sends this message to set parameters for active TX and RX queues
* associated with the specified VSI.
* PF configures queues and returns status.
* If the number of queues specified is greater than the number of queues
* associated with the VSI, an error is returned and no queues are configured.
* NOTE: The VF is not required to configure all queues in a single request.
* It may send multiple messages. PF drivers must correctly handle all VF
* requests.
*/
struct virtchnl_queue_pair_info {
/* NOTE: vsi_id and queue_id should be identical for both queues. */
struct virtchnl_txq_info txq;
struct virtchnl_rxq_info rxq;
};
VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
struct virtchnl_vsi_queue_config_info {
u16 vsi_id;
u16 num_queue_pairs;
u32 pad;
struct virtchnl_queue_pair_info qpair[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
/* VIRTCHNL_OP_REQUEST_QUEUES
* VF sends this message to request the PF to allocate additional queues to
* this VF. Each VF gets a guaranteed number of queues on init but asking for
* additional queues must be negotiated. This is a best effort request as it
* is possible the PF does not have enough queues left to support the request.
* If the PF cannot support the number requested it will respond with the
* maximum number it is able to support. If the request is successful, PF will
* then reset the VF to institute required changes.
*/
/* VF resource request */
struct virtchnl_vf_res_request {
u16 num_queue_pairs;
};
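/* A hedged sketch of asking for eight queue pairs, reusing the send
 * helper from the driver code above; per the note above, a successful
 * request is followed by a VF reset:
 *
 *	struct virtchnl_vf_res_request req = { .num_queue_pairs = 8 };
 *	iavf_send_pf_msg(sc, VIRTCHNL_OP_REQUEST_QUEUES,
 *	    (u8 *)&req, sizeof(req));
 */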
/* VIRTCHNL_OP_CONFIG_IRQ_MAP
* VF uses this message to map vectors to queues.
* The rxq_map and txq_map fields are bitmaps used to indicate which queues
* are to be associated with the specified vector.
* The "other" causes are always mapped to vector 0. The VF may not request
* that vector 0 be used for traffic.
* PF configures interrupt mapping and returns status.
* NOTE: due to hardware requirements, all active queues (both TX and RX)
* should be mapped to interrupts, even if the driver intends to operate
* only in polling mode. In this case the interrupt may be disabled, but
* the ITR timer will still run to trigger writebacks.
*/
struct virtchnl_vector_map {
u16 vsi_id;
u16 vector_id;
u16 rxq_map;
u16 txq_map;
u16 rxitr_idx;
u16 txitr_idx;
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
struct virtchnl_irq_map_info {
u16 num_vectors;
struct virtchnl_vector_map vecmap[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
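/* Illustrative entry mapping queue pair 0 to vector 1, mirroring what
 * iavf_map_queues() in iavf_vc_iflib.c builds; vector 0 is left for
 * the "other" (admin queue) causes:
 *
 *	vm->vecmap[0].vsi_id = vsi_id;
 *	vm->vecmap[0].vector_id = 1;		// vector 0 is reserved
 *	vm->vecmap[0].rxq_map = (1 << 0);	// RX queue 0
 *	vm->vecmap[0].txq_map = (1 << 0);	// TX queue 0
 */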
/* VIRTCHNL_OP_ENABLE_QUEUES
* VIRTCHNL_OP_DISABLE_QUEUES
 * VF sends these messages to enable or disable TX/RX queue pairs.
* The queues fields are bitmaps indicating which queues to act upon.
* (Currently, we only support 16 queues per VF, but we make the field
* u32 to allow for expansion.)
* PF performs requested action and returns status.
* NOTE: The VF is not required to enable/disable all queues in a single
* request. It may send multiple messages.
* PF drivers must correctly handle all VF requests.
*/
struct virtchnl_queue_select {
u16 vsi_id;
u16 pad;
u32 rx_queues;
u32 tx_queues;
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
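/* For example, enabling the first four queue pairs on a VSI might be
 * requested as follows (the vsi_id value is illustrative):
 *
 *	struct virtchnl_queue_select qs = {
 *		.vsi_id = 3,
 *		.rx_queues = 0xf,	// bitmap: queues 0-3
 *		.tx_queues = 0xf,
 *	};
 */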
/* VIRTCHNL_OP_ADD_ETH_ADDR
* VF sends this message in order to add one or more unicast or multicast
* address filters for the specified VSI.
* PF adds the filters and returns status.
*/
/* VIRTCHNL_OP_DEL_ETH_ADDR
* VF sends this message in order to remove one or more unicast or multicast
* filters for the specified VSI.
* PF removes the filters and returns status.
*/
struct virtchnl_ether_addr {
u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
u8 pad[2];
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
struct virtchnl_ether_addr_list {
u16 vsi_id;
u16 num_elements;
struct virtchnl_ether_addr list[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
/* VIRTCHNL_OP_ADD_VLAN
* VF sends this message to add one or more VLAN tag filters for receives.
* PF adds the filters and returns status.
* If a port VLAN is configured by the PF, this operation will return an
* error to the VF.
*/
/* VIRTCHNL_OP_DEL_VLAN
* VF sends this message to remove one or more VLAN tag filters for receives.
* PF removes the filters and returns status.
* If a port VLAN is configured by the PF, this operation will return an
* error to the VF.
*/
struct virtchnl_vlan_filter_list {
u16 vsi_id;
u16 num_elements;
u16 vlan_id[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
* VF sends VSI id and flags.
* PF returns status code in retval.
* Note: we assume that broadcast accept mode is always enabled.
*/
struct virtchnl_promisc_info {
u16 vsi_id;
u16 flags;
};
VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
#define FLAG_VF_UNICAST_PROMISC 0x00000001
#define FLAG_VF_MULTICAST_PROMISC 0x00000002
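/* For example, requesting both unicast and multicast promiscuous mode
 * (the vsi_id value is illustrative):
 *
 *	struct virtchnl_promisc_info vpi = {
 *		.vsi_id = 3,
 *		.flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC,
 *	};
 */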
/* VIRTCHNL_OP_GET_STATS
* VF sends this message to request stats for the selected VSI. VF uses
* the virtchnl_queue_select struct to specify the VSI. The queue_id
* field is ignored by the PF.
*
* PF replies with struct virtchnl_eth_stats in an external buffer.
*/
struct virtchnl_eth_stats {
u64 rx_bytes; /* received bytes */
u64 rx_unicast; /* received unicast pkts */
u64 rx_multicast; /* received multicast pkts */
u64 rx_broadcast; /* received broadcast pkts */
u64 rx_discards;
u64 rx_unknown_protocol;
u64 tx_bytes; /* transmitted bytes */
u64 tx_unicast; /* transmitted unicast pkts */
u64 tx_multicast; /* transmitted multicast pkts */
u64 tx_broadcast; /* transmitted broadcast pkts */
u64 tx_discards;
u64 tx_errors;
};
/* VIRTCHNL_OP_CONFIG_RSS_KEY
* VIRTCHNL_OP_CONFIG_RSS_LUT
* VF sends these messages to configure RSS. Only supported if both PF
* and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
* configuration negotiation. If this is the case, then the RSS fields in
* the VF resource struct are valid.
* Both the key and LUT are initialized to 0 by the PF, meaning that
* RSS is effectively disabled until set up by the VF.
*/
struct virtchnl_rss_key {
u16 vsi_id;
u16 key_len;
u8 key[1]; /* RSS hash key, packed bytes */
};
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
struct virtchnl_rss_lut {
u16 vsi_id;
u16 lut_entries;
u8 lut[1]; /* RSS lookup table */
};
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
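/* Because key[] and lut[] are declared with a single placeholder byte,
 * a sender sizes the message as the base struct plus (n - 1) trailing
 * bytes, e.g. for a 52-byte RSS key:
 *
 *	len = sizeof(struct virtchnl_rss_key) + 52 - 1;
 *
 * which matches the "valid_len += vrk->key_len - 1" accounting in
 * virtchnl_vc_validate_vf_msg() below.
 */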
/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
* VIRTCHNL_OP_SET_RSS_HENA
* VF sends these messages to get and set the hash filter enable bits for RSS.
* By default, the PF sets these to all possible traffic types that the
* hardware supports. The VF can query this value if it wants to change the
* traffic types that are hashed by the hardware.
*/
struct virtchnl_rss_hena {
u64 hena;
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
/* These limits are used by the PF driver to enforce how many channels can
 * be supported. When the ADQ_V2 capability is negotiated, up to 16
 * channels are allowed; otherwise the PF driver allows at most 4.
*/
#define VIRTCHNL_MAX_ADQ_CHANNELS 4
#define VIRTCHNL_MAX_ADQ_V2_CHANNELS 16
/* VIRTCHNL_OP_ENABLE_CHANNELS
* VIRTCHNL_OP_DISABLE_CHANNELS
* VF sends these messages to enable or disable channels based on
* the user specified queue count and queue offset for each traffic class.
* This struct encompasses all the information that the PF needs from
* VF to create a channel.
*/
struct virtchnl_channel_info {
u16 count; /* number of queues in a channel */
u16 offset; /* queues in a channel start from 'offset' */
u32 pad;
u64 max_tx_rate;
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
struct virtchnl_tc_info {
u32 num_tc;
u32 pad;
struct virtchnl_channel_info list[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
/* VIRTCHNL_ADD_CLOUD_FILTER
* VIRTCHNL_DEL_CLOUD_FILTER
* VF sends these messages to add or delete a cloud filter based on the
* user specified match and action filters. These structures encompass
* all the information that the PF needs from the VF to add/delete a
* cloud filter.
*/
struct virtchnl_l4_spec {
u8 src_mac[ETH_ALEN];
u8 dst_mac[ETH_ALEN];
	/* vlan_prio is part of this 16-bit field, even from the OS
	 * perspective: bits 11..0 hold the actual VLAN ID and bits 14..12
	 * hold the priority. If VLAN priority offload is decided on in the
	 * future, that information should be passed in bits 14..12 of the
	 * "vlan_id" field.
*/
__be16 vlan_id;
__be16 pad; /* reserved for future use */
__be32 src_ip[4];
__be32 dst_ip[4];
__be16 src_port;
__be16 dst_port;
};
VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
union virtchnl_flow_spec {
struct virtchnl_l4_spec tcp_spec;
u8 buffer[128]; /* reserved for future use */
};
VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
enum virtchnl_action {
/* action types */
VIRTCHNL_ACTION_DROP = 0,
VIRTCHNL_ACTION_TC_REDIRECT,
VIRTCHNL_ACTION_PASSTHRU,
VIRTCHNL_ACTION_QUEUE,
VIRTCHNL_ACTION_Q_REGION,
VIRTCHNL_ACTION_MARK,
VIRTCHNL_ACTION_COUNT,
};
enum virtchnl_flow_type {
/* flow types */
VIRTCHNL_TCP_V4_FLOW = 0,
VIRTCHNL_TCP_V6_FLOW,
VIRTCHNL_UDP_V4_FLOW,
VIRTCHNL_UDP_V6_FLOW,
};
struct virtchnl_filter {
union virtchnl_flow_spec data;
union virtchnl_flow_spec mask;
enum virtchnl_flow_type flow_type;
enum virtchnl_action action;
u32 action_meta;
u8 field_flags;
};
VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
/* VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
* messages in response to this one.
*/
enum virtchnl_event_codes {
VIRTCHNL_EVENT_UNKNOWN = 0,
VIRTCHNL_EVENT_LINK_CHANGE,
VIRTCHNL_EVENT_RESET_IMPENDING,
VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
};
#define PF_EVENT_SEVERITY_INFO 0
#define PF_EVENT_SEVERITY_ATTENTION 1
#define PF_EVENT_SEVERITY_ACTION_REQUIRED 2
#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
struct virtchnl_pf_event {
enum virtchnl_event_codes event;
union {
		/* If the PF driver does not support the new speed reporting
		 * capabilities, use link_event; otherwise use link_event_adv
		 * to get the speed and link information. The ability to
		 * understand new speeds is indicated by setting the
		 * capability flag VIRTCHNL_VF_CAP_ADV_LINK_SPEED in the
		 * vf_cap_flags field of struct virtchnl_vf_resource, which
		 * determines which link event struct to use below.
*/
struct {
enum virtchnl_link_speed link_speed;
u8 link_status;
} link_event;
struct {
/* link_speed provided in Mbps */
u32 link_speed;
u8 link_status;
} link_event_adv;
} event_data;
int severity;
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
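/* Sketch of consuming a link-change event; "adv_caps" stands in for
 * the driver's record of whether VIRTCHNL_VF_CAP_ADV_LINK_SPEED was
 * negotiated:
 *
 *	if (vpe->event == VIRTCHNL_EVENT_LINK_CHANGE) {
 *		if (adv_caps)
 *			mbps = vpe->event_data.link_event_adv.link_speed;
 *		else
 *			vc_speed = vpe->event_data.link_event.link_speed;
 *	}
 */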
/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
* VF uses this message to request PF to map IWARP vectors to IWARP queues.
* The request for this originates from the VF IWARP driver through
* a client interface between VF LAN and VF IWARP driver.
* A vector could have an AEQ and CEQ attached to it although
* there is a single AEQ per VF IWARP instance in which case
* most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
* There will never be a case where there will be multiple CEQs attached
* to a single vector.
* PF configures interrupt mapping and returns status.
*/
struct virtchnl_iwarp_qv_info {
u32 v_idx; /* msix_vector */
u16 ceq_idx;
u16 aeq_idx;
u8 itr_idx;
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
struct virtchnl_iwarp_qvlist_info {
u32 num_vectors;
struct virtchnl_iwarp_qv_info qv_info[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
/* Since VF messages are limited to u16 in size, precalculate the maximum
 * number of nested elements of each virtchnl structure that the virtual
 * channel can handle in a single message.
*/
enum virtchnl_vector_limits {
VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX =
((u16)(~0) - sizeof(struct virtchnl_vsi_queue_config_info)) /
sizeof(struct virtchnl_queue_pair_info),
VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX =
((u16)(~0) - sizeof(struct virtchnl_irq_map_info)) /
sizeof(struct virtchnl_vector_map),
VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX =
((u16)(~0) - sizeof(struct virtchnl_ether_addr_list)) /
sizeof(struct virtchnl_ether_addr),
VIRTCHNL_OP_ADD_DEL_VLAN_MAX =
((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list)) /
sizeof(u16),
VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP_MAX =
((u16)(~0) - sizeof(struct virtchnl_iwarp_qvlist_info)) /
sizeof(struct virtchnl_iwarp_qv_info),
VIRTCHNL_OP_ENABLE_CHANNELS_MAX =
((u16)(~0) - sizeof(struct virtchnl_tc_info)) /
sizeof(struct virtchnl_channel_info),
};
/* VF reset states - these are written into the RSTAT register:
* VFGEN_RSTAT on the VF
* When the PF initiates a reset, it writes 0
* When the reset is complete, it writes 1
* When the PF detects that the VF has recovered, it writes 2
* VF checks this register periodically to determine if a reset has occurred,
* then polls it to know when the reset is complete.
* If either the PF or VF reads the register while the hardware
 * is in a reset state, it will return DEADBEEF, which, when masked,
* will result in 3.
*/
enum virtchnl_vfr_states {
VIRTCHNL_VFR_INPROGRESS = 0,
VIRTCHNL_VFR_COMPLETED,
VIRTCHNL_VFR_VFACTIVE,
};
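/* A minimal polling sketch, assuming an rd32()-style register read
 * helper and a hypothetical VFGEN_RSTAT_MASK covering the state bits
 * (neither is defined in this header):
 *
 *	rstat = rd32(hw, VFGEN_RSTAT) & VFGEN_RSTAT_MASK;
 *	if (rstat == VIRTCHNL_VFR_COMPLETED ||
 *	    rstat == VIRTCHNL_VFR_VFACTIVE)
 *		...	// reset done; reinitialize the admin queue
 */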
/**
* virtchnl_vc_validate_vf_msg
* @ver: Virtchnl version info
* @v_opcode: Opcode for the message
* @msg: pointer to the msg buffer
* @msglen: msg length
*
* validate msg format against struct for each opcode
*/
static inline int
virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
u8 *msg, u16 msglen)
{
bool err_msg_format = false;
u32 valid_len = 0;
/* Validate message length. */
switch (v_opcode) {
case VIRTCHNL_OP_VERSION:
valid_len = sizeof(struct virtchnl_version_info);
break;
case VIRTCHNL_OP_RESET_VF:
break;
case VIRTCHNL_OP_GET_VF_RESOURCES:
if (VF_IS_V11(ver))
valid_len = sizeof(u32);
break;
case VIRTCHNL_OP_CONFIG_TX_QUEUE:
valid_len = sizeof(struct virtchnl_txq_info);
break;
case VIRTCHNL_OP_CONFIG_RX_QUEUE:
valid_len = sizeof(struct virtchnl_rxq_info);
break;
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
if (msglen >= valid_len) {
struct virtchnl_vsi_queue_config_info *vqc =
(struct virtchnl_vsi_queue_config_info *)msg;
if (vqc->num_queue_pairs == 0 || vqc->num_queue_pairs >
VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX) {
err_msg_format = true;
break;
}
valid_len += (vqc->num_queue_pairs *
sizeof(struct
virtchnl_queue_pair_info));
}
break;
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
valid_len = sizeof(struct virtchnl_irq_map_info);
if (msglen >= valid_len) {
struct virtchnl_irq_map_info *vimi =
(struct virtchnl_irq_map_info *)msg;
if (vimi->num_vectors == 0 || vimi->num_vectors >
VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX) {
err_msg_format = true;
break;
}
valid_len += (vimi->num_vectors *
sizeof(struct virtchnl_vector_map));
}
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
case VIRTCHNL_OP_DISABLE_QUEUES:
valid_len = sizeof(struct virtchnl_queue_select);
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:
case VIRTCHNL_OP_DEL_ETH_ADDR:
valid_len = sizeof(struct virtchnl_ether_addr_list);
if (msglen >= valid_len) {
struct virtchnl_ether_addr_list *veal =
(struct virtchnl_ether_addr_list *)msg;
if (veal->num_elements == 0 || veal->num_elements >
VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX) {
err_msg_format = true;
break;
}
valid_len += veal->num_elements *
sizeof(struct virtchnl_ether_addr);
}
break;
case VIRTCHNL_OP_ADD_VLAN:
case VIRTCHNL_OP_DEL_VLAN:
valid_len = sizeof(struct virtchnl_vlan_filter_list);
if (msglen >= valid_len) {
struct virtchnl_vlan_filter_list *vfl =
(struct virtchnl_vlan_filter_list *)msg;
if (vfl->num_elements == 0 || vfl->num_elements >
VIRTCHNL_OP_ADD_DEL_VLAN_MAX) {
err_msg_format = true;
break;
}
valid_len += vfl->num_elements * sizeof(u16);
}
break;
case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
valid_len = sizeof(struct virtchnl_promisc_info);
break;
case VIRTCHNL_OP_GET_STATS:
valid_len = sizeof(struct virtchnl_queue_select);
break;
case VIRTCHNL_OP_IWARP:
/* These messages are opaque to us and will be validated in
* the RDMA client code. We just need to check for nonzero
* length. The firmware will enforce max length restrictions.
*/
if (msglen)
valid_len = msglen;
else
err_msg_format = true;
break;
case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
break;
case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
if (msglen >= valid_len) {
struct virtchnl_iwarp_qvlist_info *qv =
(struct virtchnl_iwarp_qvlist_info *)msg;
if (qv->num_vectors == 0 || qv->num_vectors >
VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP_MAX) {
err_msg_format = true;
break;
}
valid_len += ((qv->num_vectors - 1) *
sizeof(struct virtchnl_iwarp_qv_info));
}
break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
valid_len = sizeof(struct virtchnl_rss_key);
if (msglen >= valid_len) {
struct virtchnl_rss_key *vrk =
(struct virtchnl_rss_key *)msg;
if (vrk->key_len == 0) {
/* zero length is allowed as input */
break;
}
valid_len += vrk->key_len - 1;
}
break;
case VIRTCHNL_OP_CONFIG_RSS_LUT:
valid_len = sizeof(struct virtchnl_rss_lut);
if (msglen >= valid_len) {
struct virtchnl_rss_lut *vrl =
(struct virtchnl_rss_lut *)msg;
if (vrl->lut_entries == 0) {
/* zero entries is allowed as input */
break;
}
valid_len += vrl->lut_entries - 1;
}
break;
case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
break;
case VIRTCHNL_OP_SET_RSS_HENA:
valid_len = sizeof(struct virtchnl_rss_hena);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
break;
case VIRTCHNL_OP_REQUEST_QUEUES:
valid_len = sizeof(struct virtchnl_vf_res_request);
break;
case VIRTCHNL_OP_ENABLE_CHANNELS:
valid_len = sizeof(struct virtchnl_tc_info);
if (msglen >= valid_len) {
struct virtchnl_tc_info *vti =
(struct virtchnl_tc_info *)msg;
if (vti->num_tc == 0 || vti->num_tc >
VIRTCHNL_OP_ENABLE_CHANNELS_MAX) {
err_msg_format = true;
break;
}
valid_len += (vti->num_tc - 1) *
sizeof(struct virtchnl_channel_info);
}
break;
case VIRTCHNL_OP_DISABLE_CHANNELS:
break;
case VIRTCHNL_OP_ADD_CLOUD_FILTER:
case VIRTCHNL_OP_DEL_CLOUD_FILTER:
valid_len = sizeof(struct virtchnl_filter);
break;
/* These are always errors coming from the VF. */
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
default:
return VIRTCHNL_STATUS_ERR_PARAM;
}
	/* a few more checks */
if (err_msg_format || valid_len != msglen)
return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
return 0;
}
#endif /* _VIRTCHNL_H_ */

sys/modules/iavf/Makefile

@ -1,17 +1,18 @@
 #$FreeBSD$
-.PATH: ${SRCTOP}/sys/dev/ixl
+.PATH: ${SRCTOP}/sys/dev/iavf
 KMOD = if_iavf
 SRCS = device_if.h bus_if.h pci_if.h ifdi_if.h
-SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h opt_iflib.h opt_global.h
-SRCS += if_iavf.c iavf_vc.c ixl_txrx.c i40e_osdep.c
+SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_iflib.h
+SRCS += if_iavf_iflib.c iavf_lib.c iavf_osdep.c iavf_txrx_iflib.c
+SRCS += iavf_vc_common.c iavf_vc_iflib.c
 # Shared source
-SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c
+SRCS += iavf_adminq.c iavf_common.c
 # Debug messages / sysctls
-# CFLAGS += -DIXL_DEBUG
+# CFLAGS += -DIAVF_DEBUG
 # Enable asserts and other debugging facilities
 # CFLAGS += -DINVARIANTS -DINVARIANTS_SUPPORT -DWITNESS