Update ena-com HAL to v1.1.4.3 and update driver accordingly
The newest ena-com HAL supports LLQv2 and introduces API changes. In order not to break the driver compilation, it was updated/fixed in the following way:
* Change version of the driver to 0.8.0
* Provide reset cause when triggering reset of the device
* Reset device after attach fails
* In the reset task free management irq after calling ena_down. Admin queue can still be used before ena_down is called, or when it is being handled
* Do not reset device if ena_reset_task fails
* Move call of the ena_com_dev_reset to the ena_down() routine - it should be called only if interface was up
* Use different function for checking empty space on the sq ring (ena-com API change)
* Fix typo on ENA_TX_CLEANUP_THRESHOLD
* Change checking for EPERM with EOPNOTSUPP - change in the ena-com API
* Minor style fixes

Submitted by:   Michal Krawczyk <mk@semihalf.com>
Obtained from:  Amazon.com, Inc. Semihalf
Sponsored by:   Amazon.com, Inc.
Differential Revision:  https://reviews.freebsd.org/D12143
commit a195fab02b

Notes:
    svn2git
    2020-12-20 02:59:44 +00:00
    svn path=/head/; revision=325236
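The most visible API change for the driver is that ena_com_dev_reset() now takes the reset cause as a second argument (the updated prototype appears in the ena_com.h hunk further down). The following is a minimal sketch, not part of the commit, of how a caller passes the cause; the enumerator ENA_REGS_RESET_OS_TRIGGER is assumed to be one of the values of enum ena_regs_reset_reason_types shipped with this HAL version.

/*
 * Minimal sketch, not part of the commit: triggering a device reset
 * with the updated two-argument ena_com_dev_reset().  The enumerator
 * ENA_REGS_RESET_OS_TRIGGER is assumed to exist in ena_regs_defs.h as
 * one of enum ena_regs_reset_reason_types.
 */
static void
example_trigger_reset(struct ena_com_dev *ena_dev)
{
	int rc;

	/* The old API was ena_com_dev_reset(ena_dev); the reset cause
	 * is now reported to the device through the DEV_CTL register. */
	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_OS_TRIGGER);
	if (rc != 0)
		ena_trc_err("device reset failed\n");
}

The reason value ends up in the DEV_CTL register; see the reset_val handling in the ena_com_dev_reset() hunk below.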
@@ -45,6 +45,13 @@
#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#ifdef ENA_EXTENDED_STATS

#define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08
#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)

#endif /* ENA_EXTENDED_STATS */
#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
		| (ENA_COMMON_SPEC_VERSION_MINOR))
@@ -65,6 +72,10 @@

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4

#define ENA_REGS_ADMIN_INTR_MASK 1

/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
@@ -102,7 +113,7 @@ static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
	}

	ena_addr->mem_addr_low = (u32)addr;
	ena_addr->mem_addr_high = (u64)addr >> 32;
	ena_addr->mem_addr_high = (u16)((u64)addr >> 32);

	return 0;
}
@@ -238,12 +249,9 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = admin_queue->sq.tail - admin_queue->sq.head;
	cnt = ATOMIC32_READ(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		ena_trc_dbg("admin queue is FULL (tail %d head %d depth: %d)\n",
			    admin_queue->sq.tail,
			    admin_queue->sq.head,
			    admin_queue->q_depth);
		ena_trc_dbg("admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(ENA_COM_NO_SPACE);
	}
@@ -278,6 +286,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	ENA_DB_SYNC(&admin_queue->sq.mem_handle);
	ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
			admin_queue->sq.db_addr);

@@ -362,22 +371,44 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
					       io_sq->desc_addr.phys_addr,
					       io_sq->desc_addr.mem_handle);
		}
	} else {
		ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
				   size,
				   io_sq->desc_addr.virt_addr,
				   ctx->numa_node,
				   dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				ENA_MEM_ALLOC(ena_dev->dmadev, size);
		}
	}

		if (!io_sq->desc_addr.virt_addr) {
			ena_trc_err("memory allocation failed");
			return ENA_COM_NO_MEM;
		}
	}

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* Allocate bounce buffers */
		io_sq->bounce_buf_ctrl.buffer_size = ena_dev->llq_info.desc_list_entry_size;
		io_sq->bounce_buf_ctrl.buffers_num = ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
		io_sq->bounce_buf_ctrl.next_to_use = 0;

		size = io_sq->bounce_buf_ctrl.buffer_size * io_sq->bounce_buf_ctrl.buffers_num;

		ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
				   size,
				   io_sq->bounce_buf_ctrl.base_buffer,
				   ctx->numa_node,
				   dev_node);
		if (!io_sq->bounce_buf_ctrl.base_buffer)
			io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);

		if (!io_sq->bounce_buf_ctrl.base_buffer) {
			ena_trc_err("bounce buffer memory allocation failed");
			return ENA_COM_NO_MEM;
		}

		memcpy(&io_sq->llq_info, &ena_dev->llq_info, sizeof(io_sq->llq_info));

		/* Initiate the first bounce buffer */
		io_sq->llq_buf_ctrl.curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, io_sq->llq_info.desc_list_entry_size);
		io_sq->llq_buf_ctrl.descs_left_in_line =
			io_sq->llq_info.descs_num_before_header;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
@@ -507,7 +538,7 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return ENA_COM_NO_MEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return ENA_COM_PERMISSION;
		return ENA_COM_UNSUPPORTED;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
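This hunk is where the commit-message bullet about replacing the EPERM check with EOPNOTSUPP comes from: the HAL now reports unsupported opcodes and features as ENA_COM_UNSUPPORTED rather than ENA_COM_PERMISSION. A hedged driver-side sketch follows; the EOPNOTSUPP mapping of ENA_COM_UNSUPPORTED and the adapter field names are assumptions, not taken from this diff.

/* Sketch only: how a driver-side caller is expected to test for an
 * unsupported feature after this change.  The EOPNOTSUPP mapping of
 * ENA_COM_UNSUPPORTED and the adapter fields are illustrative. */
rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
if (rc == EOPNOTSUPP)		/* was: rc == EPERM */
	device_printf(adapter->pdev, "Setting MTU is not supported\n");
else if (rc != 0)
	device_printf(adapter->pdev, "Failed to set MTU: %d\n", rc);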
@ -567,6 +598,75 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
|
||||
struct ena_admin_feature_llq_desc *llq_desc)
|
||||
{
|
||||
struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
|
||||
|
||||
memset(llq_info, 0, sizeof(*llq_info));
|
||||
|
||||
switch (llq_desc->header_location_ctrl) {
|
||||
case ENA_ADMIN_INLINE_HEADER:
|
||||
llq_info->inline_header = true;
|
||||
break;
|
||||
case ENA_ADMIN_HEADER_RING:
|
||||
llq_info->inline_header = false;
|
||||
break;
|
||||
default:
|
||||
ena_trc_err("Invalid header location control\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
switch (llq_desc->entry_size_ctrl) {
|
||||
case ENA_ADMIN_LIST_ENTRY_SIZE_128B:
|
||||
llq_info->desc_list_entry_size = 128;
|
||||
break;
|
||||
case ENA_ADMIN_LIST_ENTRY_SIZE_192B:
|
||||
llq_info->desc_list_entry_size = 192;
|
||||
break;
|
||||
case ENA_ADMIN_LIST_ENTRY_SIZE_256B:
|
||||
llq_info->desc_list_entry_size = 256;
|
||||
break;
|
||||
default:
|
||||
ena_trc_err("Invalid entry_size_ctrl %d\n",
|
||||
llq_desc->entry_size_ctrl);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if ((llq_info->desc_list_entry_size & 0x7)) {
|
||||
/* The desc list entry size should be whole multiply of 8
|
||||
* This requirement comes from __iowrite64_copy()
|
||||
*/
|
||||
ena_trc_err("illegal entry size %d\n",
|
||||
llq_info->desc_list_entry_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (llq_info->inline_header) {
|
||||
llq_info->desc_stride_ctrl = llq_desc->descriptors_stride_ctrl;
|
||||
if ((llq_info->desc_stride_ctrl != ENA_ADMIN_SINGLE_DESC_PER_ENTRY) &&
|
||||
(llq_info->desc_stride_ctrl != ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)) {
|
||||
ena_trc_err("Invalid desc_stride_ctrl %d\n",
|
||||
llq_info->desc_stride_ctrl);
|
||||
return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
|
||||
}
|
||||
|
||||
if (llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)
|
||||
llq_info->descs_per_entry = llq_info->desc_list_entry_size /
|
||||
sizeof(struct ena_eth_io_tx_desc);
|
||||
else
|
||||
llq_info->descs_per_entry = 1;
|
||||
|
||||
llq_info->descs_num_before_header = llq_desc->desc_num_before_header_ctrl;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
|
||||
struct ena_com_admin_queue *admin_queue)
|
||||
{
|
||||
@ -614,13 +714,14 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
|
||||
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
|
||||
volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
|
||||
mmio_read->read_resp;
|
||||
u32 mmio_read_reg, timeout, ret;
|
||||
u32 mmio_read_reg, ret, i;
|
||||
unsigned long flags;
|
||||
int i;
|
||||
u32 timeout = mmio_read->reg_read_to;
|
||||
|
||||
ENA_MIGHT_SLEEP();
|
||||
|
||||
timeout = mmio_read->reg_read_to ? : ENA_REG_READ_TIMEOUT;
|
||||
if (timeout == 0)
|
||||
timeout = ENA_REG_READ_TIMEOUT;
|
||||
|
||||
/* If readless is disabled, perform regular read */
|
||||
if (!mmio_read->readless_supported)
|
||||
@ -745,17 +846,20 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
|
||||
if (io_sq->desc_addr.virt_addr) {
|
||||
size = io_sq->desc_entry_size * io_sq->q_depth;
|
||||
|
||||
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
|
||||
ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
|
||||
size,
|
||||
io_sq->desc_addr.virt_addr,
|
||||
io_sq->desc_addr.phys_addr,
|
||||
io_sq->desc_addr.mem_handle);
|
||||
else
|
||||
ENA_MEM_FREE(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
|
||||
|
||||
io_sq->desc_addr.virt_addr = NULL;
|
||||
}
|
||||
|
||||
if (io_sq->bounce_buf_ctrl.base_buffer) {
|
||||
size = io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
|
||||
ENA_MEM_FREE(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
|
||||
io_sq->bounce_buf_ctrl.base_buffer = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
|
||||
@ -807,7 +911,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
|
||||
|
||||
if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
|
||||
ena_trc_dbg("Feature %d isn't supported\n", feature_id);
|
||||
return ENA_COM_PERMISSION;
|
||||
return ENA_COM_UNSUPPORTED;
|
||||
}
|
||||
|
||||
memset(&get_cmd, 0x0, sizeof(get_cmd));
|
||||
@ -1366,7 +1470,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
|
||||
ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
|
||||
get_resp.u.aenq.supported_groups,
|
||||
groups_flag);
|
||||
return ENA_COM_PERMISSION;
|
||||
return ENA_COM_UNSUPPORTED;
|
||||
}
|
||||
|
||||
memset(&cmd, 0x0, sizeof(cmd));
|
||||
@ -1480,7 +1584,6 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
|
||||
|
||||
if (admin_queue->comp_ctx)
|
||||
ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
|
||||
|
||||
admin_queue->comp_ctx = NULL;
|
||||
size = ADMIN_SQ_SIZE(admin_queue->q_depth);
|
||||
if (sq->entries)
|
||||
@ -1503,6 +1606,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
|
||||
|
||||
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
|
||||
{
|
||||
u32 mask_value = 0;
|
||||
|
||||
if (polling)
|
||||
mask_value = ENA_REGS_ADMIN_INTR_MASK;
|
||||
|
||||
ENA_REG_WRITE32(ena_dev->bus, mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
|
||||
ena_dev->admin_queue.polling = polling;
|
||||
}
|
||||
|
||||
@ -1790,11 +1899,20 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
|
||||
if (!rc)
|
||||
memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
|
||||
sizeof(get_resp.u.hw_hints));
|
||||
else if (rc == ENA_COM_PERMISSION)
|
||||
else if (rc == ENA_COM_UNSUPPORTED)
|
||||
memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
|
||||
else
|
||||
return rc;
|
||||
|
||||
rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
|
||||
if (!rc)
|
||||
memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
|
||||
sizeof(get_resp.u.llq));
|
||||
else if (rc == ENA_COM_UNSUPPORTED)
|
||||
memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
|
||||
else
|
||||
return rc;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1827,6 +1945,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
|
||||
struct ena_admin_aenq_common_desc *aenq_common;
|
||||
struct ena_com_aenq *aenq = &dev->aenq;
|
||||
ena_aenq_handler handler_cb;
|
||||
unsigned long long timestamp;
|
||||
u16 masked_head, processed = 0;
|
||||
u8 phase;
|
||||
|
||||
@ -1838,11 +1957,12 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
|
||||
/* Go over all the events */
|
||||
while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
|
||||
phase) {
|
||||
ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%jus]\n",
|
||||
timestamp = (unsigned long long)aenq_common->timestamp_low |
|
||||
((unsigned long long)aenq_common->timestamp_high << 32);
|
||||
ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
|
||||
aenq_common->group,
|
||||
aenq_common->syndrom,
|
||||
(u64)aenq_common->timestamp_low +
|
||||
((u64)aenq_common->timestamp_high << 32));
|
||||
timestamp);
|
||||
|
||||
/* Handle specific event*/
|
||||
handler_cb = ena_com_get_specific_aenq_cb(dev,
|
||||
@ -1872,8 +1992,30 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
|
||||
mb();
|
||||
ENA_REG_WRITE32(dev->bus, (u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
|
||||
}
|
||||
#ifdef ENA_EXTENDED_STATS
|
||||
/*
|
||||
* Sets the function Idx and Queue Idx to be used for
|
||||
* get full statistics feature
|
||||
*
|
||||
*/
|
||||
int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
|
||||
u32 func_queue)
|
||||
{
|
||||
|
||||
int ena_com_dev_reset(struct ena_com_dev *ena_dev)
|
||||
/* Function & Queue is acquired from user in the following format :
|
||||
* Bottom Half word: funct
|
||||
* Top Half Word: queue
|
||||
*/
|
||||
ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue);
|
||||
ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* ENA_EXTENDED_STATS */
|
||||
|
||||
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
|
||||
enum ena_regs_reset_reason_types reset_reason)
|
||||
{
|
||||
u32 stat, timeout, cap, reset_val;
|
||||
int rc;
|
||||
@ -1901,6 +2043,8 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev)
|
||||
|
||||
/* start reset */
|
||||
reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
|
||||
reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
|
||||
ENA_REGS_DEV_CTL_RESET_REASON_MASK;
|
||||
ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
|
||||
|
||||
/* Write again the MMIO read request address */
|
||||
@ -1973,6 +2117,51 @@ int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
|
||||
|
||||
return ret;
|
||||
}
|
||||
#ifdef ENA_EXTENDED_STATS
|
||||
|
||||
int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
|
||||
u32 len)
|
||||
{
|
||||
struct ena_com_stats_ctx ctx;
|
||||
struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx.get_cmd;
|
||||
ena_mem_handle_t mem_handle;
|
||||
void *virt_addr;
|
||||
dma_addr_t phys_addr;
|
||||
int ret;
|
||||
|
||||
ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
|
||||
virt_addr, phys_addr, mem_handle);
|
||||
if (!virt_addr) {
|
||||
ret = ENA_COM_NO_MEM;
|
||||
goto done;
|
||||
}
|
||||
memset(&ctx, 0x0, sizeof(ctx));
|
||||
ret = ena_com_mem_addr_set(ena_dev,
|
||||
&get_cmd->u.control_buffer.address,
|
||||
phys_addr);
|
||||
if (unlikely(ret)) {
|
||||
ena_trc_err("memory address set failed\n");
|
||||
return ret;
|
||||
}
|
||||
get_cmd->u.control_buffer.length = len;
|
||||
|
||||
get_cmd->device_id = ena_dev->stats_func;
|
||||
get_cmd->queue_idx = ena_dev->stats_queue;
|
||||
|
||||
ret = ena_get_dev_stats(ena_dev, &ctx,
|
||||
ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
|
||||
if (ret < 0)
|
||||
goto free_ext_stats_mem;
|
||||
|
||||
ret = snprintf(buff, len, "%s", (char *)virt_addr);
|
||||
|
||||
free_ext_stats_mem:
|
||||
ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
|
||||
mem_handle);
|
||||
done:
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
|
||||
{
|
||||
@ -1983,7 +2172,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
|
||||
|
||||
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
|
||||
ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
|
||||
return ENA_COM_PERMISSION;
|
||||
return ENA_COM_UNSUPPORTED;
|
||||
}
|
||||
|
||||
memset(&cmd, 0x0, sizeof(cmd));
|
||||
@ -2037,7 +2226,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
|
||||
ENA_ADMIN_RSS_HASH_FUNCTION)) {
|
||||
ena_trc_dbg("Feature %d isn't supported\n",
|
||||
ENA_ADMIN_RSS_HASH_FUNCTION);
|
||||
return ENA_COM_PERMISSION;
|
||||
return ENA_COM_UNSUPPORTED;
|
||||
}
|
||||
|
||||
/* Validate hash function is supported */
|
||||
@ -2049,7 +2238,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
|
||||
if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) {
|
||||
ena_trc_err("Func hash %d isn't supported by device, abort\n",
|
||||
rss->hash_func);
|
||||
return ENA_COM_PERMISSION;
|
||||
return ENA_COM_UNSUPPORTED;
|
||||
}
|
||||
|
||||
memset(&cmd, 0x0, sizeof(cmd));
|
||||
@ -2108,7 +2297,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
|
||||
|
||||
if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
|
||||
ena_trc_err("Flow hash function %d isn't supported\n", func);
|
||||
return ENA_COM_PERMISSION;
|
||||
return ENA_COM_UNSUPPORTED;
|
||||
}
|
||||
|
||||
switch (func) {
|
||||
@ -2201,7 +2390,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
|
||||
ENA_ADMIN_RSS_HASH_INPUT)) {
|
||||
ena_trc_dbg("Feature %d isn't supported\n",
|
||||
ENA_ADMIN_RSS_HASH_INPUT);
|
||||
return ENA_COM_PERMISSION;
|
||||
return ENA_COM_UNSUPPORTED;
|
||||
}
|
||||
|
||||
memset(&cmd, 0x0, sizeof(cmd));
|
||||
@ -2282,7 +2471,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
|
||||
ena_trc_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
|
||||
i, hash_ctrl->supported_fields[i].fields,
|
||||
hash_ctrl->selected_fields[i].fields);
|
||||
return ENA_COM_PERMISSION;
|
||||
return ENA_COM_UNSUPPORTED;
|
||||
}
|
||||
}
|
||||
|
||||
@ -2360,7 +2549,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
|
||||
ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
|
||||
ena_trc_dbg("Feature %d isn't supported\n",
|
||||
ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
|
||||
return ENA_COM_PERMISSION;
|
||||
return ENA_COM_UNSUPPORTED;
|
||||
}
|
||||
|
||||
ret = ena_com_ind_tbl_convert_to_device(ena_dev);
|
||||
@ -2636,7 +2825,7 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
|
||||
ENA_ADMIN_INTERRUPT_MODERATION);
|
||||
|
||||
if (rc) {
|
||||
if (rc == ENA_COM_PERMISSION) {
|
||||
if (rc == ENA_COM_UNSUPPORTED) {
|
||||
ena_trc_dbg("Feature %d isn't supported\n",
|
||||
ENA_ADMIN_INTERRUPT_MODERATION);
|
||||
rc = 0;
|
||||
@ -2759,3 +2948,33 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
|
||||
intr_moder_tbl[level].pkts_per_interval;
|
||||
entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
|
||||
}
|
||||
|
||||
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
|
||||
struct ena_admin_feature_llq_desc *llq)
|
||||
{
|
||||
int rc;
|
||||
int size;
|
||||
|
||||
if (llq->max_llq_num == 0) {
|
||||
ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
|
||||
return 0;
|
||||
}
|
||||
|
||||
rc = ena_com_config_llq_info(ena_dev, llq);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
/* Validate the descriptor is not too big */
|
||||
size = ena_dev->tx_max_header_size;
|
||||
size += ena_dev->llq_info.descs_num_before_header *
|
||||
sizeof(struct ena_eth_io_tx_desc);
|
||||
|
||||
if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
|
||||
ena_trc_err("the size of the LLQ entry is smaller than needed\n");
|
||||
return ENA_COM_INVAL;
|
||||
}
|
||||
|
||||
ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -133,6 +133,15 @@ struct ena_com_tx_meta {
|
||||
u16 l4_hdr_len; /* In words */
|
||||
};
|
||||
|
||||
struct ena_com_llq_info {
|
||||
bool inline_header;
|
||||
u16 desc_stride_ctrl;
|
||||
|
||||
u16 desc_list_entry_size;
|
||||
u16 descs_num_before_header;
|
||||
u16 descs_per_entry;
|
||||
};
|
||||
|
||||
struct ena_com_io_cq {
|
||||
struct ena_com_io_desc_addr cdesc_addr;
|
||||
void *bus;
|
||||
@ -171,6 +180,20 @@ struct ena_com_io_cq {
|
||||
|
||||
} ____cacheline_aligned;
|
||||
|
||||
struct ena_com_io_bounce_buffer_control {
|
||||
u8 *base_buffer;
|
||||
u16 next_to_use;
|
||||
u16 buffer_size;
|
||||
u16 buffers_num; /* Must be a power of 2 */
|
||||
};
|
||||
|
||||
/* This struct is to keep tracking the current location of the next llq entry */
|
||||
struct ena_com_llq_pkt_ctrl {
|
||||
u8 *curr_bounce_buf;
|
||||
u16 idx;
|
||||
u16 descs_left_in_line;
|
||||
};
|
||||
|
||||
struct ena_com_io_sq {
|
||||
struct ena_com_io_desc_addr desc_addr;
|
||||
void *bus;
|
||||
@ -183,6 +206,9 @@ struct ena_com_io_sq {
|
||||
|
||||
u32 msix_vector;
|
||||
struct ena_com_tx_meta cached_tx_meta;
|
||||
struct ena_com_llq_info llq_info;
|
||||
struct ena_com_llq_pkt_ctrl llq_buf_ctrl;
|
||||
struct ena_com_io_bounce_buffer_control bounce_buf_ctrl;
|
||||
|
||||
u16 q_depth;
|
||||
u16 qid;
|
||||
@ -190,6 +216,7 @@ struct ena_com_io_sq {
|
||||
u16 idx;
|
||||
u16 tail;
|
||||
u16 next_to_comp;
|
||||
u16 llq_last_copy_tail;
|
||||
u32 tx_max_header_size;
|
||||
u8 phase;
|
||||
u8 desc_entry_size;
|
||||
@ -321,6 +348,7 @@ struct ena_com_dev {
|
||||
void __iomem *mem_bar;
|
||||
void *dmadev;
|
||||
void *bus;
|
||||
|
||||
enum ena_admin_placement_policy_type tx_mem_queue_type;
|
||||
u32 tx_max_header_size;
|
||||
u16 stats_func; /* Selected function for extended statistic dump */
|
||||
@ -337,6 +365,8 @@ struct ena_com_dev {
|
||||
u16 intr_delay_resolution;
|
||||
u32 intr_moder_tx_interval;
|
||||
struct ena_intr_moder_entry *intr_moder_tbl;
|
||||
|
||||
struct ena_com_llq_info llq_info;
|
||||
};
|
||||
|
||||
struct ena_com_dev_get_features_ctx {
|
||||
@ -345,6 +375,7 @@ struct ena_com_dev_get_features_ctx {
|
||||
struct ena_admin_feature_aenq_desc aenq;
|
||||
struct ena_admin_feature_offload_desc offload;
|
||||
struct ena_admin_ena_hw_hints hw_hints;
|
||||
struct ena_admin_feature_llq_desc llq;
|
||||
};
|
||||
|
||||
struct ena_com_create_io_ctx {
|
||||
@ -426,10 +457,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev);
|
||||
|
||||
/* ena_com_dev_reset - Perform device FLR to the device.
|
||||
* @ena_dev: ENA communication layer struct
|
||||
* @reset_reason: Specify what is the trigger for the reset in case of an error.
|
||||
*
|
||||
* @return - 0 on success, negative value on failure.
|
||||
*/
|
||||
int ena_com_dev_reset(struct ena_com_dev *ena_dev);
|
||||
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
|
||||
enum ena_regs_reset_reason_types reset_reason);
|
||||
|
||||
/* ena_com_create_io_queue - Create io queue.
|
||||
* @ena_dev: ENA communication layer struct
|
||||
@ -939,6 +972,15 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
|
||||
enum ena_intr_moder_level level,
|
||||
struct ena_intr_moder_entry *entry);
|
||||
|
||||
|
||||
/* ena_com_config_dev_mode - Configure the placement policy of the device.
|
||||
* @ena_dev: ENA communication layer struct
|
||||
* @llq: LLQ feature descriptor, retrieve via ena_com_get_dev_attr_feat.
|
||||
*
|
||||
*/
|
||||
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
|
||||
struct ena_admin_feature_llq_desc *llq);
|
||||
|
||||
static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
|
||||
{
|
||||
return ena_dev->adaptive_coalescing;
|
||||
@ -1048,6 +1090,30 @@ static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
|
||||
intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
|
||||
}
|
||||
|
||||
static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
|
||||
{
|
||||
u16 size, buffers_num;
|
||||
u8 *buf;
|
||||
|
||||
size = bounce_buf_ctrl->buffer_size;
|
||||
buffers_num = bounce_buf_ctrl->buffers_num;
|
||||
|
||||
buf = bounce_buf_ctrl->base_buffer +
|
||||
(bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;
|
||||
|
||||
prefetch(bounce_buf_ctrl->base_buffer +
|
||||
(bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);
|
||||
|
||||
return buf;
|
||||
}
|
||||
|
||||
#ifdef ENA_EXTENDED_STATS
|
||||
int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
|
||||
u32 len);
|
||||
|
||||
int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
|
||||
u32 funct_queue);
|
||||
#endif
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif /* __cplusplus */
sys/contrib/ena-com/ena_defs/ena_admin_defs.h (new file, 1484 lines)
File diff suppressed because it is too large.

sys/contrib/ena-com/ena_defs/ena_common_defs.h (new file, 49 lines)
@@ -0,0 +1,49 @@
/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _ENA_COMMON_H_
#define _ENA_COMMON_H_

#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* */
#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* */

/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */
struct ena_common_mem_addr {
	uint32_t mem_addr_low;

	uint16_t mem_addr_high;

	/* MBZ */
	uint16_t reserved16;
};

#endif /*_ENA_COMMON_H_ */
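The 48-bit address split performed by ena_com_mem_addr_set() earlier in this diff maps directly onto this structure: the low 32 bits land in mem_addr_low, the next 16 bits in mem_addr_high, and reserved16 must stay zero. A small stand-alone sketch with a hypothetical helper name, not taken from the commit:

/* Hypothetical helper, mirroring ena_com_mem_addr_set() above: split a
 * DMA address into the 48-bit ena_common_mem_addr layout.  Addresses
 * wider than 48 bits cannot be represented and should be rejected by
 * the caller, as ena_com_mem_addr_set() does. */
static void
example_set_mem_addr(struct ena_common_mem_addr *ena_addr, uint64_t addr)
{
	ena_addr->mem_addr_low = (uint32_t)addr;
	ena_addr->mem_addr_high = (uint16_t)(addr >> 32);
	ena_addr->reserved16 = 0;	/* MBZ */
}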
sys/contrib/ena-com/ena_defs/ena_eth_io_defs.h (new file, 959 lines)
@@ -0,0 +1,959 @@
|
||||
/*-
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of copyright holder nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef _ENA_ETH_IO_H_
|
||||
#define _ENA_ETH_IO_H_
|
||||
|
||||
enum ena_eth_io_l3_proto_index {
|
||||
ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
|
||||
|
||||
ENA_ETH_IO_L3_PROTO_IPV4 = 8,
|
||||
|
||||
ENA_ETH_IO_L3_PROTO_IPV6 = 11,
|
||||
|
||||
ENA_ETH_IO_L3_PROTO_FCOE = 21,
|
||||
|
||||
ENA_ETH_IO_L3_PROTO_ROCE = 22,
|
||||
};
|
||||
|
||||
enum ena_eth_io_l4_proto_index {
|
||||
ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
|
||||
|
||||
ENA_ETH_IO_L4_PROTO_TCP = 12,
|
||||
|
||||
ENA_ETH_IO_L4_PROTO_UDP = 13,
|
||||
|
||||
ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
|
||||
};
|
||||
|
||||
struct ena_eth_io_tx_desc {
|
||||
/* 15:0 : length - Buffer length in bytes, must
|
||||
* include any packet trailers that the ENA supposed
|
||||
* to update like End-to-End CRC, Authentication GMAC
|
||||
* etc. This length must not include the
|
||||
* 'Push_Buffer' length. This length must not include
|
||||
* the 4-byte added in the end for 802.3 Ethernet FCS
|
||||
* 21:16 : req_id_hi - Request ID[15:10]
|
||||
* 22 : reserved22 - MBZ
|
||||
* 23 : meta_desc - MBZ
|
||||
* 24 : phase
|
||||
* 25 : reserved1 - MBZ
|
||||
* 26 : first - Indicates first descriptor in
|
||||
* transaction
|
||||
* 27 : last - Indicates last descriptor in
|
||||
* transaction
|
||||
* 28 : comp_req - Indicates whether completion
|
||||
* should be posted, after packet is transmitted.
|
||||
* Valid only for first descriptor
|
||||
* 30:29 : reserved29 - MBZ
|
||||
* 31 : reserved31 - MBZ
|
||||
*/
|
||||
uint32_t len_ctrl;
|
||||
|
||||
/* 3:0 : l3_proto_idx - L3 protocol. This field
|
||||
* required when l3_csum_en,l3_csum or tso_en are set.
|
||||
* 4 : DF - IPv4 DF, must be 0 if packet is IPv4 and
|
||||
* DF flags of the IPv4 header is 0. Otherwise must
|
||||
* be set to 1
|
||||
* 6:5 : reserved5
|
||||
* 7 : tso_en - Enable TSO, For TCP only.
|
||||
* 12:8 : l4_proto_idx - L4 protocol. This field need
|
||||
* to be set when l4_csum_en or tso_en are set.
|
||||
* 13 : l3_csum_en - enable IPv4 header checksum.
|
||||
* 14 : l4_csum_en - enable TCP/UDP checksum.
|
||||
* 15 : ethernet_fcs_dis - when set, the controller
|
||||
* will not append the 802.3 Ethernet Frame Check
|
||||
* Sequence to the packet
|
||||
* 16 : reserved16
|
||||
* 17 : l4_csum_partial - L4 partial checksum. when
|
||||
* set to 0, the ENA calculates the L4 checksum,
|
||||
* where the Destination Address required for the
|
||||
* TCP/UDP pseudo-header is taken from the actual
|
||||
* packet L3 header. when set to 1, the ENA doesn't
|
||||
* calculate the sum of the pseudo-header, instead,
|
||||
* the checksum field of the L4 is used instead. When
|
||||
* TSO enabled, the checksum of the pseudo-header
|
||||
* must not include the tcp length field. L4 partial
|
||||
* checksum should be used for IPv6 packet that
|
||||
* contains Routing Headers.
|
||||
* 20:18 : reserved18 - MBZ
|
||||
* 21 : reserved21 - MBZ
|
||||
* 31:22 : req_id_lo - Request ID[9:0]
|
||||
*/
|
||||
uint32_t meta_ctrl;
|
||||
|
||||
uint32_t buff_addr_lo;
|
||||
|
||||
/* address high and header size
|
||||
* 15:0 : addr_hi - Buffer Pointer[47:32]
|
||||
* 23:16 : reserved16_w2
|
||||
* 31:24 : header_length - Header length. For Low
|
||||
* Latency Queues, this fields indicates the number
|
||||
* of bytes written to the headers' memory. For
|
||||
* normal queues, if packet is TCP or UDP, and longer
|
||||
* than max_header_size, then this field should be
|
||||
* set to the sum of L4 header offset and L4 header
|
||||
* size(without options), otherwise, this field
|
||||
* should be set to 0. For both modes, this field
|
||||
* must not exceed the max_header_size.
|
||||
* max_header_size value is reported by the Max
|
||||
* Queues Feature descriptor
|
||||
*/
|
||||
uint32_t buff_addr_hi_hdr_sz;
|
||||
};
|
||||
|
||||
struct ena_eth_io_tx_meta_desc {
|
||||
/* 9:0 : req_id_lo - Request ID[9:0]
|
||||
* 11:10 : reserved10 - MBZ
|
||||
* 12 : reserved12 - MBZ
|
||||
* 13 : reserved13 - MBZ
|
||||
* 14 : ext_valid - if set, offset fields in Word2
|
||||
* are valid Also MSS High in Word 0 and bits [31:24]
|
||||
* in Word 3
|
||||
* 15 : reserved15
|
||||
* 19:16 : mss_hi
|
||||
* 20 : eth_meta_type - 0: Tx Metadata Descriptor, 1:
|
||||
* Extended Metadata Descriptor
|
||||
* 21 : meta_store - Store extended metadata in queue
|
||||
* cache
|
||||
* 22 : reserved22 - MBZ
|
||||
* 23 : meta_desc - MBO
|
||||
* 24 : phase
|
||||
* 25 : reserved25 - MBZ
|
||||
* 26 : first - Indicates first descriptor in
|
||||
* transaction
|
||||
* 27 : last - Indicates last descriptor in
|
||||
* transaction
|
||||
* 28 : comp_req - Indicates whether completion
|
||||
* should be posted, after packet is transmitted.
|
||||
* Valid only for first descriptor
|
||||
* 30:29 : reserved29 - MBZ
|
||||
* 31 : reserved31 - MBZ
|
||||
*/
|
||||
uint32_t len_ctrl;
|
||||
|
||||
/* 5:0 : req_id_hi
|
||||
* 31:6 : reserved6 - MBZ
|
||||
*/
|
||||
uint32_t word1;
|
||||
|
||||
/* 7:0 : l3_hdr_len
|
||||
* 15:8 : l3_hdr_off
|
||||
* 21:16 : l4_hdr_len_in_words - counts the L4 header
|
||||
* length in words. there is an explicit assumption
|
||||
* that L4 header appears right after L3 header and
|
||||
* L4 offset is based on l3_hdr_off+l3_hdr_len
|
||||
* 31:22 : mss_lo
|
||||
*/
|
||||
uint32_t word2;
|
||||
|
||||
uint32_t reserved;
|
||||
};
|
||||
|
||||
struct ena_eth_io_tx_cdesc {
|
||||
/* Request ID[15:0] */
|
||||
uint16_t req_id;
|
||||
|
||||
uint8_t status;
|
||||
|
||||
/* flags
|
||||
* 0 : phase
|
||||
* 7:1 : reserved1
|
||||
*/
|
||||
uint8_t flags;
|
||||
|
||||
uint16_t sub_qid;
|
||||
|
||||
uint16_t sq_head_idx;
|
||||
};
|
||||
|
||||
struct ena_eth_io_rx_desc {
|
||||
/* In bytes. 0 means 64KB */
|
||||
uint16_t length;
|
||||
|
||||
/* MBZ */
|
||||
uint8_t reserved2;
|
||||
|
||||
/* 0 : phase
|
||||
* 1 : reserved1 - MBZ
|
||||
* 2 : first - Indicates first descriptor in
|
||||
* transaction
|
||||
* 3 : last - Indicates last descriptor in transaction
|
||||
* 4 : comp_req
|
||||
* 5 : reserved5 - MBO
|
||||
* 7:6 : reserved6 - MBZ
|
||||
*/
|
||||
uint8_t ctrl;
|
||||
|
||||
uint16_t req_id;
|
||||
|
||||
/* MBZ */
|
||||
uint16_t reserved6;
|
||||
|
||||
uint32_t buff_addr_lo;
|
||||
|
||||
uint16_t buff_addr_hi;
|
||||
|
||||
/* MBZ */
|
||||
uint16_t reserved16_w3;
|
||||
};
|
||||
|
||||
/* 4-word format Note: all ethernet parsing information are valid only when
|
||||
* last=1
|
||||
*/
|
||||
struct ena_eth_io_rx_cdesc_base {
|
||||
/* 4:0 : l3_proto_idx
|
||||
* 6:5 : src_vlan_cnt
|
||||
* 7 : reserved7 - MBZ
|
||||
* 12:8 : l4_proto_idx
|
||||
* 13 : l3_csum_err - when set, either the L3
|
||||
* checksum error detected, or, the controller didn't
|
||||
* validate the checksum. This bit is valid only when
|
||||
* l3_proto_idx indicates IPv4 packet
|
||||
* 14 : l4_csum_err - when set, either the L4
|
||||
* checksum error detected, or, the controller didn't
|
||||
* validate the checksum. This bit is valid only when
|
||||
* l4_proto_idx indicates TCP/UDP packet, and,
|
||||
* ipv4_frag is not set
|
||||
* 15 : ipv4_frag - Indicates IPv4 fragmented packet
|
||||
* 23:16 : reserved16
|
||||
* 24 : phase
|
||||
* 25 : l3_csum2 - second checksum engine result
|
||||
* 26 : first - Indicates first descriptor in
|
||||
* transaction
|
||||
* 27 : last - Indicates last descriptor in
|
||||
* transaction
|
||||
* 29:28 : reserved28
|
||||
* 30 : buffer - 0: Metadata descriptor. 1: Buffer
|
||||
* Descriptor was used
|
||||
* 31 : reserved31
|
||||
*/
|
||||
uint32_t status;
|
||||
|
||||
uint16_t length;
|
||||
|
||||
uint16_t req_id;
|
||||
|
||||
/* 32-bit hash result */
|
||||
uint32_t hash;
|
||||
|
||||
uint16_t sub_qid;
|
||||
|
||||
uint16_t reserved;
|
||||
};
|
||||
|
||||
/* 8-word format */
|
||||
struct ena_eth_io_rx_cdesc_ext {
|
||||
struct ena_eth_io_rx_cdesc_base base;
|
||||
|
||||
uint32_t buff_addr_lo;
|
||||
|
||||
uint16_t buff_addr_hi;
|
||||
|
||||
uint16_t reserved16;
|
||||
|
||||
uint32_t reserved_w6;
|
||||
|
||||
uint32_t reserved_w7;
|
||||
};
|
||||
|
||||
struct ena_eth_io_intr_reg {
|
||||
/* 14:0 : rx_intr_delay
|
||||
* 29:15 : tx_intr_delay
|
||||
* 30 : intr_unmask
|
||||
* 31 : reserved
|
||||
*/
|
||||
uint32_t intr_control;
|
||||
};
|
||||
|
||||
struct ena_eth_io_numa_node_cfg_reg {
|
||||
/* 7:0 : numa
|
||||
* 30:8 : reserved
|
||||
* 31 : enabled
|
||||
*/
|
||||
uint32_t numa_cfg;
|
||||
};
|
||||
|
||||
/* tx_desc */
|
||||
#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0)
|
||||
#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16
|
||||
#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16)
|
||||
#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23
|
||||
#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23)
|
||||
#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24
|
||||
#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24)
|
||||
#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26
|
||||
#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26)
|
||||
#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27
|
||||
#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27)
|
||||
#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28
|
||||
#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28)
|
||||
#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0)
|
||||
#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4
|
||||
#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4)
|
||||
#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7
|
||||
#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7)
|
||||
#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8
|
||||
#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8)
|
||||
#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13
|
||||
#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13)
|
||||
#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14
|
||||
#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14)
|
||||
#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15
|
||||
#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15)
|
||||
#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17
|
||||
#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17)
|
||||
#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22
|
||||
#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22)
|
||||
#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0)
|
||||
#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24
|
||||
#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24)
|
||||
|
||||
/* tx_meta_desc */
|
||||
#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0)
|
||||
#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14
|
||||
#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14)
|
||||
#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16
|
||||
#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16)
|
||||
#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20
|
||||
#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20)
|
||||
#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21
|
||||
#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21)
|
||||
#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23
|
||||
#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23)
|
||||
#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24
|
||||
#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24)
|
||||
#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26
|
||||
#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26)
|
||||
#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27
|
||||
#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27)
|
||||
#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28
|
||||
#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28)
|
||||
#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0)
|
||||
#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0)
|
||||
#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8
|
||||
#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8)
|
||||
#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16
|
||||
#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16)
|
||||
#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22
|
||||
#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22)
|
||||
|
||||
/* tx_cdesc */
|
||||
#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0)
|
||||
|
||||
/* rx_desc */
|
||||
#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0)
|
||||
#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2
|
||||
#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2)
|
||||
#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3
|
||||
#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3)
|
||||
#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4
|
||||
#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4)
|
||||
|
||||
/* rx_cdesc_base */
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0)
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5)
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8)
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13)
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14)
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15)
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24)
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25)
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26)
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27)
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30
|
||||
#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30)
|
||||
|
||||
/* intr_reg */
|
||||
#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0)
|
||||
#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15
|
||||
#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15)
|
||||
#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30
|
||||
#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30)
|
||||
|
||||
/* numa_node_cfg_reg */
|
||||
#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0)
|
||||
#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31
|
||||
#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31)
|
||||
|
||||
#if !defined(ENA_DEFS_LINUX_MAINLINE)
|
||||
static inline uint32_t get_ena_eth_io_tx_desc_length(const struct ena_eth_io_tx_desc *p)
|
||||
{
|
||||
return p->len_ctrl & ENA_ETH_IO_TX_DESC_LENGTH_MASK;
|
||||
}
|
||||
|
||||
static inline void set_ena_eth_io_tx_desc_length(struct ena_eth_io_tx_desc *p, uint32_t val)
|
||||
{
|
||||
p->len_ctrl |= val & ENA_ETH_IO_TX_DESC_LENGTH_MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t get_ena_eth_io_tx_desc_req_id_hi(const struct ena_eth_io_tx_desc *p)
|
||||
{
|
||||
return (p->len_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK) >> ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT;
|
||||
}
|
||||
|
||||
static inline void set_ena_eth_io_tx_desc_req_id_hi(struct ena_eth_io_tx_desc *p, uint32_t val)
|
||||
{
|
||||
p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t get_ena_eth_io_tx_desc_meta_desc(const struct ena_eth_io_tx_desc *p)
|
||||
{
|
||||
return (p->len_ctrl & ENA_ETH_IO_TX_DESC_META_DESC_MASK) >> ENA_ETH_IO_TX_DESC_META_DESC_SHIFT;
|
||||
}
|
||||
|
||||
static inline void set_ena_eth_io_tx_desc_meta_desc(struct ena_eth_io_tx_desc *p, uint32_t val)
|
||||
{
|
||||
p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_META_DESC_SHIFT) & ENA_ETH_IO_TX_DESC_META_DESC_MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t get_ena_eth_io_tx_desc_phase(const struct ena_eth_io_tx_desc *p)
|
||||
{
|
||||
return (p->len_ctrl & ENA_ETH_IO_TX_DESC_PHASE_MASK) >> ENA_ETH_IO_TX_DESC_PHASE_SHIFT;
|
||||
}
|
||||
|
||||
static inline void set_ena_eth_io_tx_desc_phase(struct ena_eth_io_tx_desc *p, uint32_t val)
|
||||
{
|
||||
p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_DESC_PHASE_MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t get_ena_eth_io_tx_desc_first(const struct ena_eth_io_tx_desc *p)
|
||||
{
|
||||
return (p->len_ctrl & ENA_ETH_IO_TX_DESC_FIRST_MASK) >> ENA_ETH_IO_TX_DESC_FIRST_SHIFT;
|
||||
}
|
||||
|
||||
static inline void set_ena_eth_io_tx_desc_first(struct ena_eth_io_tx_desc *p, uint32_t val)
|
||||
{
|
||||
p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_FIRST_SHIFT) & ENA_ETH_IO_TX_DESC_FIRST_MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t get_ena_eth_io_tx_desc_last(const struct ena_eth_io_tx_desc *p)
|
||||
{
|
||||
return (p->len_ctrl & ENA_ETH_IO_TX_DESC_LAST_MASK) >> ENA_ETH_IO_TX_DESC_LAST_SHIFT;
|
||||
}
|
||||
|
||||
static inline void set_ena_eth_io_tx_desc_last(struct ena_eth_io_tx_desc *p, uint32_t val)
|
||||
{
|
||||
p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_LAST_SHIFT) & ENA_ETH_IO_TX_DESC_LAST_MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t get_ena_eth_io_tx_desc_comp_req(const struct ena_eth_io_tx_desc *p)
|
||||
{
|
||||
return (p->len_ctrl & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT;
|
||||
}
|
||||
|
||||
static inline void set_ena_eth_io_tx_desc_comp_req(struct ena_eth_io_tx_desc *p, uint32_t val)
|
||||
{
|
||||
p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t get_ena_eth_io_tx_desc_l3_proto_idx(const struct ena_eth_io_tx_desc *p)
|
||||
{
|
||||
return p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
|
||||
}
|
||||
|
||||
static inline void set_ena_eth_io_tx_desc_l3_proto_idx(struct ena_eth_io_tx_desc *p, uint32_t val)
|
||||
{
|
||||
p->meta_ctrl |= val & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t get_ena_eth_io_tx_desc_DF(const struct ena_eth_io_tx_desc *p)
|
||||
{
|
||||
return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_DF_MASK) >> ENA_ETH_IO_TX_DESC_DF_SHIFT;
|
||||
}
|
||||
|
||||
static inline void set_ena_eth_io_tx_desc_DF(struct ena_eth_io_tx_desc *p, uint32_t val)
|
||||
{
|
||||
p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_DF_SHIFT) & ENA_ETH_IO_TX_DESC_DF_MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t get_ena_eth_io_tx_desc_tso_en(const struct ena_eth_io_tx_desc *p)
|
||||
{
|
||||
return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TSO_EN_MASK) >> ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT;
|
||||
}
|
||||
|
||||
static inline void set_ena_eth_io_tx_desc_tso_en(struct ena_eth_io_tx_desc *p, uint32_t val)
|
||||
{
|
||||
p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) & ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t get_ena_eth_io_tx_desc_l4_proto_idx(const struct ena_eth_io_tx_desc *p)
|
||||
{
|
||||
return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT;
|
||||
}
|
||||
|
||||
static inline void set_ena_eth_io_tx_desc_l4_proto_idx(struct ena_eth_io_tx_desc *p, uint32_t val)
|
||||
{
|
||||
p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t get_ena_eth_io_tx_desc_l3_csum_en(const struct ena_eth_io_tx_desc *p)
|
||||
{
|
||||
return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK) >> ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT;
|
||||
}
|
||||
|
||||
static inline void set_ena_eth_io_tx_desc_l3_csum_en(struct ena_eth_io_tx_desc *p, uint32_t val)
|
||||
{
|
||||
p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_en(const struct ena_eth_io_tx_desc *p)
|
||||
{
|
||||
return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK) >> ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT;
|
||||
}
|
||||
|
||||
static inline void set_ena_eth_io_tx_desc_l4_csum_en(struct ena_eth_io_tx_desc *p, uint32_t val)
|
||||
{
|
||||
p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t get_ena_eth_io_tx_desc_ethernet_fcs_dis(const struct ena_eth_io_tx_desc *p)
|
||||
{
|
||||
return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK) >> ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT;
|
||||
}
|
||||
|
||||
static inline void set_ena_eth_io_tx_desc_ethernet_fcs_dis(struct ena_eth_io_tx_desc *p, uint32_t val)
|
||||
{
|
||||
p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT) & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_partial(const struct ena_eth_io_tx_desc *p)
|
||||
{
|
||||
return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK) >> ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT;
|
||||
}
|
||||
|
||||
static inline void set_ena_eth_io_tx_desc_l4_csum_partial(struct ena_eth_io_tx_desc *p, uint32_t val)
|
||||
{
|
||||
p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t get_ena_eth_io_tx_desc_req_id_lo(const struct ena_eth_io_tx_desc *p)
|
||||
{
|
||||
return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK) >> ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT;
|
||||
}
|
||||
|
||||
static inline void set_ena_eth_io_tx_desc_req_id_lo(struct ena_eth_io_tx_desc *p, uint32_t val)
|
||||
{
|
||||
p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t get_ena_eth_io_tx_desc_addr_hi(const struct ena_eth_io_tx_desc *p)
|
||||
{
|
||||
return p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
|
||||
}
|
||||
|
||||
static inline void set_ena_eth_io_tx_desc_addr_hi(struct ena_eth_io_tx_desc *p, uint32_t val)
|
||||
{
|
||||
p->buff_addr_hi_hdr_sz |= val & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t get_ena_eth_io_tx_desc_header_length(const struct ena_eth_io_tx_desc *p)
|
||||
{
|
||||
return (p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK) >> ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT;
|
||||
}
|
||||
|
||||
static inline void set_ena_eth_io_tx_desc_header_length(struct ena_eth_io_tx_desc *p, uint32_t val)
|
||||
{
|
||||
p->buff_addr_hi_hdr_sz |= (val << ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_lo(const struct ena_eth_io_tx_meta_desc *p)
|
||||
{
|
||||
return p->len_ctrl & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK;
|
||||
}
|
||||
|
||||
static inline void set_ena_eth_io_tx_meta_desc_req_id_lo(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
|
||||
{
|
||||
p->len_ctrl |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t get_ena_eth_io_tx_meta_desc_ext_valid(const struct ena_eth_io_tx_meta_desc *p)
|
||||
{
|
||||
return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK) >> ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT;
|
||||
}
|
||||
|
||||
static inline void set_ena_eth_io_tx_meta_desc_ext_valid(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
|
||||
{
|
||||
p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT) & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
}

static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_hi(const struct ena_eth_io_tx_meta_desc *p)
{
return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK) >> ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT;
}

static inline void set_ena_eth_io_tx_meta_desc_mss_hi(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;
}

static inline uint32_t get_ena_eth_io_tx_meta_desc_eth_meta_type(const struct ena_eth_io_tx_meta_desc *p)
{
return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK) >> ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT;
}

static inline void set_ena_eth_io_tx_meta_desc_eth_meta_type(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT) & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
}

static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_store(const struct ena_eth_io_tx_meta_desc *p)
{
return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK) >> ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT;
}

static inline void set_ena_eth_io_tx_meta_desc_meta_store(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT) & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
}

static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_desc(const struct ena_eth_io_tx_meta_desc *p)
{
return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK) >> ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT;
}

static inline void set_ena_eth_io_tx_meta_desc_meta_desc(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT) & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
}

static inline uint32_t get_ena_eth_io_tx_meta_desc_phase(const struct ena_eth_io_tx_meta_desc *p)
{
return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_PHASE_MASK) >> ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT;
}

static inline void set_ena_eth_io_tx_meta_desc_phase(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
}

static inline uint32_t get_ena_eth_io_tx_meta_desc_first(const struct ena_eth_io_tx_meta_desc *p)
{
return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_FIRST_MASK) >> ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT;
}

static inline void set_ena_eth_io_tx_meta_desc_first(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT) & ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
}

static inline uint32_t get_ena_eth_io_tx_meta_desc_last(const struct ena_eth_io_tx_meta_desc *p)
{
return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_LAST_MASK) >> ENA_ETH_IO_TX_META_DESC_LAST_SHIFT;
}

static inline void set_ena_eth_io_tx_meta_desc_last(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_LAST_SHIFT) & ENA_ETH_IO_TX_META_DESC_LAST_MASK;
}

static inline uint32_t get_ena_eth_io_tx_meta_desc_comp_req(const struct ena_eth_io_tx_meta_desc *p)
{
return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT;
}

static inline void set_ena_eth_io_tx_meta_desc_comp_req(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK;
}

static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_hi(const struct ena_eth_io_tx_meta_desc *p)
{
return p->word1 & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK;
}

static inline void set_ena_eth_io_tx_meta_desc_req_id_hi(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->word1 |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK;
}

static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_len(const struct ena_eth_io_tx_meta_desc *p)
{
return p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
}

static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_len(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->word2 |= val & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
}

static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_off(const struct ena_eth_io_tx_meta_desc *p)
{
return (p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK) >> ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT;
}

static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_off(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
}

static inline uint32_t get_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(const struct ena_eth_io_tx_meta_desc *p)
{
return (p->word2 & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK) >> ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT;
}

static inline void set_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
}

static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_lo(const struct ena_eth_io_tx_meta_desc *p)
{
return (p->word2 & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK) >> ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT;
}

static inline void set_ena_eth_io_tx_meta_desc_mss_lo(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
}

static inline uint8_t get_ena_eth_io_tx_cdesc_phase(const struct ena_eth_io_tx_cdesc *p)
{
return p->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
}

static inline void set_ena_eth_io_tx_cdesc_phase(struct ena_eth_io_tx_cdesc *p, uint8_t val)
{
p->flags |= val & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
}

static inline uint8_t get_ena_eth_io_rx_desc_phase(const struct ena_eth_io_rx_desc *p)
{
return p->ctrl & ENA_ETH_IO_RX_DESC_PHASE_MASK;
}

static inline void set_ena_eth_io_rx_desc_phase(struct ena_eth_io_rx_desc *p, uint8_t val)
{
p->ctrl |= val & ENA_ETH_IO_RX_DESC_PHASE_MASK;
}

static inline uint8_t get_ena_eth_io_rx_desc_first(const struct ena_eth_io_rx_desc *p)
{
return (p->ctrl & ENA_ETH_IO_RX_DESC_FIRST_MASK) >> ENA_ETH_IO_RX_DESC_FIRST_SHIFT;
}

static inline void set_ena_eth_io_rx_desc_first(struct ena_eth_io_rx_desc *p, uint8_t val)
{
p->ctrl |= (val << ENA_ETH_IO_RX_DESC_FIRST_SHIFT) & ENA_ETH_IO_RX_DESC_FIRST_MASK;
}

static inline uint8_t get_ena_eth_io_rx_desc_last(const struct ena_eth_io_rx_desc *p)
{
return (p->ctrl & ENA_ETH_IO_RX_DESC_LAST_MASK) >> ENA_ETH_IO_RX_DESC_LAST_SHIFT;
}

static inline void set_ena_eth_io_rx_desc_last(struct ena_eth_io_rx_desc *p, uint8_t val)
{
p->ctrl |= (val << ENA_ETH_IO_RX_DESC_LAST_SHIFT) & ENA_ETH_IO_RX_DESC_LAST_MASK;
}

static inline uint8_t get_ena_eth_io_rx_desc_comp_req(const struct ena_eth_io_rx_desc *p)
{
return (p->ctrl & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT;
}

static inline void set_ena_eth_io_rx_desc_comp_req(struct ena_eth_io_rx_desc *p, uint8_t val)
{
p->ctrl |= (val << ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
}

static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_proto_idx(const struct ena_eth_io_rx_cdesc_base *p)
{
return p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
}

static inline void set_ena_eth_io_rx_cdesc_base_l3_proto_idx(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
p->status |= val & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
}

static inline uint32_t get_ena_eth_io_rx_cdesc_base_src_vlan_cnt(const struct ena_eth_io_rx_cdesc_base *p)
{
return (p->status & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT;
}

static inline void set_ena_eth_io_rx_cdesc_base_src_vlan_cnt(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK;
}

static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx(const struct ena_eth_io_rx_cdesc_base *p)
{
return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
}

static inline void set_ena_eth_io_rx_cdesc_base_l4_proto_idx(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK;
}

static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum_err(const struct ena_eth_io_rx_cdesc_base *p)
{
return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
}

static inline void set_ena_eth_io_rx_cdesc_base_l3_csum_err(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK;
}

static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_csum_err(const struct ena_eth_io_rx_cdesc_base *p)
{
return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
}

static inline void set_ena_eth_io_rx_cdesc_base_l4_csum_err(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK;
}

static inline uint32_t get_ena_eth_io_rx_cdesc_base_ipv4_frag(const struct ena_eth_io_rx_cdesc_base *p)
{
return (p->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
}

static inline void set_ena_eth_io_rx_cdesc_base_ipv4_frag(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK;
}

static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase(const struct ena_eth_io_rx_cdesc_base *p)
{
return (p->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
}

static inline void set_ena_eth_io_rx_cdesc_base_phase(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK;
}

static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum2(const struct ena_eth_io_rx_cdesc_base *p)
{
return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT;
}

static inline void set_ena_eth_io_rx_cdesc_base_l3_csum2(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK;
}

static inline uint32_t get_ena_eth_io_rx_cdesc_base_first(const struct ena_eth_io_rx_cdesc_base *p)
{
return (p->status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT;
}

static inline void set_ena_eth_io_rx_cdesc_base_first(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK;
}

static inline uint32_t get_ena_eth_io_rx_cdesc_base_last(const struct ena_eth_io_rx_cdesc_base *p)
{
return (p->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
}

static inline void set_ena_eth_io_rx_cdesc_base_last(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK;
}

static inline uint32_t get_ena_eth_io_rx_cdesc_base_buffer(const struct ena_eth_io_rx_cdesc_base *p)
{
return (p->status & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT;
}

static inline void set_ena_eth_io_rx_cdesc_base_buffer(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK;
}

static inline uint32_t get_ena_eth_io_intr_reg_rx_intr_delay(const struct ena_eth_io_intr_reg *p)
{
return p->intr_control & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
}

static inline void set_ena_eth_io_intr_reg_rx_intr_delay(struct ena_eth_io_intr_reg *p, uint32_t val)
{
p->intr_control |= val & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
}

static inline uint32_t get_ena_eth_io_intr_reg_tx_intr_delay(const struct ena_eth_io_intr_reg *p)
{
return (p->intr_control & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK) >> ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT;
}

static inline void set_ena_eth_io_intr_reg_tx_intr_delay(struct ena_eth_io_intr_reg *p, uint32_t val)
{
p->intr_control |= (val << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT) & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
}

static inline uint32_t get_ena_eth_io_intr_reg_intr_unmask(const struct ena_eth_io_intr_reg *p)
{
return (p->intr_control & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK) >> ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT;
}

static inline void set_ena_eth_io_intr_reg_intr_unmask(struct ena_eth_io_intr_reg *p, uint32_t val)
{
p->intr_control |= (val << ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT) & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
}

static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_numa(const struct ena_eth_io_numa_node_cfg_reg *p)
{
return p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK;
}

static inline void set_ena_eth_io_numa_node_cfg_reg_numa(struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)
{
p->numa_cfg |= val & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK;
}

static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_enabled(const struct ena_eth_io_numa_node_cfg_reg *p)
{
return (p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK) >> ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT;
}

static inline void set_ena_eth_io_numa_node_cfg_reg_enabled(struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)
{
p->numa_cfg |= (val << ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT) & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;
}

#endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */
#endif /*_ENA_ETH_IO_H_ */
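
Note that every generated set_* helper above only ORs the shifted value into the target word, so a descriptor must start out zeroed before its fields are set (ena_com does this with memset() before building descriptors). A minimal sketch, assuming only the RX descriptor helpers defined above; it is illustrative, not driver code:

/* Illustrative only: build a cleared RX descriptor and set its control
 * bits with the generated helpers. Because the helpers OR bits in,
 * starting from a zeroed structure is required. */
static void example_init_rx_desc(struct ena_eth_io_rx_desc *desc, uint8_t phase)
{
	memset(desc, 0x0, sizeof(*desc));
	set_ena_eth_io_rx_desc_first(desc, 1);
	set_ena_eth_io_rx_desc_last(desc, 1);
	set_ena_eth_io_rx_desc_comp_req(desc, 1);
	set_ena_eth_io_rx_desc_phase(desc, phase);
}
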
34 sys/contrib/ena-com/ena_defs/ena_gen_info.h Normal file
@ -0,0 +1,34 @@
/*-
* BSD LICENSE
*
* Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define ENA_GEN_DATE "Sun Nov 20 11:22:05 IST 2016"
#define ENA_GEN_COMMIT "44da4e8"

4 sys/contrib/ena-com/ena_defs/ena_includes.h Normal file
@ -0,0 +1,4 @@
#include "ena_common_defs.h"
|
||||
#include "ena_regs_defs.h"
|
||||
#include "ena_admin_defs.h"
|
||||
#include "ena_eth_io_defs.h"
|
168 sys/contrib/ena-com/ena_defs/ena_regs_defs.h Normal file
@ -0,0 +1,168 @@
/*-
* BSD LICENSE
*
* Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _ENA_REGS_H_
#define _ENA_REGS_H_

enum ena_regs_reset_reason_types {
ENA_REGS_RESET_NORMAL = 0,

ENA_REGS_RESET_KEEP_ALIVE_TO = 1,

ENA_REGS_RESET_ADMIN_TO = 2,

ENA_REGS_RESET_MISS_TX_CMPL = 3,

ENA_REGS_RESET_INV_RX_REQ_ID = 4,

ENA_REGS_RESET_INV_TX_REQ_ID = 5,

ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6,

ENA_REGS_RESET_INIT_ERR = 7,

ENA_REGS_RESET_DRIVER_INVALID_STATE = 8,

ENA_REGS_RESET_OS_TRIGGER = 9,

ENA_REGS_RESET_OS_NETDEV_WD = 10,

ENA_REGS_RESET_SHUTDOWN = 11,

ENA_REGS_RESET_USER_TRIGGER = 12,

ENA_REGS_RESET_GENERIC = 13,
};

/* ena_registers offsets */
#define ENA_REGS_VERSION_OFF 0x0
#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
#define ENA_REGS_CAPS_OFF 0x8
#define ENA_REGS_CAPS_EXT_OFF 0xc
#define ENA_REGS_AQ_BASE_LO_OFF 0x10
#define ENA_REGS_AQ_BASE_HI_OFF 0x14
#define ENA_REGS_AQ_CAPS_OFF 0x18
#define ENA_REGS_ACQ_BASE_LO_OFF 0x20
#define ENA_REGS_ACQ_BASE_HI_OFF 0x24
#define ENA_REGS_ACQ_CAPS_OFF 0x28
#define ENA_REGS_AQ_DB_OFF 0x2c
#define ENA_REGS_ACQ_TAIL_OFF 0x30
#define ENA_REGS_AENQ_CAPS_OFF 0x34
#define ENA_REGS_AENQ_BASE_LO_OFF 0x38
#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c
#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40
#define ENA_REGS_AENQ_TAIL_OFF 0x44
#define ENA_REGS_INTR_MASK_OFF 0x4c
#define ENA_REGS_DEV_CTL_OFF 0x54
#define ENA_REGS_DEV_STS_OFF 0x58
#define ENA_REGS_MMIO_REG_READ_OFF 0x5c
#define ENA_REGS_MMIO_RESP_LO_OFF 0x60
#define ENA_REGS_MMIO_RESP_HI_OFF 0x64
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68

/* version register */
#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff
#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00

/* controller_version register */
#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff
#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8
#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00
#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16
#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000
#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24
#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000

/* caps register */
#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1
#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1
#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16
#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000

/* aq_caps register */
#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16
#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000

/* acq_caps register */
#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff
#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16
#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000

/* aenq_caps register */
#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff
#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16
#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000

/* dev_ctl register */
#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1
#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1
#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2
#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2
#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000

/* dev_sts register */
#define ENA_REGS_DEV_STS_READY_MASK 0x1
#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1
#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2
#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2
#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4
#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3
#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8
#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4
#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10
#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5
#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20
#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6
#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40
#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7
#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80

/* mmio_reg_read register */
#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff
#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16
#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000

/* rss_ind_entry_update register */
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000

#endif /*_ENA_REGS_H_ */
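
The reset reasons defined above are carried in the upper nibble of the dev_ctl register alongside the reset bit; the driver never writes the register directly but passes the reason to ena_com_dev_reset(), which now takes it as an argument. A rough sketch of how the masks compose, assuming an illustrative helper that is not part of the driver:

/* Illustrative only: compose a dev_ctl word that requests a device reset
 * and encodes a reset reason with the masks above. The real write is done
 * inside ena_com_dev_reset(). */
static uint32_t example_dev_ctl_reset_word(enum ena_regs_reset_reason_types reason)
{
	uint32_t val = 0;

	val |= ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	val |= ((uint32_t)reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
	    ENA_REGS_DEV_CTL_RESET_REASON_MASK;

	return (val);
}
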
@ -64,7 +64,7 @@ static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
io_cq->phase ^= 1;
}

static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
u16 tail_masked;
u32 offset;
@ -76,22 +76,27 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
static inline void ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
u8 *bounce_buffer)
{
u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
u32 offset = tail_masked * io_sq->desc_entry_size;
struct ena_com_llq_info *llq_info = &io_sq->llq_info;

/* In case this queue isn't a LLQ */
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
return;
u16 dst_tail_mask;
u32 dst_offset;

memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
io_sq->desc_addr.virt_addr + offset,
io_sq->desc_entry_size);
}
dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

/* Make sure everything was written into the bounce buffer before
* writing the bounce buffer to the device
*/
wmb();

/* The line is completed. Copy it to dev */
ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
bounce_buffer,
llq_info->desc_list_entry_size);

static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
io_sq->tail++;

/* Switch phase bit in case of wrap around */
@ -99,26 +104,124 @@ static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
io_sq->phase ^= 1;
}

static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
u8 *head_src, u16 header_len)
static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
u8 *header_src,
u16 header_len)
{
u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
u8 __iomem *dev_head_addr =
io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);
struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
struct ena_com_llq_info *llq_info = &io_sq->llq_info;
u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
u16 header_offset;

if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
return 0;

if (unlikely(!io_sq->header_addr)) {
ena_trc_err("Push buffer header ptr is NULL\n");
return ENA_COM_INVAL;
header_offset =
llq_info->descs_num_before_header * io_sq->desc_entry_size;

if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
ena_trc_err("trying to write header larger than llq entry can accommodate\n");
return ENA_COM_FAULT;
}

memcpy_toio(dev_head_addr, head_src, header_len);
if (unlikely(!bounce_buffer)) {
ena_trc_err("bounce buffer is NULL\n");
return ENA_COM_FAULT;
}

memcpy(bounce_buffer + header_offset, header_src, header_len);

return 0;
}

static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
u8 *bounce_buffer;
void *sq_desc;

bounce_buffer = pkt_ctrl->curr_bounce_buf;

if (unlikely(!bounce_buffer)) {
ena_trc_err("bounce buffer is NULL\n");
return NULL;
}

sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
pkt_ctrl->idx++;
pkt_ctrl->descs_left_in_line--;

return sq_desc;
}

static inline void ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
struct ena_com_llq_info *llq_info = &io_sq->llq_info;

if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
return;

/* bounce buffer was used, so write it and get a new one */
if (pkt_ctrl->idx) {
ena_com_write_bounce_buffer_to_dev(io_sq,
pkt_ctrl->curr_bounce_buf);
pkt_ctrl->curr_bounce_buf =
ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
0x0, llq_info->desc_list_entry_size);
}

pkt_ctrl->idx = 0;
pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
}

static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
return get_sq_desc_llq(io_sq);

return get_sq_desc_regular_queue(io_sq);
}

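Under the device placement policy (LLQv2), descriptors and the pushed header are staged in a host-side bounce buffer and flushed to device memory one line at a time. A condensed sketch of the per-packet sequence built from the helpers above; error handling and the doorbell write are omitted, and the function itself is illustrative rather than driver code:

/* Sketch of the LLQ TX path for one packet, assuming the helpers above. */
static void example_llq_tx_path(struct ena_com_io_sq *io_sq, u8 *hdr, u16 hdr_len)
{
	struct ena_eth_io_tx_desc *desc;

	/* copied into the current bounce buffer, not to the device */
	ena_com_write_header_to_bounce(io_sq, hdr, hdr_len);

	desc = get_sq_desc(io_sq);	/* a slot inside the bounce buffer */
	memset(desc, 0x0, sizeof(*desc));
	/* ... fill in length, control bits and buffer address ... */

	ena_com_sq_update_tail(io_sq);		/* flushes a line once it is full */
	ena_com_close_bounce_buffer(io_sq);	/* flushes the partially used line */
}
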
static inline void ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
struct ena_com_llq_info *llq_info = &io_sq->llq_info;

if (!pkt_ctrl->descs_left_in_line) {
ena_com_write_bounce_buffer_to_dev(io_sq,
pkt_ctrl->curr_bounce_buf);

pkt_ctrl->curr_bounce_buf =
ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
0x0, llq_info->desc_list_entry_size);

pkt_ctrl->idx = 0;
if (llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)
pkt_ctrl->descs_left_in_line = 1;
else
pkt_ctrl->descs_left_in_line =
llq_info->desc_list_entry_size / io_sq->desc_entry_size;
}
}

static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
ena_com_sq_update_llq_tail(io_sq);
return;
}

io_sq->tail++;

/* Switch phase bit in case of wrap around */
if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
io_sq->phase ^= 1;
}

static inline struct ena_eth_io_rx_cdesc_base *
ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
@ -228,7 +331,6 @@ static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *i
memcpy(&io_sq->cached_tx_meta, ena_meta,
sizeof(struct ena_com_tx_meta));

ena_com_copy_curr_sq_desc_to_dev(io_sq);
ena_com_sq_update_tail(io_sq);
}

@ -271,10 +373,11 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
{
struct ena_eth_io_tx_desc *desc = NULL;
struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
void *push_header = ena_tx_ctx->push_header;
void *buffer_to_push = ena_tx_ctx->push_header;
u16 header_len = ena_tx_ctx->header_len;
u16 num_bufs = ena_tx_ctx->num_bufs;
int total_desc, i, rc;
u16 start_tail = io_sq->tail;
int i, rc;
bool have_meta;
u64 addr_hi;

@ -282,7 +385,7 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
"wrong Q type");

/* num_bufs +1 for potential meta desc */
if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
if (!ena_com_sq_have_enough_space(io_sq, num_bufs + 1)) {
ena_trc_err("Not enough space in the tx queue\n");
return ENA_COM_NO_MEM;
}
@ -293,8 +396,10 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
return ENA_COM_INVAL;
}

/* start with pushing the header (if needed) */
rc = ena_com_write_header(io_sq, push_header, header_len);
if (unlikely((io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) && !buffer_to_push))
return ENA_COM_INVAL;

rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
if (unlikely(rc))
return rc;

@ -305,11 +410,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,

/* If the caller doesn't want to send packets */
if (unlikely(!num_bufs && !header_len)) {
*nb_hw_desc = have_meta ? 0 : 1;
ena_com_close_bounce_buffer(io_sq);
*nb_hw_desc = io_sq->tail - start_tail;
return 0;
}

desc = get_sq_desc(io_sq);
if (unlikely(!desc))
return ENA_COM_FAULT;
memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

/* Set first desc when we don't have meta descriptor */
@ -361,10 +469,12 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
for (i = 0; i < num_bufs; i++) {
/* The first desc shares the same desc as the header */
if (likely(i != 0)) {
ena_com_copy_curr_sq_desc_to_dev(io_sq);
ena_com_sq_update_tail(io_sq);

desc = get_sq_desc(io_sq);
if (unlikely(!desc))
return ENA_COM_FAULT;

memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

desc->len_ctrl |= (io_sq->phase <<
@ -387,14 +497,11 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
/* set the last desc indicator */
desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

ena_com_copy_curr_sq_desc_to_dev(io_sq);

ena_com_sq_update_tail(io_sq);

total_desc = ENA_MAX16(num_bufs, 1);
total_desc += have_meta ? 1 : 0;
ena_com_close_bounce_buffer(io_sq);

*nb_hw_desc = total_desc;
*nb_hw_desc = io_sq->tail - start_tail;
return 0;
}

@ -456,10 +563,13 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
"wrong Q type");

if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
return ENA_COM_NO_SPACE;

desc = get_sq_desc(io_sq);
if (unlikely(!desc))
return ENA_COM_FAULT;

memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

desc->length = ena_buf->len;
@ -501,6 +611,11 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
if (cdesc_phase != expected_phase)
return ENA_COM_TRY_AGAIN;

if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
ena_trc_err("Invalid req id %d\n", cdesc->req_id);
return ENA_COM_INVAL;
}

ena_com_cq_inc_head(io_cq);

*req_id = READ_ONCE(cdesc->req_id);

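With the new bounds check above, a req_id outside the ring is reported as ENA_COM_INVAL rather than being used blindly, and callers are expected to treat it as a fatal queue state. A hypothetical caller sketch; the driver reacts to comparable conditions by arming the reset task (see the reset-reason assignments further down), but the function and error code below are illustrative only:

/* Hypothetical caller: an out-of-range req_id arms the device reset path. */
static int example_handle_tx_completion(struct ena_com_io_cq *io_cq,
    struct ena_adapter *adapter)
{
	uint16_t req_id;
	int rc;

	rc = ena_com_tx_comp_req_id_get(io_cq, &req_id);
	if (rc == ENA_COM_TRY_AGAIN)
		return (0);		/* nothing completed yet */
	if (rc != 0) {
		adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
		adapter->trigger_reset = true;
		return (rc);
	}
	/* ... release the mbuf and DMA map associated with req_id ... */
	return (0);
}
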
@ -98,7 +98,7 @@ static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
}

static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
{
u16 tail, next_to_comp, cnt;

@ -109,6 +109,25 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
return io_sq->q_depth - 1 - cnt;
}

/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
u16 required_buffers)
{
int temp;

if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
return ena_com_free_desc(io_sq) >= required_buffers;

/* This calculation doesn't need to be 100% accurate. So to reduce
* the calculation overhead just subtract 2 lines from the free descs
* (one for the header line and one to compensate the division
* down calculation).
*/
temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

return ena_com_free_desc(io_sq) > temp;
}

static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
u16 tail;

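A worked example of the LLQ branch above, with illustrative numbers (the actual entry and descriptor sizes come from llq_info at runtime):

/* Assume a 128-byte LLQ entry and 16-byte TX descriptors, so
 * descs_per_entry == 8. For a packet needing required_buffers == 16,
 * temp = 16 / 8 + 2 = 4, and the queue is reported as having room
 * only while ena_com_free_desc(io_sq) > 4. */
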
@ -186,6 +186,7 @@ static inline long PTR_ERR(const void *ptr)
#define ENA_COM_NO_MEM ENOMEM
#define ENA_COM_NO_SPACE ENOSPC
#define ENA_COM_TRY_AGAIN -1
#define ENA_COM_UNSUPPORTED EOPNOTSUPP
#define ENA_COM_NO_DEVICE ENODEV
#define ENA_COM_PERMISSION EPERM
#define ENA_COM_TIMER_EXPIRED ETIMEDOUT
@ -281,6 +282,17 @@ void ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg,
int ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma,
int mapflags);

#define ENA_MEMCPY_TO_DEVICE_64(dst, src, size) \
do { \
int count, i; \
volatile uint64_t *to = (volatile uint64_t *)(dst); \
const uint64_t *from = (const uint64_t *)(src); \
count = (size) / 8; \
\
for (i = 0; i < count; i++, from++, to++) \
*to = *from; \
} while (0)

#define ENA_MEM_ALLOC(dmadev, size) malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO)
#define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node) (virt = NULL)
#define ENA_MEM_FREE(dmadev, ptr) free(ptr, M_DEVBUF)
@ -321,6 +333,9 @@ int ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma,
((struct ena_bus*)bus)->reg_bar_h, \
(bus_size_t)(offset))

#define ENA_DB_SYNC(mem_handle) bus_dmamap_sync((mem_handle)->tag, \
(mem_handle)->map, BUS_DMASYNC_PREREAD)

#define time_after(a,b) ((long)((unsigned long)(b) - (unsigned long)(a)) < 0)

#define VLAN_HLEN sizeof(struct ether_vlan_header)
@ -359,9 +374,6 @@ void prefetch(void *x)
__var; \
})

#include "ena_common_defs.h"
#include "ena_admin_defs.h"
#include "ena_eth_io_defs.h"
#include "ena_regs_defs.h"
#include "ena_defs/ena_includes.h"

#endif /* ENA_PLAT_H_ */
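
ENA_MEMCPY_TO_DEVICE_64 copies in whole 64-bit words (the macro truncates size / 8), so the size passed to it should be a multiple of 8 and both buffers 8-byte aligned; the LLQ desc_list_entry_size satisfies this. A minimal usage sketch with illustrative values, assuming an io_sq in device placement mode:

/* Illustrative only: push one 128-byte LLQ line to device memory. */
uint64_t line[16];					/* naturally 8-byte aligned staging line */
void *dev_dst = io_sq->desc_addr.pbuf_dev_addr;		/* device-mapped descriptor memory */

ENA_MEMCPY_TO_DEVICE_64(dev_dst, line, sizeof(line));
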
@ -1612,7 +1612,7 @@ ena_rx_cleanup(struct ena_ring *rx_ring)

rx_ring->next_to_clean = next_to_clean;

refill_required = ena_com_sq_empty_space(io_sq);
refill_required = ena_com_free_desc(io_sq);
refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DEVIDER;

if (refill_required > refill_threshold) {
@ -2047,17 +2047,17 @@ static int ena_rss_configure(struct ena_adapter *adapter)

/* Set indirect table */
rc = ena_com_indirect_table_set(ena_dev);
if (unlikely(rc && rc != EPERM))
if (unlikely(rc && rc != EOPNOTSUPP))
return rc;

/* Configure hash function (if supported) */
rc = ena_com_set_hash_function(ena_dev);
if (unlikely(rc && (rc != EPERM)))
if (unlikely(rc && (rc != EOPNOTSUPP)))
return rc;

/* Configure hash inputs (if supported) */
rc = ena_com_set_hash_ctrl(ena_dev);
if (unlikely(rc && (rc != EPERM)))
if (unlikely(rc && (rc != EOPNOTSUPP)))
return rc;

return 0;

@ -2506,6 +2506,7 @@ ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
static void
ena_down(struct ena_adapter *adapter)
{
int rc;

if (adapter->up) {
device_printf(adapter->pdev, "device is going DOWN\n");
@ -2522,6 +2523,14 @@ ena_down(struct ena_adapter *adapter)

ena_free_io_irq(adapter);

if (adapter->trigger_reset) {
rc = ena_com_dev_reset(adapter->ena_dev,
adapter->reset_reason);
if (rc)
device_printf(adapter->pdev,
"Device reset failed\n");
}

ena_destroy_all_io_queues(adapter);

ena_free_all_tx_bufs(adapter);
@ -2789,7 +2798,8 @@ ena_start_xmit(struct ena_ring *tx_ring)
" header csum flags %#jx",
mbuf, mbuf->m_flags, mbuf->m_pkthdr.csum_flags);

if (ena_com_sq_empty_space(io_sq) < ENA_TX_CLEANUP_TRESHOLD)
if (!ena_com_sq_have_enough_space(io_sq,
ENA_TX_CLEANUP_THRESHOLD))
ena_tx_cleanup(tx_ring);

if ((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0) {
@ -2831,7 +2841,8 @@ ena_start_xmit(struct ena_ring *tx_ring)
counter_u64_add(tx_ring->tx_stats.doorbells, 1);
}

if (ena_com_sq_empty_space(io_sq) < ENA_TX_CLEANUP_TRESHOLD)
if (!ena_com_sq_have_enough_space(io_sq,
ENA_TX_CLEANUP_THRESHOLD))
ena_tx_cleanup(tx_ring);
}

@ -3000,7 +3011,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
#endif
rc = ena_com_indirect_table_fill_entry(ena_dev, i,
ENA_IO_RXQ_IDX(qid));
if (unlikely(rc && (rc != EPERM))) {
if (unlikely(rc && (rc != EOPNOTSUPP))) {
device_printf(dev, "Cannot fill indirect table\n");
goto err_fill_indir;
}
@ -3008,13 +3019,13 @@ static int ena_rss_init_default(struct ena_adapter *adapter)

rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
if (unlikely(rc && (rc != EPERM))) {
if (unlikely(rc && (rc != EOPNOTSUPP))) {
device_printf(dev, "Cannot fill hash function\n");
goto err_fill_indir;
}

rc = ena_com_set_default_hash_ctrl(ena_dev);
if (unlikely(rc && (rc != EPERM))) {
if (unlikely(rc && (rc != EOPNOTSUPP))) {
device_printf(dev, "Cannot fill hash control\n");
goto err_fill_indir;
}
@ -3087,7 +3098,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)

rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
if (rc == EPERM)
if (rc == EOPNOTSUPP)
ena_trace(ENA_WARNING, "Cannot set host attributes\n");
else
ena_trace(ENA_ALERT, "Cannot set host attributes\n");
@ -3124,7 +3135,7 @@ ena_device_init(struct ena_adapter *adapter, device_t pdev,
readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ);
ena_com_set_mmio_read_mode(ena_dev, readless_supported);

rc = ena_com_dev_reset(ena_dev);
rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
if (rc) {
device_printf(pdev, "Can not reset device\n");
goto err_mmio_read_less;

@ -3255,6 +3266,7 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
device_printf(adapter->pdev,
"Keep alive watchdog timeout.\n");
counter_u64_add(adapter->dev_stats.wd_expired, 1);
adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
adapter->trigger_reset = true;
}
}
@ -3266,6 +3278,7 @@ static void check_for_admin_com_state(struct ena_adapter *adapter)
device_printf(adapter->pdev,
"ENA admin queue is not in running state!\n");
counter_u64_add(adapter->dev_stats.admin_q_pause, 1);
adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
adapter->trigger_reset = true;
}
}
@ -3331,6 +3344,8 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
"is above the threshold (%d > %d). "
"Reset the device\n", missed_tx,
adapter->missing_tx_threshold);
adapter->reset_reason =
ENA_REGS_RESET_MISS_TX_CMPL;
adapter->trigger_reset = true;
return;
}
@ -3398,15 +3413,15 @@ ena_reset_task(void *arg, int pending)
dev_up = adapter->up;

ena_com_set_admin_running_state(ena_dev, false);
ena_free_mgmnt_irq(adapter);
ena_down(adapter);
ena_com_dev_reset(ena_dev);
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
ena_com_abort_admin_commands(ena_dev);
ena_com_wait_for_abort_completion(ena_dev);
ena_com_admin_destroy(ena_dev);
ena_com_mmio_reg_read_request_destroy(ena_dev);

adapter->reset_reason = ENA_REGS_RESET_NORMAL;
adapter->trigger_reset = false;

/* Finished destroy part. Restart the device */
@ -3443,7 +3458,6 @@ ena_reset_task(void *arg, int pending)
return;

err_msix_free:
ena_com_dev_reset(ena_dev);
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
err_com_free:
@ -3588,6 +3602,8 @@ ena_attach(device_t pdev)
goto err_com_free;
}

adapter->reset_reason = ENA_REGS_RESET_NORMAL;

adapter->tx_ring_size = queue_size;
adapter->rx_ring_size = queue_size;

@ -3664,6 +3680,7 @@ ena_attach(device_t pdev)
err_stats_tq:
taskqueue_free(adapter->reset_tq);
err_reset_tq:
ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
err_ifp_free:
@ -3745,7 +3762,7 @@ ena_detach(device_t pdev)

/* Reset the device only if the device is running. */
if (adapter->running)
ena_com_dev_reset(ena_dev);
ena_com_dev_reset(ena_dev, adapter->reset_reason);

ena_com_delete_host_info(ena_dev);

@ -40,7 +40,7 @@
#include "ena-com/ena_eth_com.h"

#define DRV_MODULE_VER_MAJOR 0
#define DRV_MODULE_VER_MINOR 7
#define DRV_MODULE_VER_MINOR 8
#define DRV_MODULE_VER_SUBMINOR 0

#define DRV_MODULE_NAME "ena"
@ -90,7 +90,7 @@
#define ENA_RX_HASH_KEY_NUM 10
#define ENA_RX_THASH_TABLE_SIZE (1 << 8)

#define ENA_TX_CLEANUP_TRESHOLD 128
#define ENA_TX_CLEANUP_THRESHOLD 128

#define DB_THRESHOLD 64

@ -410,6 +410,8 @@ struct ena_adapter {
/* Statistics */
struct ena_stats_dev dev_stats;
struct ena_hw_stats hw_stats;

enum ena_regs_reset_reason_types reset_reason;
};

@ -219,6 +219,7 @@ ena_sysctl_add_stats(struct ena_adapter *adapter)
SYSCTL_ADD_PROC(ctx, hw_list, OID_AUTO, "update_stats",
CTLTYPE_INT|CTLFLAG_RD, adapter, 0, ena_sysctl_update_stats,
"A", "Update stats from hardware");

/* ENA Admin queue stats */
admin_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "admin_stats",
CTLFLAG_RD, NULL, "ENA Admin Queue statistics");