
ena: Upgrade ena-com to freebsd v2.8.0

Merge commit '0fd934a2fd12fa74ae409d3de1313e449be5d97e'

Approved by: cperciva (mentor)
MFC after: 2 weeks
Sponsored by: Amazon, Inc.
commit f5f8d7c9cd
Osama Abboud, 2024-10-15 17:02:26 +00:00
9 changed files with 425 additions and 151 deletions

ena_com.c

@ -70,15 +70,19 @@
#define ENA_REGS_ADMIN_INTR_MASK 1
#define ENA_MAX_BACKOFF_DELAY_EXP 16U
#define ENA_MIN_ADMIN_POLL_US 100
#define ENA_MAX_ADMIN_POLL_US 5000
/* PHC definitions */
#define ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC 20
#define ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC 10
#define ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC 1000
#define ENA_PHC_TIMESTAMP_ERROR 0xFFFFFFFFFFFFFFFF
#define ENA_PHC_MAX_ERROR_BOUND 0xFFFFFFFF
#define ENA_PHC_REQ_ID_OFFSET 0xDEAD
#define ENA_PHC_ERROR_FLAGS (ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP | \
ENA_ADMIN_PHC_ERROR_FLAG_ERROR_BOUND)
/*****************************************************************************/
/*****************************************************************************/
@ -111,7 +115,7 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
struct ena_common_mem_addr *ena_addr,
dma_addr_t addr)
{
if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
if (unlikely((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr)) {
ena_trc_err(ena_dev, "DMA address has more bits that the device supports\n");
return ENA_COM_INVAL;
}
@ -131,7 +135,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, sq->entries, sq->dma_addr,
sq->mem_handle);
if (!sq->entries) {
if (unlikely(!sq->entries)) {
ena_trc_err(ena_dev, "Memory allocation failed\n");
return ENA_COM_NO_MEM;
}
@ -154,7 +158,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, cq->entries, cq->dma_addr,
cq->mem_handle);
if (!cq->entries) {
if (unlikely(!cq->entries)) {
ena_trc_err(ena_dev, "Memory allocation failed\n");
return ENA_COM_NO_MEM;
}
@ -179,7 +183,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
aenq->dma_addr,
aenq->mem_handle);
if (!aenq->entries) {
if (unlikely(!aenq->entries)) {
ena_trc_err(ena_dev, "Memory allocation failed\n");
return ENA_COM_NO_MEM;
}
@ -213,6 +217,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
static void comp_ctxt_release(struct ena_com_admin_queue *queue,
struct ena_comp_ctx *comp_ctx)
{
comp_ctx->user_cqe = NULL;
comp_ctx->occupied = false;
ATOMIC32_DEC(&queue->outstanding_cmds);
}
@ -264,7 +269,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
/* In case of queue FULL */
cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
if (unlikely(cnt >= admin_queue->q_depth)) {
ena_trc_dbg(admin_queue->ena_dev, "Admin queue is full.\n");
admin_queue->stats.out_of_space++;
return ERR_PTR(ENA_COM_NO_SPACE);
@ -388,7 +393,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
io_sq->desc_addr.mem_handle);
}
if (!io_sq->desc_addr.virt_addr) {
if (unlikely(!io_sq->desc_addr.virt_addr)) {
ena_trc_err(ena_dev, "Memory allocation failed\n");
return ENA_COM_NO_MEM;
}
@ -413,7 +418,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
if (!io_sq->bounce_buf_ctrl.base_buffer)
io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
if (!io_sq->bounce_buf_ctrl.base_buffer) {
if (unlikely(!io_sq->bounce_buf_ctrl.base_buffer)) {
ena_trc_err(ena_dev, "Bounce buffer memory allocation failed\n");
return ENA_COM_NO_MEM;
}
@ -478,7 +483,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
ENA_CDESC_RING_SIZE_ALIGNMENT);
}
if (!io_cq->cdesc_addr.virt_addr) {
if (unlikely(!io_cq->cdesc_addr.virt_addr)) {
ena_trc_err(ena_dev, "Memory allocation failed\n");
return ENA_COM_NO_MEM;
}
@ -506,6 +511,9 @@ static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *a
return;
}
if (!comp_ctx->occupied)
return;
comp_ctx->status = ENA_CMD_COMPLETED;
comp_ctx->comp_status = cqe->acq_common_descriptor.status;
@ -581,8 +589,9 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
{
exp = ENA_MIN32(ENA_MAX_BACKOFF_DELAY_EXP, exp);
delay_us = ENA_MAX32(ENA_MIN_ADMIN_POLL_US, delay_us);
delay_us = ENA_MIN32(delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
delay_us = ENA_MIN32(ENA_MAX_ADMIN_POLL_US, delay_us * (1U << exp));
ENA_USLEEP(delay_us);
}
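/* Illustration (not part of this commit's diff): with the reordered clamping
 * above, delay_us is first raised to at least ENA_MIN_ADMIN_POLL_US, then
 * scaled by 2^exp (exp itself clamped to 16 so the shift stays defined), and
 * finally capped at ENA_MAX_ADMIN_POLL_US. With the defaults above and
 * delay_us = 100, successive retries sleep for:
 *
 *   exp = 0:  100 us
 *   exp = 1:  200 us
 *   exp = 2:  400 us
 *   exp = 3:  800 us
 *   exp = 4: 1600 us
 *   exp = 5: 3200 us
 *   exp >= 6: 5000 us (capped, since 100 * 2^6 = 6400 > 5000)
 */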
@ -604,7 +613,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
if (comp_ctx->status != ENA_CMD_SUBMITTED)
break;
if (ENA_TIME_EXPIRE(timeout)) {
if (unlikely(ENA_TIME_EXPIRE(timeout))) {
ena_trc_err(admin_queue->ena_dev,
"Wait for completion (polling) timeout\n");
/* ENA didn't have any completion */
@ -803,7 +812,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
llq_default_cfg->llq_ring_entry_size_value;
rc = ena_com_set_llq(ena_dev);
if (rc)
if (unlikely(rc))
ena_trc_err(ena_dev, "Cannot set LLQ configuration: %d\n", rc);
return rc;
@ -830,6 +839,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
if (comp_ctx->status == ENA_CMD_COMPLETED) {
admin_queue->is_missing_admin_interrupt = true;
ena_trc_err(admin_queue->ena_dev,
"The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
@ -850,8 +860,19 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
ret = ENA_COM_TIMER_EXPIRED;
goto err;
}
} else if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
ena_trc_err(admin_queue->ena_dev, "Command was aborted\n");
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
admin_queue->stats.aborted_cmd++;
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
ret = ENA_COM_NO_DEVICE;
goto err;
}
ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
admin_queue->ena_dev, "Invalid comp status %d\n",
comp_ctx->status);
ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
comp_ctxt_release(admin_queue, comp_ctx);
@ -909,7 +930,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
goto err;
}
if (read_resp->reg_off != offset) {
if (unlikely(read_resp->reg_off != offset)) {
ena_trc_err(ena_dev, "Read failure: wrong offset provided\n");
ret = ENA_MMIO_READ_TIMEOUT;
} else {
@ -1033,7 +1054,7 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
exp_state)
return 0;
if (ENA_TIME_EXPIRE(timeout_stamp))
if (unlikely(ENA_TIME_EXPIRE(timeout_stamp)))
return ENA_COM_TIMER_EXPIRED;
ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
@ -1494,7 +1515,7 @@ int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
struct ena_com_io_sq **io_sq,
struct ena_com_io_cq **io_cq)
{
if (qid >= ENA_TOTAL_NUM_QUEUES) {
if (unlikely(qid >= ENA_TOTAL_NUM_QUEUES)) {
ena_trc_err(ena_dev, "Invalid queue number %d but the max is %d\n",
qid, ENA_TOTAL_NUM_QUEUES);
return ENA_COM_INVAL;
@ -1602,7 +1623,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
int ret;
ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
if (ret) {
if (unlikely(ret)) {
ena_trc_info(ena_dev, "Can't get aenq configuration\n");
return ret;
}
@ -1649,7 +1670,7 @@ int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
ena_trc_dbg(ena_dev, "ENA dma width: %d\n", width);
if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
if (unlikely((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
ena_trc_err(ena_dev, "DMA width illegal value: %d\n", width);
return ENA_COM_INVAL;
}
@ -1811,16 +1832,21 @@ int ena_com_phc_config(struct ena_com_dev *ena_dev)
struct ena_admin_set_feat_cmd set_feat_cmd;
int ret = 0;
/* Get device PHC default configuration */
ret = ena_com_get_feature(ena_dev, &get_feat_resp, ENA_ADMIN_PHC_CONFIG, 0);
/* Get default device PHC configuration */
ret = ena_com_get_feature(ena_dev,
&get_feat_resp,
ENA_ADMIN_PHC_CONFIG,
ENA_ADMIN_PHC_FEATURE_VERSION_0);
if (unlikely(ret)) {
ena_trc_err(ena_dev, "Failed to get PHC feature configuration, error: %d\n", ret);
return ret;
}
/* Suporting only readless PHC retrieval */
if (get_feat_resp.u.phc.type != ENA_ADMIN_PHC_TYPE_READLESS) {
ena_trc_err(ena_dev, "Unsupprted PHC type, error: %d\n", ENA_COM_UNSUPPORTED);
/* Supporting only PHC V0 (readless mode with error bound) */
if (get_feat_resp.u.phc.version != ENA_ADMIN_PHC_FEATURE_VERSION_0) {
ena_trc_err(ena_dev, "Unsupprted PHC version (0x%X), error: %d\n",
get_feat_resp.u.phc.version,
ENA_COM_UNSUPPORTED);
return ENA_COM_UNSUPPORTED;
}
@ -1837,11 +1863,11 @@ int ena_com_phc_config(struct ena_com_dev *ena_dev)
get_feat_resp.u.phc.block_timeout_usec :
ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC;
/* Sanity check - expire timeout must not be above skip timeout */
/* Sanity check - expire timeout must not exceed block timeout */
if (phc->expire_timeout_usec > phc->block_timeout_usec)
phc->expire_timeout_usec = phc->block_timeout_usec;
/* Prepare PHC feature command with PHC output address */
/* Prepare PHC config feature command */
memset(&set_feat_cmd, 0x0, sizeof(set_feat_cmd));
set_feat_cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
set_feat_cmd.feat_common.feature_id = ENA_ADMIN_PHC_CONFIG;
@ -1873,13 +1899,16 @@ int ena_com_phc_config(struct ena_com_dev *ena_dev)
void ena_com_phc_destroy(struct ena_com_dev *ena_dev)
{
struct ena_com_phc_info *phc = &ena_dev->phc;
phc->active = false;
unsigned long flags = 0;
/* In case PHC is not supported by the device, silently exiting */
if (!phc->virt_addr)
return;
ENA_SPINLOCK_LOCK(phc->lock, flags);
phc->active = false;
ENA_SPINLOCK_UNLOCK(phc->lock, flags);
ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
sizeof(*phc->virt_addr),
phc->virt_addr,
@ -1890,15 +1919,14 @@ void ena_com_phc_destroy(struct ena_com_dev *ena_dev)
ENA_SPINLOCK_DESTROY(phc->lock);
}
int ena_com_phc_get(struct ena_com_dev *ena_dev, u64 *timestamp)
int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp)
{
volatile struct ena_admin_phc_resp *read_resp = ena_dev->phc.virt_addr;
const ena_time_high_res_t zero_system_time = ENA_TIME_INIT_HIGH_RES();
struct ena_com_phc_info *phc = &ena_dev->phc;
ena_time_high_res_t initial_time = ENA_TIME_INIT_HIGH_RES();
static ena_time_high_res_t start_time;
unsigned long flags = 0;
ena_time_high_res_t expire_time;
ena_time_high_res_t block_time;
unsigned long flags = 0;
int ret = ENA_COM_OK;
if (!phc->active) {
@ -1909,9 +1937,10 @@ int ena_com_phc_get(struct ena_com_dev *ena_dev, u64 *timestamp)
ENA_SPINLOCK_LOCK(phc->lock, flags);
/* Check if PHC is in blocked state */
if (unlikely(ENA_TIME_COMPARE_HIGH_RES(start_time, initial_time))) {
if (unlikely(ENA_TIME_COMPARE_HIGH_RES(phc->system_time, zero_system_time))) {
/* Check if blocking time expired */
block_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(start_time, phc->block_timeout_usec);
block_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(phc->system_time,
phc->block_timeout_usec);
if (!ENA_TIME_EXPIRE_HIGH_RES(block_time)) {
/* PHC is still in blocked state, skip PHC request */
phc->stats.phc_skp++;
@ -1919,9 +1948,9 @@ int ena_com_phc_get(struct ena_com_dev *ena_dev, u64 *timestamp)
goto skip;
}
/* PHC is in active state, update statistics according to req_id and timestamp */
/* PHC is in active state, update statistics according to req_id and error_flags */
if ((READ_ONCE16(read_resp->req_id) != phc->req_id) ||
(read_resp->timestamp == ENA_PHC_TIMESTAMP_ERROR)) {
(read_resp->error_flags & ENA_PHC_ERROR_FLAGS)) {
/* Device didn't update req_id during blocking time or timestamp is invalid,
* this indicates on a device error
*/
@ -1933,9 +1962,9 @@ int ena_com_phc_get(struct ena_com_dev *ena_dev, u64 *timestamp)
}
/* Setting relative timeouts */
start_time = ENA_GET_SYSTEM_TIME_HIGH_RES();
block_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(start_time, phc->block_timeout_usec);
expire_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(start_time, phc->expire_timeout_usec);
phc->system_time = ENA_GET_SYSTEM_TIME_HIGH_RES();
block_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(phc->system_time, phc->block_timeout_usec);
expire_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(phc->system_time, phc->expire_timeout_usec);
/* We expect the device to return this req_id once the new PHC timestamp is updated */
phc->req_id++;
@ -1952,35 +1981,45 @@ int ena_com_phc_get(struct ena_com_dev *ena_dev, u64 *timestamp)
while (1) {
if (unlikely(ENA_TIME_EXPIRE_HIGH_RES(expire_time))) {
/* Gave up waiting for updated req_id, PHC enters into blocked state until
* passing blocking time
* passing blocking time, during this time any get PHC timestamp or
* error bound requests will fail with device busy error
*/
phc->error_bound = ENA_PHC_MAX_ERROR_BOUND;
ret = ENA_COM_DEVICE_BUSY;
break;
}
/* Check if req_id was updated by the device */
if (READ_ONCE16(read_resp->req_id) != phc->req_id) {
/* req_id was not updated by the device, check again on next loop */
/* req_id was not updated by the device yet, check again on next loop */
continue;
}
/* req_id was updated which indicates that PHC timestamp was updated too */
*timestamp = read_resp->timestamp;
/* PHC timestamp validty check */
if (unlikely(*timestamp == ENA_PHC_TIMESTAMP_ERROR)) {
/* Retrieved invalid PHC timestamp, PHC enters into blocked state until
* passing blocking time
/* req_id was updated by the device which indicates that PHC timestamp, error_bound
* and error_flags are updated too, checking errors before retrieving timestamp and
* error_bound values
*/
if (unlikely(read_resp->error_flags & ENA_PHC_ERROR_FLAGS)) {
/* Retrieved timestamp or error bound errors, PHC enters into blocked state
* until passing blocking time, during this time any get PHC timestamp or
* error bound requests will fail with device busy error
*/
phc->error_bound = ENA_PHC_MAX_ERROR_BOUND;
ret = ENA_COM_DEVICE_BUSY;
break;
}
/* Retrieved valid PHC timestamp */
/* PHC timestamp value is returned to the caller */
*timestamp = read_resp->timestamp;
/* Error bound value is cached for future retrieval by caller */
phc->error_bound = read_resp->error_bound;
/* Update statistic on valid PHC timestamp retrieval */
phc->stats.phc_cnt++;
/* This indicates PHC state is active */
start_time = initial_time;
phc->system_time = zero_system_time;
break;
}
@ -1990,6 +2029,24 @@ int ena_com_phc_get(struct ena_com_dev *ena_dev, u64 *timestamp)
return ret;
}
int ena_com_phc_get_error_bound(struct ena_com_dev *ena_dev, u32 *error_bound)
{
struct ena_com_phc_info *phc = &ena_dev->phc;
u32 local_error_bound = phc->error_bound;
if (!phc->active) {
ena_trc_err(ena_dev, "PHC feature is not active in the device\n");
return ENA_COM_UNSUPPORTED;
}
if (local_error_bound == ENA_PHC_MAX_ERROR_BOUND)
return ENA_COM_DEVICE_BUSY;
*error_bound = local_error_bound;
return ENA_COM_OK;
}
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
@ -2083,15 +2140,15 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
ENA_SPINLOCK_INIT(admin_queue->q_lock);
ret = ena_com_init_comp_ctxt(admin_queue);
if (ret)
if (unlikely(ret))
goto error;
ret = ena_com_admin_init_sq(admin_queue);
if (ret)
if (unlikely(ret))
goto error;
ret = ena_com_admin_init_cq(admin_queue);
if (ret)
if (unlikely(ret))
goto error;
admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
@ -2124,11 +2181,12 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
if (ret)
if (unlikely(ret))
goto error;
admin_queue->ena_dev = ena_dev;
admin_queue->running_state = true;
admin_queue->is_missing_admin_interrupt = false;
return 0;
error:
@ -2144,7 +2202,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
struct ena_com_io_cq *io_cq;
int ret;
if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
if (unlikely(ctx->qid >= ENA_TOTAL_NUM_QUEUES)) {
ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
ctx->qid, ENA_TOTAL_NUM_QUEUES);
return ENA_COM_INVAL;
@ -2175,18 +2233,18 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);
ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
if (ret)
if (unlikely(ret))
goto error;
ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
if (ret)
if (unlikely(ret))
goto error;
ret = ena_com_create_io_cq(ena_dev, io_cq);
if (ret)
if (unlikely(ret))
goto error;
ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
if (ret)
if (unlikely(ret))
goto destroy_io_cq;
return 0;
@ -2203,7 +2261,7 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
struct ena_com_io_sq *io_sq;
struct ena_com_io_cq *io_cq;
if (qid >= ENA_TOTAL_NUM_QUEUES) {
if (unlikely(qid >= ENA_TOTAL_NUM_QUEUES)) {
ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
qid, ENA_TOTAL_NUM_QUEUES);
return;
@ -2345,7 +2403,8 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
else
return rc;
rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
rc = ena_com_get_feature(ena_dev, &get_resp,
ENA_ADMIN_LLQ, ENA_ADMIN_LLQ_FEATURE_VERSION_1);
if (!rc)
memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
sizeof(get_resp.u.llq));
@ -2400,8 +2459,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
/* Go over all the events */
while ((READ_ONCE8(aenq_common->flags) &
ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
/* Make sure the phase bit (ownership) is as expected before
* reading the rest of the descriptor.
/* Make sure the device finished writing the rest of the descriptor
* before reading it.
*/
dma_rmb();
@ -2443,6 +2502,45 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
mmiowb();
}
bool ena_com_aenq_has_keep_alive(struct ena_com_dev *ena_dev)
{
struct ena_admin_aenq_common_desc *aenq_common;
struct ena_com_aenq *aenq = &ena_dev->aenq;
struct ena_admin_aenq_entry *aenq_e;
u8 phase = aenq->phase;
u16 masked_head;
masked_head = aenq->head & (aenq->q_depth - 1);
aenq_e = &aenq->entries[masked_head]; /* Get first entry */
aenq_common = &aenq_e->aenq_common_desc;
/* Go over all the events */
while ((READ_ONCE8(aenq_common->flags) &
ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
/* Make sure the device finished writing the rest of the descriptor
* before reading it.
*/
dma_rmb();
if (aenq_common->group == ENA_ADMIN_KEEP_ALIVE)
return true;
/* Get next event entry */
masked_head++;
if (unlikely(masked_head == aenq->q_depth)) {
masked_head = 0;
phase = !phase;
}
aenq_e = &aenq->entries[masked_head];
aenq_common = &aenq_e->aenq_common_desc;
}
return false;
}
#ifdef ENA_EXTENDED_STATS
/*
* Sets the function Idx and Queue Idx to be used for
@ -2468,6 +2566,7 @@ int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
enum ena_regs_reset_reason_types reset_reason)
{
u32 reset_reason_msb, reset_reason_lsb;
u32 stat, timeout, cap, reset_val;
int rc;
@ -2494,8 +2593,28 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
/* start reset */
reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
/* For backward compatibility, device will interpret
* bits 24-27 as MSB, bits 28-31 as LSB
*/
reset_reason_lsb = ENA_FIELD_GET(reset_reason, ENA_RESET_REASON_LSB_MASK,
ENA_RESET_REASON_LSB_OFFSET);
reset_reason_msb = ENA_FIELD_GET(reset_reason, ENA_RESET_REASON_MSB_MASK,
ENA_RESET_REASON_MSB_OFFSET);
reset_val |= reset_reason_lsb << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT;
if (ena_com_get_cap(ena_dev, ENA_ADMIN_EXTENDED_RESET_REASONS))
reset_val |= reset_reason_msb << ENA_REGS_DEV_CTL_RESET_REASON_EXT_SHIFT;
else if (reset_reason_msb) {
/* In case the device does not support intended
* extended reset reason fallback to generic
*/
reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
reset_val |= (ENA_REGS_RESET_GENERIC << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
ENA_REGS_DEV_CTL_RESET_REASON_MASK;
}
ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
/* Write again the MMIO read request address */
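/* Illustration (not part of this commit's diff): the reason code is split
 * into two 4-bit nibbles using the new masks from ena_com.h. For example,
 * ENA_REGS_RESET_DEVICE_REQUEST (19 == 0x13):
 *
 *	reset_reason_lsb = (0x13 & ENA_RESET_REASON_LSB_MASK) >> 0;	// 0x3
 *	reset_reason_msb = (0x13 & ENA_RESET_REASON_MSB_MASK) >> 4;	// 0x1
 *
 * Bits 28-31 of reset_val carry the LSB nibble and bits 24-27 the MSB
 * nibble; a device advertising ENA_ADMIN_EXTENDED_RESET_REASONS reassembles
 * the full value, while the code above falls back to ENA_REGS_RESET_GENERIC
 * when the MSB is nonzero but the capability is absent.
 */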
@ -2503,7 +2622,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
rc = wait_for_reset_state(ena_dev, timeout,
ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
if (rc != 0) {
if (unlikely(rc)) {
ena_trc_err(ena_dev, "Reset indication didn't turn on\n");
return rc;
}
@ -2511,7 +2630,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
/* reset done */
ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
rc = wait_for_reset_state(ena_dev, timeout, 0);
if (rc != 0) {
if (unlikely(rc)) {
ena_trc_err(ena_dev, "Reset indication didn't turn off\n");
return rc;
}
@ -2614,7 +2733,7 @@ int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
ret = ena_get_dev_stats(ena_dev, &ctx,
ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
if (ret < 0)
if (unlikely(ret < 0))
goto free_ext_stats_mem;
ret = snprintf(buff, len, "%s", (char *)virt_addr);
@ -3222,7 +3341,7 @@ int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev)
customer_metrics->buffer_virt_addr,
customer_metrics->buffer_dma_addr,
customer_metrics->buffer_dma_handle);
if (!customer_metrics->buffer_virt_addr)
if (unlikely(!customer_metrics->buffer_virt_addr))
return ENA_COM_NO_MEM;
return 0;
@ -3416,7 +3535,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
}
rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
if (rc)
if (unlikely(rc))
return rc;
ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -

ena_com.h

@ -51,6 +51,14 @@
#define ADMIN_CQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_acq_entry))
#define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aenq_entry))
/* Macros used to extract LSB/MSB from the
* enums defining the reset reasons
*/
#define ENA_RESET_REASON_LSB_OFFSET 0
#define ENA_RESET_REASON_LSB_MASK 0xf
#define ENA_RESET_REASON_MSB_OFFSET 4
#define ENA_RESET_REASON_MSB_MASK 0xf0
#define ENA_CUSTOMER_METRICS_BUFFER_SIZE 512
/*****************************************************************************/
@ -257,6 +265,8 @@ struct ena_com_admin_queue {
*/
bool running_state;
bool is_missing_admin_interrupt;
/* Count the number of outstanding admin commands */
ena_atomic32_t outstanding_cmds;
@ -294,6 +304,9 @@ struct ena_com_phc_info {
/* PHC shared memory - virtual address */
struct ena_admin_phc_resp *virt_addr;
/* System time of last PHC request */
ena_time_high_res_t system_time;
/* Spin lock to ensure a single outstanding PHC read */
ena_spinlock_t lock;
@ -313,17 +326,20 @@ struct ena_com_phc_info {
*/
u32 block_timeout_usec;
/* PHC shared memory - physical address */
dma_addr_t phys_addr;
/* PHC shared memory handle */
ena_mem_handle_t mem_handle;
/* Cached error bound per timestamp sample */
u32 error_bound;
/* Request id sent to the device */
u16 req_id;
/* True if PHC is active in the device */
bool active;
/* PHC shared memory - memory handle */
ena_mem_handle_t mem_handle;
/* PHC shared memory - physical address */
dma_addr_t phys_addr;
};
struct ena_rss {
@ -488,12 +504,19 @@ int ena_com_phc_config(struct ena_com_dev *ena_dev);
*/
void ena_com_phc_destroy(struct ena_com_dev *ena_dev);
/* ena_com_phc_get - Retrieve PHC timestamp
/* ena_com_phc_get_timestamp - Retrieve PHC timestamp
* @ena_dev: ENA communication layer struct
* @timestamp: Retrieve PHC timestamp
* @timestamp: Retrieved PHC timestamp
* @return - 0 on success, negative value on failure
*/
int ena_com_phc_get(struct ena_com_dev *ena_dev, u64 *timestamp);
int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp);
/* ena_com_phc_get_error_bound - Retrieve cached PHC error bound
* @ena_dev: ENA communication layer struct
* @error_bound: Cached PHC error bound
* @return - 0 on success, negative value on failure
*/
int ena_com_phc_get_error_bound(struct ena_com_dev *ena_dev, u32 *error_bound);
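/* Usage sketch (illustration, not part of this commit's diff; the caller
 * context is hypothetical). The error bound is cached per timestamp sample,
 * so it is read after a successful ena_com_phc_get_timestamp() call:
 *
 *	u64 timestamp;
 *	u32 error_bound;
 *	int rc;
 *
 *	rc = ena_com_phc_get_timestamp(ena_dev, &timestamp);
 *	if (unlikely(rc != ENA_COM_OK))
 *		return rc;	// e.g. ENA_COM_DEVICE_BUSY while PHC is blocked
 *
 *	rc = ena_com_phc_get_error_bound(ena_dev, &error_bound);
 *	if (unlikely(rc != ENA_COM_OK))
 *		return rc;	// ENA_COM_DEVICE_BUSY if the sample was invalid
 */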
/* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism
* @ena_dev: ENA communication layer struct
@ -644,6 +667,16 @@ void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
*/
void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data);
/* ena_com_aenq_has_keep_alive - Retrieve if there is a keep alive notification in the aenq
* @ena_dev: ENA communication layer struct
*
* This method goes over the async event notification queue and returns if there
* is a keep alive notification.
*
* @return - true if there is a keep alive notification in the aenq or false otherwise
*/
bool ena_com_aenq_has_keep_alive(struct ena_com_dev *ena_dev);
/* ena_com_abort_admin_commands - Abort all the outstanding admin commands.
* @ena_dev: ENA communication layer struct
*
@ -1096,6 +1129,16 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
struct ena_admin_feature_llq_desc *llq_features,
struct ena_llq_configurations *llq_default_config);
/* ena_com_get_missing_admin_interrupt - Return if there is a missing admin interrupt
* @ena_dev: ENA communication layer struct
*
* @return - true if there is a missing admin interrupt or false otherwise
*/
static inline bool ena_com_get_missing_admin_interrupt(struct ena_com_dev *ena_dev)
{
return ena_dev->admin_queue.is_missing_admin_interrupt;
}
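/* Usage sketch (illustration, not part of this commit's diff; the watchdog
 * variables are hypothetical). The two helpers above let a driver's
 * keep-alive watchdog distinguish a dead device from a missed MSI-X
 * interrupt before choosing a reset reason:
 *
 *	if (keep_alive_expired) {
 *		if (ena_com_aenq_has_keep_alive(ena_dev) ||
 *		    ena_com_get_missing_admin_interrupt(ena_dev))
 *			reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
 *		else
 *			reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
 *	}
 */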
/* ena_com_io_sq_to_ena_dev - Extract ena_com_dev using contained field io_sq.
* @io_sq: IO submit queue struct
*

ena_defs/ena_admin_defs.h

@ -98,12 +98,22 @@ enum ena_admin_aq_feature_id {
ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
};
/* feature version for the set/get ENA_ADMIN_LLQ feature admin commands */
enum ena_admin_llq_feature_version {
/* legacy base version in older drivers */
ENA_ADMIN_LLQ_FEATURE_VERSION_0_LEGACY = 0,
/* support entry_size recommendation by device */
ENA_ADMIN_LLQ_FEATURE_VERSION_1 = 1,
};
/* device capabilities */
enum ena_admin_aq_caps_id {
ENA_ADMIN_ENI_STATS = 0,
/* ENA SRD customer metrics */
ENA_ADMIN_ENA_SRD_INFO = 1,
ENA_ADMIN_CUSTOMER_METRICS = 2,
ENA_ADMIN_EXTENDED_RESET_REASONS = 3,
ENA_ADMIN_CDESC_MBZ = 4,
};
enum ena_admin_placement_policy_type {
@ -163,8 +173,14 @@ enum ena_admin_get_stats_scope {
ENA_ADMIN_ETH_TRAFFIC = 1,
};
enum ena_admin_get_phc_type {
ENA_ADMIN_PHC_TYPE_READLESS = 0,
enum ena_admin_phc_feature_version {
/* Readless with error_bound */
ENA_ADMIN_PHC_FEATURE_VERSION_0 = 0,
};
enum ena_admin_phc_error_flags {
ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP = BIT(0),
ENA_ADMIN_PHC_ERROR_FLAG_ERROR_BOUND = BIT(1),
};
/* ENA SRD configuration for ENI */
@ -464,6 +480,10 @@ struct ena_admin_basic_stats {
uint32_t tx_drops_low;
uint32_t tx_drops_high;
uint32_t rx_overruns_low;
uint32_t rx_overruns_high;
};
/* ENI Statistics Command. */
@ -696,8 +716,17 @@ struct ena_admin_feature_llq_desc {
/* the stride control the driver selected to use */
uint16_t descriptors_stride_ctrl_enabled;
/* reserved */
uint32_t reserved1;
/* feature version of device resp to either GET/SET commands. */
uint8_t feature_version;
/* llq entry size recommended by the device,
* values correlated to enum ena_admin_llq_ring_entry_size.
* used only for GET command.
*/
uint8_t entry_size_recommended;
/* max depth of wide llq, or 0 for N/A */
uint16_t max_wide_llq_depth;
/* accelerated low latency queues requirement. driver needs to
* support those requirements in order to use accelerated llq
@ -933,19 +962,8 @@ struct ena_admin_feature_rss_flow_hash_input {
uint16_t enabled_input_sort;
};
enum ena_admin_os_type {
ENA_ADMIN_OS_LINUX = 1,
ENA_ADMIN_OS_WIN = 2,
ENA_ADMIN_OS_DPDK = 3,
ENA_ADMIN_OS_FREEBSD = 4,
ENA_ADMIN_OS_IPXE = 5,
ENA_ADMIN_OS_ESXI = 6,
ENA_ADMIN_OS_MACOS = 7,
ENA_ADMIN_OS_GROUPS_NUM = 7,
};
struct ena_admin_host_info {
/* defined in enum ena_admin_os_type */
/* Host OS type defined as ENA_ADMIN_OS_* */
uint32_t os_type;
/* os distribution string format */
@ -992,7 +1010,9 @@ struct ena_admin_host_info {
* 4 : rss_configurable_function_key
* 5 : reserved
* 6 : rx_page_reuse
* 31:7 : reserved
* 7 : tx_ipv6_csum_offload
* 8 : phc
* 31:9 : reserved
*/
uint32_t driver_supported_features;
};
@ -1078,10 +1098,10 @@ struct ena_admin_queue_ext_feature_desc {
};
struct ena_admin_feature_phc_desc {
/* PHC type as defined in enum ena_admin_get_phc_type,
* used only for GET command.
/* PHC version as defined in enum ena_admin_phc_feature_version,
* used only for GET command as max supported PHC version by the device.
*/
uint8_t type;
uint8_t version;
/* Reserved - MBZ */
uint8_t reserved1[3];
@ -1221,7 +1241,9 @@ enum ena_admin_aenq_group {
ENA_ADMIN_NOTIFICATION = 3,
ENA_ADMIN_KEEP_ALIVE = 4,
ENA_ADMIN_REFRESH_CAPABILITIES = 5,
ENA_ADMIN_AENQ_GROUPS_NUM = 6,
ENA_ADMIN_CONF_NOTIFICATIONS = 6,
ENA_ADMIN_DEVICE_REQUEST_RESET = 7,
ENA_ADMIN_AENQ_GROUPS_NUM = 8,
};
enum ena_admin_aenq_notification_syndrome {
@ -1252,6 +1274,18 @@ struct ena_admin_aenq_keep_alive_desc {
uint32_t tx_drops_low;
uint32_t tx_drops_high;
uint32_t rx_overruns_low;
uint32_t rx_overruns_high;
};
struct ena_admin_aenq_conf_notifications_desc {
struct ena_admin_aenq_common_desc aenq_common_desc;
uint64_t notifications_bitmap;
uint64_t reserved;
};
struct ena_admin_ena_mmio_req_read_less_resp {
@ -1264,13 +1298,23 @@ struct ena_admin_ena_mmio_req_read_less_resp {
};
struct ena_admin_phc_resp {
/* Request Id, received from DB register */
uint16_t req_id;
uint8_t reserved1[6];
/* PHC timestamp (nsec) */
uint64_t timestamp;
uint8_t reserved2[48];
uint8_t reserved2[8];
/* Timestamp error limit (nsec) */
uint32_t error_bound;
/* Bit field of enum ena_admin_phc_error_flags */
uint32_t error_flags;
uint8_t reserved3[32];
};
/* aq_common_desc */
@ -1371,6 +1415,10 @@ struct ena_admin_phc_resp {
#define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK BIT(4)
#define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_SHIFT 6
#define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK BIT(6)
#define ENA_ADMIN_HOST_INFO_TX_IPV6_CSUM_OFFLOAD_SHIFT 7
#define ENA_ADMIN_HOST_INFO_TX_IPV6_CSUM_OFFLOAD_MASK BIT(7)
#define ENA_ADMIN_HOST_INFO_PHC_SHIFT 8
#define ENA_ADMIN_HOST_INFO_PHC_MASK BIT(8)
/* feature_rss_ind_table */
#define ENA_ADMIN_FEATURE_RSS_IND_TABLE_ONE_ENTRY_UPDATE_MASK BIT(0)
@ -1842,6 +1890,16 @@ static inline void set_ena_admin_host_info_rx_page_reuse(struct ena_admin_host_i
p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_SHIFT) & ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK;
}
static inline uint32_t get_ena_admin_host_info_tx_ipv6_csum_offload(const struct ena_admin_host_info *p)
{
return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_TX_IPV6_CSUM_OFFLOAD_MASK) >> ENA_ADMIN_HOST_INFO_TX_IPV6_CSUM_OFFLOAD_SHIFT;
}
static inline void set_ena_admin_host_info_tx_ipv6_csum_offload(struct ena_admin_host_info *p, uint32_t val)
{
p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_TX_IPV6_CSUM_OFFLOAD_SHIFT) & ENA_ADMIN_HOST_INFO_TX_IPV6_CSUM_OFFLOAD_MASK;
}
static inline uint8_t get_ena_admin_feature_rss_ind_table_one_entry_update(const struct ena_admin_feature_rss_ind_table *p)
{
return p->flags & ENA_ADMIN_FEATURE_RSS_IND_TABLE_ONE_ENTRY_UPDATE_MASK;
@ -1852,6 +1910,16 @@ static inline void set_ena_admin_feature_rss_ind_table_one_entry_update(struct e
p->flags |= val & ENA_ADMIN_FEATURE_RSS_IND_TABLE_ONE_ENTRY_UPDATE_MASK;
}
static inline uint32_t get_ena_admin_host_info_phc(const struct ena_admin_host_info *p)
{
return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_PHC_MASK) >> ENA_ADMIN_HOST_INFO_PHC_SHIFT;
}
static inline void set_ena_admin_host_info_phc(struct ena_admin_host_info *p, uint32_t val)
{
p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_PHC_SHIFT) & ENA_ADMIN_HOST_INFO_PHC_MASK;
}
static inline uint8_t get_ena_admin_aenq_common_desc_phase(const struct ena_admin_aenq_common_desc *p)
{
return p->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;

ena_defs/ena_eth_io_defs.h

@ -181,7 +181,8 @@ struct ena_eth_io_tx_cdesc {
/* flags
* 0 : phase
* 7:1 : reserved1
* 5:1 : reserved1
* 7:6 : mbz6 - MBZ
*/
uint8_t flags;
@ -227,7 +228,7 @@ struct ena_eth_io_rx_desc {
struct ena_eth_io_rx_cdesc_base {
/* 4:0 : l3_proto_idx
* 6:5 : src_vlan_cnt
* 7 : reserved7 - MBZ
* 7 : mbz7 - MBZ
* 12:8 : l4_proto_idx
* 13 : l3_csum_err - when set, either the L3
* checksum error detected, or, the controller didn't
@ -243,7 +244,8 @@ struct ena_eth_io_rx_cdesc_base {
* 16 : l4_csum_checked - L4 checksum was verified
* (could be OK or error), when cleared the status of
* checksum is unknown
* 23:17 : reserved17 - MBZ
* 17 : mbz17 - MBZ
* 23:18 : reserved18
* 24 : phase
* 25 : l3_csum2 - second checksum engine result
* 26 : first - Indicates first descriptor in
@ -370,6 +372,8 @@ struct ena_eth_io_numa_node_cfg_reg {
/* tx_cdesc */
#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0)
#define ENA_ETH_IO_TX_CDESC_MBZ6_SHIFT 6
#define ENA_ETH_IO_TX_CDESC_MBZ6_MASK GENMASK(7, 6)
/* rx_desc */
#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0)
@ -384,6 +388,8 @@ struct ena_eth_io_numa_node_cfg_reg {
#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0)
#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5
#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5)
#define ENA_ETH_IO_RX_CDESC_BASE_MBZ7_SHIFT 7
#define ENA_ETH_IO_RX_CDESC_BASE_MBZ7_MASK BIT(7)
#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8
#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8)
#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13
@ -394,6 +400,8 @@ struct ena_eth_io_numa_node_cfg_reg {
#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15)
#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT 16
#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK BIT(16)
#define ENA_ETH_IO_RX_CDESC_BASE_MBZ17_SHIFT 17
#define ENA_ETH_IO_RX_CDESC_BASE_MBZ17_MASK BIT(17)
#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24
#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24)
#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25
@ -760,6 +768,15 @@ static inline void set_ena_eth_io_tx_cdesc_phase(struct ena_eth_io_tx_cdesc *p,
p->flags |= val & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
}
static inline uint8_t get_ena_eth_io_tx_cdesc_mbz6(const struct ena_eth_io_tx_cdesc *p)
{
return (p->flags & ENA_ETH_IO_TX_CDESC_MBZ6_MASK) >> ENA_ETH_IO_TX_CDESC_MBZ6_SHIFT;
}
static inline void set_ena_eth_io_tx_cdesc_mbz6(struct ena_eth_io_tx_cdesc *p, uint8_t val)
{
p->flags |= (val << ENA_ETH_IO_TX_CDESC_MBZ6_SHIFT) & ENA_ETH_IO_TX_CDESC_MBZ6_MASK;
}
static inline uint8_t get_ena_eth_io_rx_desc_phase(const struct ena_eth_io_rx_desc *p)
{
return p->ctrl & ENA_ETH_IO_RX_DESC_PHASE_MASK;
@ -820,6 +837,16 @@ static inline void set_ena_eth_io_rx_cdesc_base_src_vlan_cnt(struct ena_eth_io_r
p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK;
}
static inline uint32_t get_ena_eth_io_rx_cdesc_base_mbz7(const struct ena_eth_io_rx_cdesc_base *p)
{
return (p->status & ENA_ETH_IO_RX_CDESC_BASE_MBZ7_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_MBZ7_SHIFT;
}
static inline void set_ena_eth_io_rx_cdesc_base_mbz7(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_MBZ7_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_MBZ7_MASK;
}
static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx(const struct ena_eth_io_rx_cdesc_base *p)
{
return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
@ -870,6 +897,16 @@ static inline void set_ena_eth_io_rx_cdesc_base_l4_csum_checked(struct ena_eth_i
p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK;
}
static inline uint32_t get_ena_eth_io_rx_cdesc_base_mbz17(const struct ena_eth_io_rx_cdesc_base *p)
{
return (p->status & ENA_ETH_IO_RX_CDESC_BASE_MBZ17_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_MBZ17_SHIFT;
}
static inline void set_ena_eth_io_rx_cdesc_base_mbz17(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_MBZ17_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_MBZ17_MASK;
}
static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase(const struct ena_eth_io_rx_cdesc_base *p)
{
return (p->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

ena_defs/ena_gen_info.h

@ -1,34 +0,0 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2021 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define ENA_GEN_DATE "Tue Jan 19 12:45:09 STD 2021"
#define ENA_GEN_COMMIT "f023ae8f"

ena_defs/ena_regs_defs.h

@ -52,6 +52,9 @@ enum ena_regs_reset_reason_types {
ENA_REGS_RESET_MISS_INTERRUPT = 14,
ENA_REGS_RESET_SUSPECTED_POLL_STARVATION = 15,
ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED = 16,
ENA_REGS_RESET_TX_DESCRIPTOR_MALFORMED = 17,
ENA_REGS_RESET_MISSING_ADMIN_INTERRUPT = 18,
ENA_REGS_RESET_DEVICE_REQUEST = 19,
ENA_REGS_RESET_LAST,
};
@ -134,6 +137,8 @@ enum ena_regs_reset_reason_types {
#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
#define ENA_REGS_DEV_CTL_RESET_REASON_EXT_SHIFT 24
#define ENA_REGS_DEV_CTL_RESET_REASON_EXT_MASK 0xf000000
#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000

ena_eth_com.c

@ -102,7 +102,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
wmb();
/* The line is completed. Copy it to dev */
ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
ENA_MEMCPY_TO_DEVICE_64(io_sq->bus,
io_sq->desc_addr.pbuf_dev_addr + dst_offset,
bounce_buffer,
llq_info->desc_list_entry_size);
@ -237,11 +238,8 @@ static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
return ENA_COM_OK;
}
static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
static int ena_com_sq_update_reqular_queue_tail(struct ena_com_io_sq *io_sq)
{
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
return ena_com_sq_update_llq_tail(io_sq);
io_sq->tail++;
/* Switch phase bit in case of wrap around */
@ -251,6 +249,14 @@ static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
return ENA_COM_OK;
}
static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
return ena_com_sq_update_llq_tail(io_sq);
return ena_com_sq_update_reqular_queue_tail(io_sq);
}
static struct ena_eth_io_rx_cdesc_base *
ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
@ -264,6 +270,7 @@ static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
u16 *first_cdesc_idx,
u16 *num_descs)
{
struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);
u16 count = io_cq->cur_rx_pkt_cdesc_count, head_masked;
struct ena_eth_io_rx_cdesc_base *cdesc;
u32 last = 0;
@ -279,13 +286,21 @@ static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
ena_com_cq_inc_head(io_cq);
if (unlikely((status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT && count != 0)) {
struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);
ena_trc_err(dev,
"First bit is on in descriptor #%d on q_id: %d, req_id: %u\n",
count, io_cq->qid, cdesc->req_id);
return ENA_COM_FAULT;
}
if (unlikely((status & (ENA_ETH_IO_RX_CDESC_BASE_MBZ7_MASK |
ENA_ETH_IO_RX_CDESC_BASE_MBZ17_MASK)) &&
ena_com_get_cap(dev, ENA_ADMIN_CDESC_MBZ))) {
ena_trc_err(dev,
"Corrupted RX descriptor #%d on q_id: %d, req_id: %u\n",
count, io_cq->qid, cdesc->req_id);
return ENA_COM_FAULT;
}
count++;
last = (status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
@ -473,7 +488,7 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
/* If the caller doesn't want to send packets */
if (unlikely(!num_bufs && !header_len)) {
rc = ena_com_close_bounce_buffer(io_sq);
if (rc)
if (unlikely(rc))
ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
"Failed to write buffers to LLQ\n");
*nb_hw_desc = io_sq->tail - start_tail;
@ -658,9 +673,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
return ENA_COM_NO_SPACE;
desc = get_sq_desc(io_sq);
if (unlikely(!desc))
return ENA_COM_FAULT;
/* virt_addr allocation success is checked before calling this function */
desc = get_sq_desc_regular_queue(io_sq);
memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));
@ -681,7 +695,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->buff_addr_hi =
((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
return ena_com_sq_update_tail(io_sq);
return ena_com_sq_update_reqular_queue_tail(io_sq);
}
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)

ena_eth_com.h

@ -39,6 +39,11 @@ extern "C" {
#endif
#include "ena_com.h"
/* we allow 2 DMA descriptors per LLQ entry */
#define ENA_LLQ_ENTRY_DESC_CHUNK_SIZE (2 * sizeof(struct ena_eth_io_tx_desc))
#define ENA_LLQ_HEADER (128UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
#define ENA_LLQ_LARGE_HEADER (256UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
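/* The arithmetic (illustration, not part of this commit's diff): each
 * struct ena_eth_io_tx_desc is 16 bytes, so the two-descriptor chunk is
 * 32 bytes and
 *
 *	ENA_LLQ_HEADER       = 128 - 32 =  96 bytes of header room
 *	ENA_LLQ_LARGE_HEADER = 256 - 32 = 224 bytes of header room
 *
 * per LLQ descriptor list entry.
 */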
struct ena_com_tx_ctx {
struct ena_com_tx_meta ena_meta;
struct ena_com_buf *ena_bufs;
@ -227,9 +232,11 @@ static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
u16 *req_id)
{
struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);
u8 expected_phase, cdesc_phase;
struct ena_eth_io_tx_cdesc *cdesc;
u16 masked_head;
u8 flags;
masked_head = io_cq->head & (io_cq->q_depth - 1);
expected_phase = io_cq->phase;
@ -238,14 +245,24 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
((uintptr_t)io_cq->cdesc_addr.virt_addr +
(masked_head * io_cq->cdesc_entry_size_in_bytes));
flags = READ_ONCE8(cdesc->flags);
/* When the current completion descriptor phase isn't the same as the
* expected, it mean that the device still didn't update
* this completion.
*/
cdesc_phase = READ_ONCE16(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
cdesc_phase = flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
if (cdesc_phase != expected_phase)
return ENA_COM_TRY_AGAIN;
if (unlikely((flags & ENA_ETH_IO_TX_CDESC_MBZ6_MASK) &&
ena_com_get_cap(dev, ENA_ADMIN_CDESC_MBZ))) {
ena_trc_err(dev,
"Corrupted TX descriptor on q_id: %d, req_id: %u\n",
io_cq->qid, cdesc->req_id);
return ENA_COM_FAULT;
}
dma_rmb();
*req_id = READ_ONCE16(cdesc->req_id);

ena_plat.h

@ -334,11 +334,12 @@ ena_reg_read32(struct ena_bus *bus, bus_size_t offset)
return v;
}
#define ENA_MEMCPY_TO_DEVICE_64(dst, src, size) \
#define ENA_MEMCPY_TO_DEVICE_64(bus, dst, src, size) \
do { \
int count, i; \
volatile uint64_t *to = (volatile uint64_t *)(dst); \
const uint64_t *from = (const uint64_t *)(src); \
(void)(bus); \
count = (size) / 8; \
\
for (i = 0; i < count; i++, from++, to++) \
@ -469,8 +470,12 @@ void ena_rss_key_fill(void *key, size_t size);
#define ENA_RSS_FILL_KEY(key, size) ena_rss_key_fill(key, size)
#define ENA_FIELD_GET(value, mask, offset) ((value & mask) >> offset)
#include "ena_defs/ena_includes.h"
#define ENA_BITS_PER_U64(bitmap) (bitcount64(bitmap))
#define ENA_ADMIN_OS_FREEBSD 4
#endif /* ENA_PLAT_H_ */