Add RDMA (iWARP and RoCEv1) support

David Somayajulu (davidcs): Overall RDMA Driver infrastructure and iWARP
Anand Khoje (akhoje@marvell.com): RoCEv1 verbs implementation

MFC after: 5 days
David C Somayajulu 2019-01-31 00:09:38 +00:00
parent fb4e718261
commit fa790ea99f
18 changed files with 23231 additions and 0 deletions

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,603 @@
/*
* Copyright (c) 2018-2019 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* File : ecore_ooo.c
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "ecore_ll2.h"
#include "ecore_ooo.h"
#include "ecore_iscsi.h"
#include "ecore_cxt.h"
/*
* Static OOO functions
*/
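/*
* A sketch of the data model used by the helpers below, as read from the
* ecore_ooo structures: each out-of-order capable connection (CID) owns one
* "archipelago", located in p_archipelagos_mem at index
* (cid & 0xffff) - cid_base. An archipelago keeps an ordered list of
* "isles", where each isle is a contiguous run of out-of-order Rx buffers.
* Unused isles and buffers are recycled through free_isles_list and
* free_buffers_list, and buffers that have become in-order are moved to
* ready_buffers_list for delivery. A small ring of ooo_opaque CQEs
* (ooo_history) is apparently kept as a debug trace of recent events.
*/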
static struct ecore_ooo_archipelago *
ecore_ooo_seek_archipelago(struct ecore_ooo_info *p_ooo_info, u32 cid)
{
u32 idx = (cid & 0xffff) - p_ooo_info->cid_base;
struct ecore_ooo_archipelago *p_archipelago;
if (idx >= p_ooo_info->max_num_archipelagos)
return OSAL_NULL;
p_archipelago = &p_ooo_info->p_archipelagos_mem[idx];
if (OSAL_LIST_IS_EMPTY(&p_archipelago->isles_list))
return OSAL_NULL;
return p_archipelago;
}
static struct ecore_ooo_isle *ecore_ooo_seek_isle(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info,
u32 cid, u8 isle)
{
struct ecore_ooo_archipelago *p_archipelago = OSAL_NULL;
struct ecore_ooo_isle *p_isle = OSAL_NULL;
u8 the_num_of_isle = 1;
p_archipelago = ecore_ooo_seek_archipelago(p_ooo_info, cid);
if (!p_archipelago) {
DP_NOTICE(p_hwfn, true,
"Connection %d is not found in OOO list\n", cid);
return OSAL_NULL;
}
OSAL_LIST_FOR_EACH_ENTRY(p_isle,
&p_archipelago->isles_list,
list_entry, struct ecore_ooo_isle) {
if (the_num_of_isle == isle)
return p_isle;
the_num_of_isle++;
}
return OSAL_NULL;
}
void ecore_ooo_save_history_entry(struct ecore_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe)
{
struct ecore_ooo_history *p_history = &p_ooo_info->ooo_history;
if (p_history->head_idx == p_history->num_of_cqes)
p_history->head_idx = 0;
p_history->p_cqes[p_history->head_idx] = *p_cqe;
p_history->head_idx++;
}
//#ifdef CONFIG_ECORE_ISCSI
#if defined(CONFIG_ECORE_ISCSI) || defined(CONFIG_ECORE_IWARP)
enum _ecore_status_t ecore_ooo_alloc(struct ecore_hwfn *p_hwfn)
{
u16 max_num_archipelagos = 0, cid_base;
struct ecore_ooo_info *p_ooo_info;
u16 max_num_isles = 0;
u32 i;
switch (p_hwfn->hw_info.personality) {
case ECORE_PCI_ISCSI:
max_num_archipelagos =
p_hwfn->pf_params.iscsi_pf_params.num_cons;
cid_base = (u16)ecore_cxt_get_proto_cid_start(p_hwfn,
PROTOCOLID_ISCSI);
break;
case ECORE_PCI_ETH_RDMA:
case ECORE_PCI_ETH_IWARP:
max_num_archipelagos =
(u16)ecore_cxt_get_proto_cid_count(p_hwfn,
PROTOCOLID_IWARP,
OSAL_NULL);
cid_base = (u16)ecore_cxt_get_proto_cid_start(p_hwfn,
PROTOCOLID_IWARP);
break;
default:
DP_NOTICE(p_hwfn, true,
"Failed to allocate ecore_ooo_info: unknown personalization\n");
return ECORE_INVAL;
}
max_num_isles = ECORE_MAX_NUM_ISLES + max_num_archipelagos;
if (!max_num_archipelagos) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate ecore_ooo_info: unknown amount of connections\n");
return ECORE_INVAL;
}
p_ooo_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(*p_ooo_info));
if (!p_ooo_info) {
DP_NOTICE(p_hwfn, true, "Failed to allocate ecore_ooo_info\n");
return ECORE_NOMEM;
}
p_ooo_info->cid_base = cid_base; /* We look only at the icid */
p_ooo_info->max_num_archipelagos = max_num_archipelagos;
OSAL_LIST_INIT(&p_ooo_info->free_buffers_list);
OSAL_LIST_INIT(&p_ooo_info->ready_buffers_list);
OSAL_LIST_INIT(&p_ooo_info->free_isles_list);
p_ooo_info->p_isles_mem =
OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(struct ecore_ooo_isle) *
max_num_isles);
if (!p_ooo_info->p_isles_mem) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate ecore_ooo_info (isles)\n");
goto no_isles_mem;
}
for (i = 0; i < max_num_isles; i++) {
OSAL_LIST_INIT(&p_ooo_info->p_isles_mem[i].buffers_list);
OSAL_LIST_PUSH_TAIL(&p_ooo_info->p_isles_mem[i].list_entry,
&p_ooo_info->free_isles_list);
}
p_ooo_info->p_archipelagos_mem =
OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(struct ecore_ooo_archipelago) *
max_num_archipelagos);
if (!p_ooo_info->p_archipelagos_mem) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate ecore_ooo_info (archipelagos)\n");
goto no_archipelagos_mem;
}
for (i = 0; i < max_num_archipelagos; i++) {
OSAL_LIST_INIT(&p_ooo_info->p_archipelagos_mem[i].isles_list);
}
p_ooo_info->ooo_history.p_cqes =
OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(struct ooo_opaque) *
ECORE_MAX_NUM_OOO_HISTORY_ENTRIES);
if (!p_ooo_info->ooo_history.p_cqes) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate ecore_ooo_info (history)\n");
goto no_history_mem;
}
p_ooo_info->ooo_history.num_of_cqes =
ECORE_MAX_NUM_OOO_HISTORY_ENTRIES;
p_hwfn->p_ooo_info = p_ooo_info;
return ECORE_SUCCESS;
no_history_mem:
OSAL_FREE(p_hwfn->p_dev, p_ooo_info->p_archipelagos_mem);
no_archipelagos_mem:
OSAL_FREE(p_hwfn->p_dev, p_ooo_info->p_isles_mem);
no_isles_mem:
OSAL_FREE(p_hwfn->p_dev, p_ooo_info);
return ECORE_NOMEM;
}
#endif
void ecore_ooo_release_connection_isles(struct ecore_ooo_info *p_ooo_info,
u32 cid)
{
struct ecore_ooo_archipelago *p_archipelago;
struct ecore_ooo_buffer *p_buffer;
struct ecore_ooo_isle *p_isle;
p_archipelago = ecore_ooo_seek_archipelago(p_ooo_info, cid);
if (!p_archipelago)
return;
while (!OSAL_LIST_IS_EMPTY(&p_archipelago->isles_list)) {
p_isle = OSAL_LIST_FIRST_ENTRY(
&p_archipelago->isles_list,
struct ecore_ooo_isle, list_entry);
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
OSAL_LIST_REMOVE_ENTRY(&p_isle->list_entry,
&p_archipelago->isles_list);
while (!OSAL_LIST_IS_EMPTY(&p_isle->buffers_list)) {
p_buffer =
OSAL_LIST_FIRST_ENTRY(
&p_isle->buffers_list ,
struct ecore_ooo_buffer, list_entry);
if (p_buffer == OSAL_NULL)
break;
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
OSAL_LIST_REMOVE_ENTRY(&p_buffer->list_entry,
&p_isle->buffers_list);
OSAL_LIST_PUSH_TAIL(&p_buffer->list_entry,
&p_ooo_info->free_buffers_list);
}
OSAL_LIST_PUSH_TAIL(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
}
}
void ecore_ooo_release_all_isles(struct ecore_ooo_info *p_ooo_info)
{
struct ecore_ooo_archipelago *p_archipelago;
struct ecore_ooo_buffer *p_buffer;
struct ecore_ooo_isle *p_isle;
u32 i;
for (i = 0; i < p_ooo_info->max_num_archipelagos; i++) {
p_archipelago = &(p_ooo_info->p_archipelagos_mem[i]);
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
while (!OSAL_LIST_IS_EMPTY(&p_archipelago->isles_list)) {
p_isle = OSAL_LIST_FIRST_ENTRY(
&p_archipelago->isles_list,
struct ecore_ooo_isle, list_entry);
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
OSAL_LIST_REMOVE_ENTRY(&p_isle->list_entry,
&p_archipelago->isles_list);
while (!OSAL_LIST_IS_EMPTY(&p_isle->buffers_list)) {
p_buffer =
OSAL_LIST_FIRST_ENTRY(
&p_isle->buffers_list ,
struct ecore_ooo_buffer, list_entry);
if (p_buffer == OSAL_NULL)
break;
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
OSAL_LIST_REMOVE_ENTRY(&p_buffer->list_entry,
&p_isle->buffers_list);
OSAL_LIST_PUSH_TAIL(&p_buffer->list_entry,
&p_ooo_info->free_buffers_list);
}
OSAL_LIST_PUSH_TAIL(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
}
}
if (!OSAL_LIST_IS_EMPTY(&p_ooo_info->ready_buffers_list)) {
OSAL_LIST_SPLICE_TAIL_INIT(&p_ooo_info->ready_buffers_list,
&p_ooo_info->free_buffers_list);
}
}
//#ifdef CONFIG_ECORE_ISCSI
#if defined(CONFIG_ECORE_ISCSI) || defined(CONFIG_ECORE_IWARP)
void ecore_ooo_setup(struct ecore_hwfn *p_hwfn)
{
ecore_ooo_release_all_isles(p_hwfn->p_ooo_info);
OSAL_MEM_ZERO(p_hwfn->p_ooo_info->ooo_history.p_cqes,
p_hwfn->p_ooo_info->ooo_history.num_of_cqes *
sizeof(struct ooo_opaque));
p_hwfn->p_ooo_info->ooo_history.head_idx = 0;
}
void ecore_ooo_free(struct ecore_hwfn *p_hwfn)
{
struct ecore_ooo_info *p_ooo_info = p_hwfn->p_ooo_info;
struct ecore_ooo_buffer *p_buffer;
if (!p_ooo_info)
return;
ecore_ooo_release_all_isles(p_ooo_info);
while (!OSAL_LIST_IS_EMPTY(&p_ooo_info->free_buffers_list)) {
p_buffer = OSAL_LIST_FIRST_ENTRY(&p_ooo_info->
free_buffers_list,
struct ecore_ooo_buffer,
list_entry);
if (p_buffer == OSAL_NULL)
break;
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
OSAL_LIST_REMOVE_ENTRY(&p_buffer->list_entry,
&p_ooo_info->free_buffers_list);
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
p_buffer->rx_buffer_virt_addr,
p_buffer->rx_buffer_phys_addr,
p_buffer->rx_buffer_size);
OSAL_FREE(p_hwfn->p_dev, p_buffer);
}
OSAL_FREE(p_hwfn->p_dev, p_ooo_info->p_isles_mem);
OSAL_FREE(p_hwfn->p_dev, p_ooo_info->p_archipelagos_mem);
OSAL_FREE(p_hwfn->p_dev, p_ooo_info->ooo_history.p_cqes);
OSAL_FREE(p_hwfn->p_dev, p_ooo_info);
p_hwfn->p_ooo_info = OSAL_NULL;
}
#endif
void ecore_ooo_put_free_buffer(struct ecore_ooo_info *p_ooo_info,
struct ecore_ooo_buffer *p_buffer)
{
OSAL_LIST_PUSH_TAIL(&p_buffer->list_entry,
&p_ooo_info->free_buffers_list);
}
struct ecore_ooo_buffer *
ecore_ooo_get_free_buffer(struct ecore_ooo_info *p_ooo_info)
{
struct ecore_ooo_buffer *p_buffer = OSAL_NULL;
if (!OSAL_LIST_IS_EMPTY(&p_ooo_info->free_buffers_list)) {
p_buffer =
OSAL_LIST_FIRST_ENTRY(
&p_ooo_info->free_buffers_list,
struct ecore_ooo_buffer, list_entry);
OSAL_LIST_REMOVE_ENTRY(&p_buffer->list_entry,
&p_ooo_info->free_buffers_list);
}
return p_buffer;
}
void ecore_ooo_put_ready_buffer(struct ecore_ooo_info *p_ooo_info,
struct ecore_ooo_buffer *p_buffer, u8 on_tail)
{
if (on_tail) {
OSAL_LIST_PUSH_TAIL(&p_buffer->list_entry,
&p_ooo_info->ready_buffers_list);
} else {
OSAL_LIST_PUSH_HEAD(&p_buffer->list_entry,
&p_ooo_info->ready_buffers_list);
}
}
struct ecore_ooo_buffer *
ecore_ooo_get_ready_buffer(struct ecore_ooo_info *p_ooo_info)
{
struct ecore_ooo_buffer *p_buffer = OSAL_NULL;
if (!OSAL_LIST_IS_EMPTY(&p_ooo_info->ready_buffers_list)) {
p_buffer =
OSAL_LIST_FIRST_ENTRY(
&p_ooo_info->ready_buffers_list,
struct ecore_ooo_buffer, list_entry);
OSAL_LIST_REMOVE_ENTRY(&p_buffer->list_entry,
&p_ooo_info->ready_buffers_list);
}
return p_buffer;
}
void ecore_ooo_delete_isles(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info,
u32 cid,
u8 drop_isle,
u8 drop_size)
{
struct ecore_ooo_archipelago *p_archipelago = OSAL_NULL;
struct ecore_ooo_isle *p_isle = OSAL_NULL;
u8 isle_idx;
p_archipelago = ecore_ooo_seek_archipelago(p_ooo_info, cid);
for (isle_idx = 0; isle_idx < drop_size; isle_idx++) {
p_isle = ecore_ooo_seek_isle(p_hwfn, p_ooo_info,
cid, drop_isle);
if (!p_isle) {
DP_NOTICE(p_hwfn, true,
"Isle %d is not found(cid %d)\n",
drop_isle, cid);
return;
}
if (OSAL_LIST_IS_EMPTY(&p_isle->buffers_list)) {
DP_NOTICE(p_hwfn, true,
"Isle %d is empty(cid %d)\n",
drop_isle, cid);
} else {
OSAL_LIST_SPLICE_TAIL_INIT(&p_isle->buffers_list,
&p_ooo_info->free_buffers_list);
}
#if defined(_NTDDK_)
#pragma warning(suppress : 6011)
#endif
OSAL_LIST_REMOVE_ENTRY(&p_isle->list_entry,
&p_archipelago->isles_list);
p_ooo_info->cur_isles_number--;
OSAL_LIST_PUSH_HEAD(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
}
}
void ecore_ooo_add_new_isle(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info,
u32 cid, u8 ooo_isle,
struct ecore_ooo_buffer *p_buffer)
{
struct ecore_ooo_archipelago *p_archipelago = OSAL_NULL;
struct ecore_ooo_isle *p_prev_isle = OSAL_NULL;
struct ecore_ooo_isle *p_isle = OSAL_NULL;
if (ooo_isle > 1) {
p_prev_isle = ecore_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle - 1);
if (!p_prev_isle) {
DP_NOTICE(p_hwfn, true,
"Isle %d is not found(cid %d)\n",
ooo_isle - 1, cid);
return;
}
}
p_archipelago = ecore_ooo_seek_archipelago(p_ooo_info, cid);
if (!p_archipelago && (ooo_isle != 1)) {
DP_NOTICE(p_hwfn, true,
"Connection %d is not found in OOO list\n", cid);
return;
}
if (!OSAL_LIST_IS_EMPTY(&p_ooo_info->free_isles_list)) {
p_isle =
OSAL_LIST_FIRST_ENTRY(
&p_ooo_info->free_isles_list,
struct ecore_ooo_isle, list_entry);
OSAL_LIST_REMOVE_ENTRY(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
if (!OSAL_LIST_IS_EMPTY(&p_isle->buffers_list)) {
DP_NOTICE(p_hwfn, true, "Free isle is not empty\n");
OSAL_LIST_INIT(&p_isle->buffers_list);
}
} else {
DP_NOTICE(p_hwfn, true, "No more free isles\n");
return;
}
if (!p_archipelago) {
u32 idx = (cid & 0xffff) - p_ooo_info->cid_base;
p_archipelago = &p_ooo_info->p_archipelagos_mem[idx];
}
OSAL_LIST_PUSH_HEAD(&p_buffer->list_entry, &p_isle->buffers_list);
p_ooo_info->cur_isles_number++;
p_ooo_info->gen_isles_number++;
if (p_ooo_info->cur_isles_number > p_ooo_info->max_isles_number)
p_ooo_info->max_isles_number = p_ooo_info->cur_isles_number;
if (!p_prev_isle) {
OSAL_LIST_PUSH_HEAD(&p_isle->list_entry, &p_archipelago->isles_list);
} else {
OSAL_LIST_INSERT_ENTRY_AFTER(&p_isle->list_entry,
&p_prev_isle->list_entry,
&p_archipelago->isles_list);
}
}
void ecore_ooo_add_new_buffer(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info,
u32 cid,
u8 ooo_isle,
struct ecore_ooo_buffer *p_buffer,
u8 buffer_side)
{
struct ecore_ooo_isle * p_isle = OSAL_NULL;
p_isle = ecore_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle);
if (!p_isle) {
DP_NOTICE(p_hwfn, true,
"Isle %d is not found(cid %d)\n",
ooo_isle, cid);
return;
}
if (buffer_side == ECORE_OOO_LEFT_BUF) {
OSAL_LIST_PUSH_HEAD(&p_buffer->list_entry,
&p_isle->buffers_list);
} else {
OSAL_LIST_PUSH_TAIL(&p_buffer->list_entry,
&p_isle->buffers_list);
}
}
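/*
* Isle joining, below: when the gap between two neighbouring isles is
* filled, the right isle's buffers are appended to the left isle. A
* left_isle of 0 means the right isle now connects directly to the
* in-order stream, so its buffers are spliced onto ready_buffers_list
* instead. Either way the emptied right isle is returned to
* free_isles_list.
*/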
void ecore_ooo_join_isles(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info,
u32 cid, u8 left_isle)
{
struct ecore_ooo_archipelago *p_archipelago = OSAL_NULL;
struct ecore_ooo_isle *p_right_isle = OSAL_NULL;
struct ecore_ooo_isle *p_left_isle = OSAL_NULL;
p_right_isle = ecore_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
left_isle + 1);
if (!p_right_isle) {
DP_NOTICE(p_hwfn, true,
"Right isle %d is not found(cid %d)\n",
left_isle + 1, cid);
return;
}
p_archipelago = ecore_ooo_seek_archipelago(p_ooo_info, cid);
OSAL_LIST_REMOVE_ENTRY(&p_right_isle->list_entry,
&p_archipelago->isles_list);
p_ooo_info->cur_isles_number--;
if (left_isle) {
p_left_isle = ecore_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
left_isle);
if (!p_left_isle) {
DP_NOTICE(p_hwfn, true,
"Left isle %d is not found(cid %d)\n",
left_isle, cid);
return;
}
OSAL_LIST_SPLICE_TAIL_INIT(&p_right_isle->buffers_list,
&p_left_isle->buffers_list);
} else {
OSAL_LIST_SPLICE_TAIL_INIT(&p_right_isle->buffers_list,
&p_ooo_info->ready_buffers_list);
}
OSAL_LIST_PUSH_TAIL(&p_right_isle->list_entry,
&p_ooo_info->free_isles_list);
}
void ecore_ooo_dump_rx_event(struct ecore_hwfn *p_hwfn,
struct ooo_opaque *iscsi_ooo,
struct ecore_ooo_buffer *p_buffer)
{
int i;
u32 dp_module = ECORE_MSG_OOO;
u32 ph_hi, ph_lo;
u8 *packet_buffer = 0;
if (p_hwfn->dp_level > ECORE_LEVEL_VERBOSE)
return;
if (!(p_hwfn->dp_module & dp_module))
return;
packet_buffer = (u8 *)p_buffer->rx_buffer_virt_addr +
p_buffer->placement_offset;
DP_VERBOSE(p_hwfn, dp_module,
"******************************************************\n");
ph_hi = DMA_HI(p_buffer->rx_buffer_phys_addr);
ph_lo = DMA_LO(p_buffer->rx_buffer_phys_addr);
DP_VERBOSE(p_hwfn, dp_module,
"0x%x-%x: CID 0x%x, OP 0x%x, ISLE 0x%x\n",
ph_hi, ph_lo,
iscsi_ooo->cid, iscsi_ooo->ooo_opcode, iscsi_ooo->ooo_isle);
for (i = 0; i < 64; i = i + 8) {
DP_VERBOSE(p_hwfn, dp_module,
"0x%x-%x: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
ph_hi, ph_lo,
packet_buffer[i],
packet_buffer[i + 1],
packet_buffer[i + 2],
packet_buffer[i + 3],
packet_buffer[i + 4],
packet_buffer[i + 5],
packet_buffer[i + 6],
packet_buffer[i + 7]);
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,347 @@
/*
* Copyright (c) 2018-2019 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* File : qlnx_rdma.c
* Author: David C Somayajulu
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "qlnx_os.h"
#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#ifdef CONFIG_ECORE_SRIOV
#include "ecore_sriov.h"
#include "ecore_vf.h"
#endif
#ifdef CONFIG_ECORE_LL2
#include "ecore_ll2.h"
#endif
#ifdef CONFIG_ECORE_FCOE
#include "ecore_fcoe.h"
#endif
#ifdef CONFIG_ECORE_ISCSI
#include "ecore_iscsi.h"
#endif
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#ifdef CONFIG_ECORE_RDMA
#include "ecore_rdma.h"
#endif
#ifdef CONFIG_ECORE_ROCE
#include "ecore_roce.h"
#endif
#ifdef CONFIG_ECORE_IWARP
#include "ecore_iwarp.h"
#endif
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dev_api.h"
#include "ecore_dbg_fw_funcs.h"
#include "qlnx_ioctl.h"
#include "qlnx_def.h"
#include "qlnx_rdma.h"
#include "qlnx_ver.h"
#include <sys/smp.h>
struct mtx qlnx_rdma_dev_lock;
struct qlnx_rdma_if *qlnx_rdma_if = NULL;
qlnx_host_t *qlnx_host_list = NULL;
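/*
* Registration model implemented below: the Ethernet driver adds each
* qlnx_host_t to qlnx_host_list as ports attach, while the RDMA ULP
* registers a single qlnx_rdma_if callback table via
* qlnx_rdma_register_if(). Whichever side arrives second triggers the
* ->add() callback for every RDMA-capable (iWARP/RoCE personality) port,
* so the load order of the two modules does not matter.
* qlnx_rdma_dev_lock protects both the host list and the interface
* pointer.
*/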
void
qlnx_rdma_init(void)
{
if (!mtx_initialized(&qlnx_rdma_dev_lock)) {
mtx_init(&qlnx_rdma_dev_lock, "qlnx_rdma_dev_lock", NULL, MTX_DEF);
}
return;
}
void
qlnx_rdma_deinit(void)
{
if (mtx_initialized(&qlnx_rdma_dev_lock) && (qlnx_host_list == NULL)) {
mtx_destroy(&qlnx_rdma_dev_lock);
}
return;
}
static void
_qlnx_rdma_dev_add(struct qlnx_host *ha)
{
QL_DPRINT12(ha, "enter ha = %p qlnx_rdma_if = %p\n", ha, qlnx_rdma_if);
if (qlnx_rdma_if == NULL)
return;
if (ha->personality != ECORE_PCI_ETH_IWARP &&
ha->personality != ECORE_PCI_ETH_ROCE)
return;
ha->qlnx_rdma = qlnx_rdma_if->add(ha);
QL_DPRINT12(ha, "exit (ha = %p, qlnx_rdma = %p)\n", ha, ha->qlnx_rdma);
return;
}
void
qlnx_rdma_dev_add(struct qlnx_host *ha)
{
QL_DPRINT12(ha, "enter ha = %p\n", ha);
if (ha->personality != ECORE_PCI_ETH_IWARP &&
ha->personality != ECORE_PCI_ETH_ROCE)
return;
mtx_lock(&qlnx_rdma_dev_lock);
if (qlnx_host_list == NULL) {
qlnx_host_list = ha;
ha->next = NULL;
} else {
ha->next = qlnx_host_list;
qlnx_host_list = ha;
}
mtx_unlock(&qlnx_rdma_dev_lock);
_qlnx_rdma_dev_add(ha);
QL_DPRINT12(ha, "exit (%p)\n", ha);
return;
}
static int
_qlnx_rdma_dev_remove(struct qlnx_host *ha)
{
int ret = 0;
QL_DPRINT12(ha, "enter ha = %p qlnx_rdma_if = %p\n", ha, qlnx_rdma_if);
if (qlnx_rdma_if == NULL)
return (ret);
if (ha->personality != ECORE_PCI_ETH_IWARP &&
ha->personality != ECORE_PCI_ETH_ROCE)
return (ret);
ret = qlnx_rdma_if->remove(ha, ha->qlnx_rdma);
QL_DPRINT12(ha, "exit ha = %p qlnx_rdma_if = %p\n", ha, qlnx_rdma_if);
return (ret);
}
int
qlnx_rdma_dev_remove(struct qlnx_host *ha)
{
int ret = 0;
qlnx_host_t *ha_prev;
qlnx_host_t *ha_cur;
QL_DPRINT12(ha, "enter ha = %p\n", ha);
if ((qlnx_host_list == NULL) || (ha == NULL))
return (ret);
if (ha->personality != ECORE_PCI_ETH_IWARP &&
ha->personality != ECORE_PCI_ETH_ROCE)
return (ret);
ret = _qlnx_rdma_dev_remove(ha);
if (ret)
return (ret);
mtx_lock(&qlnx_rdma_dev_lock);
if (qlnx_host_list == ha) {
qlnx_host_list = ha->next;
ha->next = NULL;
mtx_unlock(&qlnx_rdma_dev_lock);
QL_DPRINT12(ha, "exit0 ha = %p\n", ha);
return (ret);
}
ha_prev = ha_cur = qlnx_host_list;
while ((ha_cur != ha) && (ha_cur != NULL)) {
ha_prev = ha_cur;
ha_cur = ha_cur->next;
}
if (ha_cur == ha) {
/* unlink ha from the singly-linked host list */
ha_prev->next = ha->next;
ha->next = NULL;
}
mtx_unlock(&qlnx_rdma_dev_lock);
QL_DPRINT12(ha, "exit1 ha = %p\n", ha);
return (ret);
}
int
qlnx_rdma_register_if(qlnx_rdma_if_t *rdma_if)
{
qlnx_host_t *ha;
if (mtx_initialized(&qlnx_rdma_dev_lock)) {
mtx_lock(&qlnx_rdma_dev_lock);
qlnx_rdma_if = rdma_if;
ha = qlnx_host_list;
while (ha != NULL) {
_qlnx_rdma_dev_add(ha);
ha = ha->next;
}
mtx_unlock(&qlnx_rdma_dev_lock);
return (0);
}
return (-1);
}
int
qlnx_rdma_deregister_if(qlnx_rdma_if_t *rdma_if)
{
int ret = 0;
qlnx_host_t *ha;
printf("%s: enter rdma_if = %p\n", __func__, rdma_if);
if (mtx_initialized(&qlnx_rdma_dev_lock)) {
mtx_lock(&qlnx_rdma_dev_lock);
ha = qlnx_host_list;
while (ha != NULL) {
mtx_unlock(&qlnx_rdma_dev_lock);
if (ha->dbg_level & 0xF000)
ret = EBUSY;
else
ret = _qlnx_rdma_dev_remove(ha);
device_printf(ha->pci_dev, "%s [%d]: ret = 0x%x\n",
__func__, __LINE__, ret);
if (ret)
return (ret);
mtx_lock(&qlnx_rdma_dev_lock);
ha->qlnx_rdma = NULL;
ha = ha->next;
}
if (!ret)
qlnx_rdma_if = NULL;
mtx_unlock(&qlnx_rdma_dev_lock);
}
printf("%s: exit rdma_if = %p\n", __func__, rdma_if);
return (ret);
}
void
qlnx_rdma_dev_open(struct qlnx_host *ha)
{
QL_DPRINT12(ha, "enter ha = %p qlnx_rdma_if = %p\n", ha, qlnx_rdma_if);
if (qlnx_rdma_if == NULL)
return;
if (ha->personality != ECORE_PCI_ETH_IWARP &&
ha->personality != ECORE_PCI_ETH_ROCE)
return;
qlnx_rdma_if->notify(ha, ha->qlnx_rdma, QLNX_ETHDEV_UP);
QL_DPRINT12(ha, "exit ha = %p qlnx_rdma_if = %p\n", ha, qlnx_rdma_if);
return;
}
void
qlnx_rdma_dev_close(struct qlnx_host *ha)
{
QL_DPRINT12(ha, "enter ha = %p qlnx_rdma_if = %p\n", ha, qlnx_rdma_if);
if (qlnx_rdma_if == NULL)
return;
if (ha->personality != ECORE_PCI_ETH_IWARP &&
ha->personality != ECORE_PCI_ETH_ROCE)
return;
qlnx_rdma_if->notify(ha, ha->qlnx_rdma, QLNX_ETHDEV_DOWN);
QL_DPRINT12(ha, "exit ha = %p qlnx_rdma_if = %p\n", ha, qlnx_rdma_if);
return;
}
int
qlnx_rdma_get_num_irqs(struct qlnx_host *ha)
{
return (QLNX_NUM_CNQ + ecore_rdma_get_sb_id(&ha->cdev.hwfns[0], 0) + 2);
}


@@ -0,0 +1,69 @@
/*
* Copyright (c) 2018-2019 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* File: qlnx_rdma.h
* Author: David C Somayajulu
*/
#ifndef _QLNX_RDMA_H_
#define _QLNX_RDMA_H_
enum qlnx_rdma_event {
QLNX_ETHDEV_UP = 0x10,
QLNX_ETHDEV_DOWN = 0x11,
QLNX_ETHDEV_CHANGE_ADDR = 0x12
};
struct qlnx_rdma_if {
void * (*add)(void *ha);
int (*remove)(void *ha, void *qlnx_rdma_dev);
void (*notify)(void *ha, void *qlnx_rdma_dev, enum qlnx_rdma_event);
};
typedef struct qlnx_rdma_if qlnx_rdma_if_t;
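/*
* Minimal usage sketch for an RDMA ULP (illustrative only; the qlnxr_*
* callback names below are hypothetical, not part of this interface):
*
*	static void *qlnxr_add(void *ha);
*	static int qlnxr_remove(void *ha, void *rdma_dev);
*	static void qlnxr_notify(void *ha, void *rdma_dev,
*			enum qlnx_rdma_event event);
*
*	static qlnx_rdma_if_t qlnxr_if = {
*		.add = qlnxr_add,
*		.remove = qlnxr_remove,
*		.notify = qlnxr_notify,
*	};
*
*	On module load: qlnx_rdma_register_if(&qlnxr_if);
*	On module unload: qlnx_rdma_deregister_if(&qlnxr_if);
*/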
extern int qlnx_rdma_register_if(qlnx_rdma_if_t *rdma_if);
extern int qlnx_rdma_deregister_if(qlnx_rdma_if_t *rdma_if);
extern int qlnx_rdma_ll2_set_mac_filter(void *rdma_ctx, uint8_t *old_mac_address,
uint8_t *new_mac_address);
#define QLNX_NUM_CNQ 1
extern int qlnx_rdma_get_num_irqs(struct qlnx_host *ha);
extern void qlnx_rdma_dev_add(struct qlnx_host *ha);
extern void qlnx_rdma_dev_open(struct qlnx_host *ha);
extern void qlnx_rdma_dev_close(struct qlnx_host *ha);
extern int qlnx_rdma_dev_remove(struct qlnx_host *ha);
extern void qlnx_rdma_changeaddr(struct qlnx_host *ha);
extern void qlnx_rdma_init(void);
extern void qlnx_rdma_deinit(void);
#endif /* #ifndef _QLNX_RDMA_H_ */


@@ -0,0 +1,887 @@
/*
* Copyright (c) 2018-2019 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "qlnxr_def.h"
#include "rdma_common.h"
#include "qlnxr_cm.h"
void
qlnxr_inc_sw_gsi_cons(struct qlnxr_qp_hwq_info *info)
{
info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
}
void
qlnxr_store_gsi_qp_cq(struct qlnxr_dev *dev,
struct qlnxr_qp *qp,
struct ib_qp_init_attr *attrs)
{
QL_DPRINT12(dev->ha, "enter\n");
dev->gsi_qp_created = 1;
dev->gsi_sqcq = get_qlnxr_cq((attrs->send_cq));
dev->gsi_rqcq = get_qlnxr_cq((attrs->recv_cq));
dev->gsi_qp = qp;
QL_DPRINT12(dev->ha, "exit\n");
return;
}
void
qlnxr_ll2_complete_tx_packet(void *cxt,
uint8_t connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment,
bool b_last_packet)
{
struct qlnxr_dev *dev = (struct qlnxr_dev *)cxt;
struct ecore_roce_ll2_packet *pkt = cookie;
struct qlnxr_cq *cq = dev->gsi_sqcq;
struct qlnxr_qp *qp = dev->gsi_qp;
unsigned long flags;
QL_DPRINT12(dev->ha, "enter\n");
qlnx_dma_free_coherent(&dev->ha->cdev, pkt->header.vaddr,
pkt->header.baddr, pkt->header.len);
kfree(pkt);
spin_lock_irqsave(&qp->q_lock, flags);
qlnxr_inc_sw_gsi_cons(&qp->sq);
spin_unlock_irqrestore(&qp->q_lock, flags);
if (cq->ibcq.comp_handler)
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
QL_DPRINT12(dev->ha, "exit\n");
return;
}
void
qlnxr_ll2_complete_rx_packet(void *cxt,
struct ecore_ll2_comp_rx_data *data)
{
struct qlnxr_dev *dev = (struct qlnxr_dev *)cxt;
struct qlnxr_cq *cq = dev->gsi_rqcq;
// struct qlnxr_qp *qp = dev->gsi_qp;
struct qlnxr_qp *qp = NULL;
unsigned long flags;
uint32_t qp_num = 0;
// uint32_t delay_count = 0, gsi_cons = 0;
//void * dest_va;
QL_DPRINT12(dev->ha, "enter\n");
if (data->u.data_length_error) {
/* TODO: add statistic */
}
if (data->cookie == NULL) {
QL_DPRINT12(dev->ha, "cookie is NULL, bad sign\n");
}
qp_num = (0xFF << 16) | data->qp_id;
if (data->qp_id == 1) {
qp = dev->gsi_qp;
} else {
/* TODO: This will be needed for UD QP support */
/* For RoCEv1 this is invalid */
QL_DPRINT12(dev->ha, "invalid QP\n");
return;
}
/* note: currently only one recv sg is supported */
QL_DPRINT12(dev->ha, "MAD received on QP : %x\n", data->rx_buf_addr);
spin_lock_irqsave(&qp->q_lock, flags);
qp->rqe_wr_id[qp->rq.gsi_cons].rc =
data->u.data_length_error ? -EINVAL : 0;
qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan;
/* note: length stands for data length i.e. GRH is excluded */
qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
data->length.data_length;
*((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) =
ntohl(data->opaque_data_0);
*((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) =
ntohs((u16)data->opaque_data_1);
qlnxr_inc_sw_gsi_cons(&qp->rq);
spin_unlock_irqrestore(&qp->q_lock, flags);
if (cq->ibcq.comp_handler)
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
QL_DPRINT12(dev->ha, "exit\n");
return;
}
void qlnxr_ll2_release_rx_packet(void *cxt,
u8 connection_handle,
void *cookie,
dma_addr_t rx_buf_addr,
bool b_last_packet)
{
/* Do nothing... */
}
static void
qlnxr_destroy_gsi_cq(struct qlnxr_dev *dev,
struct ib_qp_init_attr *attrs)
{
struct ecore_rdma_destroy_cq_in_params iparams;
struct ecore_rdma_destroy_cq_out_params oparams;
struct qlnxr_cq *cq;
QL_DPRINT12(dev->ha, "enter\n");
cq = get_qlnxr_cq((attrs->send_cq));
iparams.icid = cq->icid;
ecore_rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
ecore_chain_free(&dev->ha->cdev, &cq->pbl);
cq = get_qlnxr_cq((attrs->recv_cq));
/* if a dedicated recv_cq was used, delete it too */
if (iparams.icid != cq->icid) {
iparams.icid = cq->icid;
ecore_rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
ecore_chain_free(&dev->ha->cdev, &cq->pbl);
}
QL_DPRINT12(dev->ha, "exit\n");
return;
}
static inline int
qlnxr_check_gsi_qp_attrs(struct qlnxr_dev *dev,
struct ib_qp_init_attr *attrs)
{
QL_DPRINT12(dev->ha, "enter\n");
if (attrs->cap.max_recv_sge > QLNXR_GSI_MAX_RECV_SGE) {
QL_DPRINT11(dev->ha,
"(attrs->cap.max_recv_sge > QLNXR_GSI_MAX_RECV_SGE)\n");
return -EINVAL;
}
if (attrs->cap.max_recv_wr > QLNXR_GSI_MAX_RECV_WR) {
QL_DPRINT11(dev->ha,
"(attrs->cap.max_recv_wr > QLNXR_GSI_MAX_RECV_WR)\n");
return -EINVAL;
}
if (attrs->cap.max_send_wr > QLNXR_GSI_MAX_SEND_WR) {
QL_DPRINT11(dev->ha,
"(attrs->cap.max_send_wr > QLNXR_GSI_MAX_SEND_WR)\n");
return -EINVAL;
}
QL_DPRINT12(dev->ha, "exit\n");
return 0;
}
static int
qlnxr_ll2_post_tx(struct qlnxr_dev *dev, struct ecore_roce_ll2_packet *pkt)
{
enum ecore_ll2_roce_flavor_type roce_flavor;
struct ecore_ll2_tx_pkt_info ll2_tx_pkt;
int rc;
int i;
QL_DPRINT12(dev->ha, "enter\n");
memset(&ll2_tx_pkt, 0, sizeof(ll2_tx_pkt));
if (pkt->roce_mode != ROCE_V1) {
QL_DPRINT11(dev->ha, "roce_mode != ROCE_V1\n");
return (-1);
}
roce_flavor = (pkt->roce_mode == ROCE_V1) ?
ECORE_LL2_ROCE : ECORE_LL2_RROCE;
ll2_tx_pkt.num_of_bds = 1 /* hdr */ + pkt->n_seg;
ll2_tx_pkt.vlan = 0; /* ??? */
ll2_tx_pkt.tx_dest = ECORE_LL2_TX_DEST_NW;
ll2_tx_pkt.ecore_roce_flavor = roce_flavor;
ll2_tx_pkt.first_frag = pkt->header.baddr;
ll2_tx_pkt.first_frag_len = pkt->header.len;
ll2_tx_pkt.cookie = pkt;
ll2_tx_pkt.enable_ip_cksum = 1; // Only for RoCEv2:IPv4
/* tx header */
rc = ecore_ll2_prepare_tx_packet(dev->rdma_ctx,
dev->gsi_ll2_handle,
&ll2_tx_pkt,
1);
if (rc) {
QL_DPRINT11(dev->ha, "ecore_ll2_prepare_tx_packet failed\n");
/* TX failed while posting header - release resources*/
qlnx_dma_free_coherent(&dev->ha->cdev,
pkt->header.vaddr,
pkt->header.baddr,
pkt->header.len);
kfree(pkt);
return rc;
}
/* tx payload */
for (i = 0; i < pkt->n_seg; i++) {
rc = ecore_ll2_set_fragment_of_tx_packet(dev->rdma_ctx,
dev->gsi_ll2_handle,
pkt->payload[i].baddr,
pkt->payload[i].len);
if (rc) {
/* if failed not much to do here, partial packet has
* been posted we can't free memory, will need to wait
* for completion
*/
QL_DPRINT11(dev->ha,
"ecore_ll2_set_fragment_of_tx_packet failed\n");
return rc;
}
}
struct ecore_ll2_stats stats = {0};
rc = ecore_ll2_get_stats(dev->rdma_ctx, dev->gsi_ll2_handle, &stats);
if (rc) {
QL_DPRINT11(dev->ha, "failed to obtain ll2 stats\n");
}
QL_DPRINT12(dev->ha, "exit\n");
return 0;
}
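/*
* Teardown in qlnxr_ll2_stop() mirrors the setup in qlnxr_ll2_start():
* the LL2 MAC filter is removed first, then the connection is terminated
* and released. A gsi_ll2_handle value of 0xFF is used throughout as the
* "no LL2 connection" sentinel.
*/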
int
qlnxr_ll2_stop(struct qlnxr_dev *dev)
{
int rc;
QL_DPRINT12(dev->ha, "enter\n");
if (dev->gsi_ll2_handle == 0xFF)
return 0;
/* remove LL2 MAC address filter */
rc = qlnx_rdma_ll2_set_mac_filter(dev->rdma_ctx,
dev->gsi_ll2_mac_address, NULL);
rc = ecore_ll2_terminate_connection(dev->rdma_ctx,
dev->gsi_ll2_handle);
ecore_ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
dev->gsi_ll2_handle = 0xFF;
QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
return rc;
}
int qlnxr_ll2_start(struct qlnxr_dev *dev,
struct ib_qp_init_attr *attrs,
struct qlnxr_qp *qp)
{
struct ecore_ll2_acquire_data data;
struct ecore_ll2_cbs cbs;
int rc;
QL_DPRINT12(dev->ha, "enter\n");
/* configure and start LL2 */
cbs.rx_comp_cb = qlnxr_ll2_complete_rx_packet;
cbs.tx_comp_cb = qlnxr_ll2_complete_tx_packet;
cbs.rx_release_cb = qlnxr_ll2_release_rx_packet;
cbs.tx_release_cb = qlnxr_ll2_complete_tx_packet;
cbs.cookie = dev;
dev->gsi_ll2_handle = 0xFF;
memset(&data, 0, sizeof(data));
data.input.conn_type = ECORE_LL2_TYPE_ROCE;
data.input.mtu = dev->ha->ifp->if_mtu;
data.input.rx_num_desc = 8 * 1024;
data.input.rx_drop_ttl0_flg = 1;
data.input.rx_vlan_removal_en = 0;
data.input.tx_num_desc = 8 * 1024;
data.input.tx_tc = 0;
data.input.tx_dest = ECORE_LL2_TX_DEST_NW;
data.input.ai_err_packet_too_big = ECORE_LL2_DROP_PACKET;
data.input.ai_err_no_buf = ECORE_LL2_DROP_PACKET;
data.input.gsi_enable = 1;
data.p_connection_handle = &dev->gsi_ll2_handle;
data.cbs = &cbs;
rc = ecore_ll2_acquire_connection(dev->rdma_ctx, &data);
if (rc) {
QL_DPRINT11(dev->ha,
"ecore_ll2_acquire_connection failed: %d\n",
rc);
return rc;
}
QL_DPRINT11(dev->ha,
"ll2 connection acquired successfully\n");
rc = ecore_ll2_establish_connection(dev->rdma_ctx,
dev->gsi_ll2_handle);
if (rc) {
QL_DPRINT11(dev->ha,
"ecore_ll2_establish_connection failed\n", rc);
goto err1;
}
QL_DPRINT11(dev->ha,
"ll2 connection established successfully\n");
rc = qlnx_rdma_ll2_set_mac_filter(dev->rdma_ctx, NULL,
dev->ha->primary_mac);
if (rc) {
QL_DPRINT11(dev->ha, "qlnx_rdma_ll2_set_mac_filter failed\n", rc);
goto err2;
}
QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
return 0;
err2:
ecore_ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
err1:
ecore_ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
return rc;
}
struct ib_qp*
qlnxr_create_gsi_qp(struct qlnxr_dev *dev,
struct ib_qp_init_attr *attrs,
struct qlnxr_qp *qp)
{
int rc;
QL_DPRINT12(dev->ha, "enter\n");
rc = qlnxr_check_gsi_qp_attrs(dev, attrs);
if (rc) {
QL_DPRINT11(dev->ha, "qlnxr_check_gsi_qp_attrs failed\n");
return ERR_PTR(rc);
}
rc = qlnxr_ll2_start(dev, attrs, qp);
if (rc) {
QL_DPRINT11(dev->ha, "qlnxr_ll2_start failed\n");
return ERR_PTR(rc);
}
/* create QP */
qp->ibqp.qp_num = 1;
qp->rq.max_wr = attrs->cap.max_recv_wr;
qp->sq.max_wr = attrs->cap.max_send_wr;
qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
GFP_KERNEL);
if (!qp->rqe_wr_id) {
QL_DPRINT11(dev->ha, "(!qp->rqe_wr_id)\n");
goto err;
}
qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
GFP_KERNEL);
if (!qp->wqe_wr_id) {
QL_DPRINT11(dev->ha, "(!qp->wqe_wr_id)\n");
goto err;
}
qlnxr_store_gsi_qp_cq(dev, qp, attrs);
memcpy(dev->gsi_ll2_mac_address, dev->ha->primary_mac, ETH_ALEN);
/* the GSI CQ is handled by the driver so remove it from the FW */
qlnxr_destroy_gsi_cq(dev, attrs);
dev->gsi_rqcq->cq_type = QLNXR_CQ_TYPE_GSI;
QL_DPRINT12(dev->ha, "exit &qp->ibqp = %p\n", &qp->ibqp);
return &qp->ibqp;
err:
kfree(qp->rqe_wr_id);
rc = qlnxr_ll2_stop(dev);
QL_DPRINT12(dev->ha, "exit with error\n");
return ERR_PTR(-ENOMEM);
}
int
qlnxr_destroy_gsi_qp(struct qlnxr_dev *dev)
{
int rc = 0;
QL_DPRINT12(dev->ha, "enter\n");
rc = qlnxr_ll2_stop(dev);
QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
return (rc);
}
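/*
* For RoCEv1 on a VLAN interface, the VLAN ID is assumed to be encoded in
* the GID; the helper below reads it from dgid bytes 11-12 and treats
* values of 0x1000 and above as "no VLAN".
*/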
static inline bool
qlnxr_get_vlan_id_gsi(struct ib_ah_attr *ah_attr, u16 *vlan_id)
{
u16 tmp_vlan_id;
union ib_gid *dgid = &ah_attr->grh.dgid;
tmp_vlan_id = (dgid->raw[11] << 8) | dgid->raw[12];
if (tmp_vlan_id < 0x1000) {
*vlan_id = tmp_vlan_id;
return true;
} else {
*vlan_id = 0;
return false;
}
}
#define QLNXR_MAX_UD_HEADER_SIZE (100)
#define QLNXR_GSI_QPN (1)
static inline int
qlnxr_gsi_build_header(struct qlnxr_dev *dev,
struct qlnxr_qp *qp,
struct ib_send_wr *swr,
struct ib_ud_header *udh,
int *roce_mode)
{
bool has_vlan = false, has_grh_ipv6 = true;
struct ib_ah_attr *ah_attr = &get_qlnxr_ah((ud_wr(swr)->ah))->attr;
struct ib_global_route *grh = &ah_attr->grh;
union ib_gid sgid;
int send_size = 0;
u16 vlan_id = 0;
u16 ether_type;
#if __FreeBSD_version >= 1102000
int rc = 0;
int ip_ver = 0;
bool has_udp = false;
#endif /* #if __FreeBSD_version >= 1102000 */
#if !DEFINE_IB_AH_ATTR_WITH_DMAC
u8 mac[ETH_ALEN];
#endif
int i;
send_size = 0;
for (i = 0; i < swr->num_sge; ++i)
send_size += swr->sg_list[i].length;
has_vlan = qlnxr_get_vlan_id_gsi(ah_attr, &vlan_id);
ether_type = ETH_P_ROCE;
*roce_mode = ROCE_V1;
if (grh->sgid_index < QLNXR_MAX_SGID)
sgid = dev->sgid_tbl[grh->sgid_index];
else
sgid = dev->sgid_tbl[0];
#if __FreeBSD_version >= 1102000
rc = ib_ud_header_init(send_size, false /* LRH */, true /* ETH */,
has_vlan, has_grh_ipv6, ip_ver, has_udp,
0 /* immediate */, udh);
if (rc) {
QL_DPRINT11(dev->ha, "gsi post send: failed to init header\n");
return rc;
}
#else
ib_ud_header_init(send_size, false /* LRH */, true /* ETH */,
has_vlan, has_grh_ipv6, 0 /* immediate */, udh);
#endif /* #if __FreeBSD_version >= 1102000 */
/* ENET + VLAN headers*/
#if DEFINE_IB_AH_ATTR_WITH_DMAC
memcpy(udh->eth.dmac_h, ah_attr->dmac, ETH_ALEN);
#else
qlnxr_get_dmac(dev, ah_attr, mac);
memcpy(udh->eth.dmac_h, mac, ETH_ALEN);
#endif
memcpy(udh->eth.smac_h, dev->ha->primary_mac, ETH_ALEN);
if (has_vlan) {
udh->eth.type = htons(ETH_P_8021Q);
udh->vlan.tag = htons(vlan_id);
udh->vlan.type = htons(ether_type);
} else {
udh->eth.type = htons(ether_type);
}
for (int j = 0; j < ETH_ALEN; j++) {
QL_DPRINT12(dev->ha, "destination mac: %x\n",
udh->eth.dmac_h[j]);
}
for (int j = 0; j < ETH_ALEN; j++) {
QL_DPRINT12(dev->ha, "source mac: %x\n",
udh->eth.smac_h[j]);
}
QL_DPRINT12(dev->ha, "QP: %p, opcode: %d, wq: %lx, roce: %x, hops:%d,"
"imm : %d, vlan :%d, AH: %p\n",
qp, swr->opcode, swr->wr_id, *roce_mode, grh->hop_limit,
0, has_vlan, get_qlnxr_ah((ud_wr(swr)->ah)));
if (has_grh_ipv6) {
/* GRH / IPv6 header */
udh->grh.traffic_class = grh->traffic_class;
udh->grh.flow_label = grh->flow_label;
udh->grh.hop_limit = grh->hop_limit;
udh->grh.destination_gid = grh->dgid;
memcpy(&udh->grh.source_gid.raw, &sgid.raw,
sizeof(udh->grh.source_gid.raw));
QL_DPRINT12(dev->ha, "header: tc: %x, flow_label : %x, "
"hop_limit: %x \n", udh->grh.traffic_class,
udh->grh.flow_label, udh->grh.hop_limit);
for (i = 0; i < 16; i++) {
QL_DPRINT12(dev->ha, "udh dgid = %x\n", udh->grh.destination_gid.raw[i]);
}
for (i = 0; i < 16; i++) {
QL_DPRINT12(dev->ha, "udh sgid = %x\n", udh->grh.source_gid.raw[i]);
}
udh->grh.next_header = 0x1b;
}
#ifdef DEFINE_IB_UD_HEADER_INIT_UDP_PRESENT
/* This is for RoCEv2 */
else {
/* IPv4 header */
u32 ipv4_addr;
udh->ip4.protocol = IPPROTO_UDP;
udh->ip4.tos = htonl(grh->flow_label);
udh->ip4.frag_off = htons(IP_DF);
udh->ip4.ttl = grh->hop_limit;
ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
udh->ip4.saddr = ipv4_addr;
ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw);
udh->ip4.daddr = ipv4_addr;
/* note: checksum is calculated by the device */
}
#endif
/* BTH */
udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
udh->bth.pkey = QLNXR_ROCE_PKEY_DEFAULT; /* TODO: ib_get_cached_pkey?! */
//udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
udh->bth.destination_qpn = OSAL_CPU_TO_BE32(ud_wr(swr)->remote_qpn);
//udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
udh->bth.psn = OSAL_CPU_TO_BE32((qp->sq_psn++) & ((1 << 24) - 1));
udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
/* DETH */
//udh->deth.qkey = htonl(0x80010000); /* qp->qkey */ /* TODO: what is?! */
//udh->deth.source_qpn = htonl(QLNXR_GSI_QPN);
udh->deth.qkey = OSAL_CPU_TO_BE32(0x80010000); /* qp->qkey */ /* TODO: what is?! */
udh->deth.source_qpn = OSAL_CPU_TO_BE32(QLNXR_GSI_QPN);
QL_DPRINT12(dev->ha, "exit\n");
return 0;
}
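/*
* Packet assembly below: the UD header built above is packed into a
* DMA-coherent buffer that becomes the first LL2 fragment, and each SGE
* of the work request is forwarded as a payload fragment. If the
* destination MAC equals our own source MAC, the packet is steered to the
* loopback path (ECORE_ROCE_LL2_TX_DEST_LB) rather than the wire.
*/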
static inline int
qlnxr_gsi_build_packet(struct qlnxr_dev *dev,
struct qlnxr_qp *qp, struct ib_send_wr *swr,
struct ecore_roce_ll2_packet **p_packet)
{
u8 ud_header_buffer[QLNXR_MAX_UD_HEADER_SIZE];
struct ecore_roce_ll2_packet *packet;
int roce_mode, header_size;
struct ib_ud_header udh;
int i, rc;
QL_DPRINT12(dev->ha, "enter\n");
*p_packet = NULL;
rc = qlnxr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
if (rc) {
QL_DPRINT11(dev->ha,
"qlnxr_gsi_build_header failed rc = %d\n", rc);
return rc;
}
header_size = ib_ud_header_pack(&udh, &ud_header_buffer);
packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
if (!packet) {
QL_DPRINT11(dev->ha, "packet == NULL\n");
return -ENOMEM;
}
packet->header.vaddr = qlnx_dma_alloc_coherent(&dev->ha->cdev,
&packet->header.baddr,
header_size);
if (!packet->header.vaddr) {
QL_DPRINT11(dev->ha, "packet->header.vaddr == NULL\n");
kfree(packet);
return -ENOMEM;
}
if (memcmp(udh.eth.smac_h, udh.eth.dmac_h, ETH_ALEN))
packet->tx_dest = ECORE_ROCE_LL2_TX_DEST_NW;
else
packet->tx_dest = ECORE_ROCE_LL2_TX_DEST_LB;
packet->roce_mode = roce_mode;
memcpy(packet->header.vaddr, ud_header_buffer, header_size);
packet->header.len = header_size;
packet->n_seg = swr->num_sge;
qp->wqe_wr_id[qp->sq.prod].bytes_len = IB_GRH_BYTES; //RDMA_GRH_BYTES
for (i = 0; i < packet->n_seg; i++) {
packet->payload[i].baddr = swr->sg_list[i].addr;
packet->payload[i].len = swr->sg_list[i].length;
qp->wqe_wr_id[qp->sq.prod].bytes_len +=
packet->payload[i].len;
QL_DPRINT11(dev->ha, "baddr: %p, len: %d\n",
packet->payload[i].baddr,
packet->payload[i].len);
}
*p_packet = packet;
QL_DPRINT12(dev->ha, "exit, packet->n_seg: %d\n", packet->n_seg);
return 0;
}
int
qlnxr_gsi_post_send(struct ib_qp *ibqp,
struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
struct ecore_roce_ll2_packet *pkt = NULL;
struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
struct qlnxr_dev *dev = qp->dev;
unsigned long flags;
int rc;
QL_DPRINT12(dev->ha, "exit\n");
if (qp->state != ECORE_ROCE_QP_STATE_RTS) {
QL_DPRINT11(dev->ha,
"(qp->state != ECORE_ROCE_QP_STATE_RTS)\n");
*bad_wr = wr;
return -EINVAL;
}
if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
QL_DPRINT11(dev->ha,
"(wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE)\n");
rc = -EINVAL;
goto err;
}
if (wr->opcode != IB_WR_SEND) {
QL_DPRINT11(dev->ha, "(wr->opcode > IB_WR_SEND)\n");
rc = -EINVAL;
goto err;
}
spin_lock_irqsave(&qp->q_lock, flags);
rc = qlnxr_gsi_build_packet(dev, qp, wr, &pkt);
if (rc) {
spin_unlock_irqrestore(&qp->q_lock, flags);
QL_DPRINT11(dev->ha, "qlnxr_gsi_build_packet failed\n");
goto err;
}
rc = qlnxr_ll2_post_tx(dev, pkt);
if (!rc) {
qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
qp->wqe_wr_id[qp->sq.prod].signaled =
!!(wr->send_flags & IB_SEND_SIGNALED);
qp->wqe_wr_id[qp->sq.prod].opcode = IB_WC_SEND;
qlnxr_inc_sw_prod(&qp->sq);
QL_DPRINT11(dev->ha, "packet sent over gsi qp\n");
} else {
QL_DPRINT11(dev->ha, "qlnxr_ll2_post_tx failed\n");
rc = -EAGAIN;
*bad_wr = wr;
}
spin_unlock_irqrestore(&qp->q_lock, flags);
if (wr->next != NULL) {
*bad_wr = wr->next;
rc = -EINVAL;
}
QL_DPRINT12(dev->ha, "exit\n");
return rc;
err:
*bad_wr = wr;
QL_DPRINT12(dev->ha, "exit error\n");
return rc;
}
#define QLNXR_LL2_RX_BUFFER_SIZE (4 * 1024)
int
qlnxr_gsi_post_recv(struct ib_qp *ibqp,
struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr)
{
struct qlnxr_dev *dev = get_qlnxr_dev((ibqp->device));
struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
unsigned long flags;
int rc = 0;
QL_DPRINT12(dev->ha, "enter, wr: %p\n", wr);
if ((qp->state != ECORE_ROCE_QP_STATE_RTR) &&
(qp->state != ECORE_ROCE_QP_STATE_RTS)) {
*bad_wr = wr;
QL_DPRINT11(dev->ha, "exit 0\n");
return -EINVAL;
}
spin_lock_irqsave(&qp->q_lock, flags);
while (wr) {
if (wr->num_sge > QLNXR_GSI_MAX_RECV_SGE) {
QL_DPRINT11(dev->ha, "exit 1\n");
goto err;
}
rc = ecore_ll2_post_rx_buffer(dev->rdma_ctx,
dev->gsi_ll2_handle,
wr->sg_list[0].addr,
wr->sg_list[0].length,
0 /* cookie */,
1 /* notify_fw */);
if (rc) {
QL_DPRINT11(dev->ha, "exit 2\n");
goto err;
}
memset(&qp->rqe_wr_id[qp->rq.prod], 0,
sizeof(qp->rqe_wr_id[qp->rq.prod]));
qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
qlnxr_inc_sw_prod(&qp->rq);
wr = wr->next;
}
spin_unlock_irqrestore(&qp->q_lock, flags);
QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
return rc;
err:
spin_unlock_irqrestore(&qp->q_lock, flags);
*bad_wr = wr;
QL_DPRINT12(dev->ha, "exit with -ENOMEM\n");
return -ENOMEM;
}
int
qlnxr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
struct qlnxr_dev *dev = get_qlnxr_dev((ibcq->device));
struct qlnxr_cq *cq = get_qlnxr_cq(ibcq);
struct qlnxr_qp *qp = dev->gsi_qp;
unsigned long flags;
int i = 0;
QL_DPRINT12(dev->ha, "enter\n");
spin_lock_irqsave(&cq->cq_lock, flags);
while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
memset(&wc[i], 0, sizeof(*wc));
wc[i].qp = &qp->ibqp;
wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
wc[i].opcode = IB_WC_RECV;
wc[i].pkey_index = 0;
wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc)?
IB_WC_GENERAL_ERR:IB_WC_SUCCESS;
/* 0 - currently only one recv sg is supported */
wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
#if __FreeBSD_version >= 1100000
memcpy(&wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac, ETH_ALEN);
wc[i].wc_flags |= IB_WC_WITH_SMAC;
if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
wc[i].wc_flags |= IB_WC_WITH_VLAN;
wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
}
#endif
qlnxr_inc_sw_cons(&qp->rq);
i++;
}
while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
memset(&wc[i], 0, sizeof(*wc));
wc[i].qp = &qp->ibqp;
wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
wc[i].opcode = IB_WC_SEND;
wc[i].status = IB_WC_SUCCESS;
qlnxr_inc_sw_cons(&qp->sq);
i++;
}
spin_unlock_irqrestore(&cq->cq_lock, flags);
QL_DPRINT12(dev->ha, "exit i = %d\n", i);
return i;
}


@@ -0,0 +1,112 @@
/*
* Copyright (c) 2018-2019 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __QLNXR_CM_H__
#define __QLNXR_CM_H__
/* ECORE LL2 has a limit on the number of buffers it can handle.
* For reference, OFED used 512 and 128 for recv and send.
*/
#define QLNXR_GSI_MAX_RECV_WR (4096)
#define QLNXR_GSI_MAX_SEND_WR (4096)
#define QLNXR_GSI_MAX_RECV_SGE (1) /* LL2 FW limitation */
/* future OFED/kernel will have these */
#define ETH_P_ROCE (0x8915)
#define QLNXR_ROCE_V2_UDP_SPORT (0000)
#if __FreeBSD_version >= 1102000
#define rdma_wr(_wr) rdma_wr(_wr)
#define ud_wr(_wr) ud_wr(_wr)
#define atomic_wr(_wr) atomic_wr(_wr)
#else
#define rdma_wr(_wr) (&(_wr->wr.rdma))
#define ud_wr(_wr) (&(_wr->wr.ud))
#define atomic_wr(_wr) (&(_wr->wr.atomic))
#endif /* #if __FreeBSD_version >= 1102000 */
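/*
* For RoCEv2, an IPv4 address is carried as an IPv4-mapped GID
* (::ffff:a.b.c.d), so the address is simply the last four bytes of the
* 16-byte GID; the helper below reinterprets gid[12..15] accordingly.
*/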
static inline u32 qlnxr_get_ipv4_from_gid(u8 *gid)
{
return *(u32 *)(void *)&gid[12];
}
struct ecore_roce_ll2_header {
void *vaddr;
dma_addr_t baddr;
size_t len;
};
struct ecore_roce_ll2_buffer {
dma_addr_t baddr;
size_t len;
};
struct ecore_roce_ll2_packet {
struct ecore_roce_ll2_header header;
int n_seg;
struct ecore_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
int roce_mode;
enum ecore_roce_ll2_tx_dest tx_dest;
};
/* RDMA CM */
extern int qlnxr_gsi_poll_cq(struct ib_cq *ibcq,
int num_entries,
struct ib_wc *wc);
extern int qlnxr_gsi_post_recv(struct ib_qp *ibqp,
struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr);
extern int qlnxr_gsi_post_send(struct ib_qp *ibqp,
struct ib_send_wr *wr,
struct ib_send_wr **bad_wr);
extern struct ib_qp* qlnxr_create_gsi_qp(struct qlnxr_dev *dev,
struct ib_qp_init_attr *attrs,
struct qlnxr_qp *qp);
extern void qlnxr_store_gsi_qp_cq(struct qlnxr_dev *dev,
struct qlnxr_qp *qp,
struct ib_qp_init_attr *attrs);
extern void qlnxr_inc_sw_gsi_cons(struct qlnxr_qp_hwq_info *info);
extern int qlnxr_destroy_gsi_qp(struct qlnxr_dev *dev);
#endif /* #ifndef __QLNXR_CM_H__ */


@@ -0,0 +1,924 @@
/*
* Copyright (c) 2018-2019 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* File: qlnxr_def.h
* Author: David C Somayajulu
*/
#ifndef __QLNX_DEF_H_
#define __QLNX_DEF_H_
#include <sys/ktr.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <sys/vmem.h>
#include <asm/byteorder.h>
#include <netinet/in.h>
#include <net/ipv6.h>
#include <netinet/toecore.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_sa.h>
#if __FreeBSD_version < 1100000
#undef MODULE_VERSION
#endif
#include "qlnx_os.h"
#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#ifdef CONFIG_ECORE_SRIOV
#include "ecore_sriov.h"
#include "ecore_vf.h"
#endif
#ifdef CONFIG_ECORE_LL2
#include "ecore_ll2.h"
#endif
#ifdef CONFIG_ECORE_FCOE
#include "ecore_fcoe.h"
#endif
#ifdef CONFIG_ECORE_ISCSI
#include "ecore_iscsi.h"
#endif
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#ifdef CONFIG_ECORE_RDMA
#include "ecore_rdma.h"
#include "ecore_rdma_api.h"
#endif
#ifdef CONFIG_ECORE_ROCE
#include "ecore_roce.h"
#endif
#ifdef CONFIG_ECORE_IWARP
#include "ecore_iwarp.h"
#endif
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dbg_fw_funcs.h"
#include "rdma_common.h"
#include "qlnx_ioctl.h"
#include "qlnx_def.h"
#include "qlnx_rdma.h"
#include "qlnxr_verbs.h"
#include "qlnxr_user.h"
#include "qlnx_ver.h"
#include <sys/smp.h>
#define QLNXR_ROCE_INTERFACE_VERSION 1801
#define QLNXR_MODULE_VERSION "8.18.1.0"
#define QLNXR_NODE_DESC "QLogic 579xx RoCE HCA"
#define OC_SKH_DEVICE_PF 0x720
#define OC_SKH_DEVICE_VF 0x728
#define QLNXR_MAX_AH 512
/* QLNXR Limitations */
/* SQ/RQ Limitations
* An S/RQ PBL contains a list of pointers to pages. Each page contains S/RQE
* elements. Several S/RQE elements make up one S/RQ WQE, up to a maximum that
* differs between SQ and RQ. The size of the PBL was chosen so as not to
* limit the MAX_WR supported by ECORE, and rounded up to a power of two.
*/
/* SQ */
#define QLNXR_MAX_SQ_PBL (0x8000) /* 2^15 bytes */
#define QLNXR_MAX_SQ_PBL_ENTRIES (0x10000 / sizeof(void *)) /* number */
#define QLNXR_SQE_ELEMENT_SIZE (sizeof(struct rdma_sq_sge)) /* bytes */
#define QLNXR_MAX_SQE_ELEMENTS_PER_SQE (ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE / \
QLNXR_SQE_ELEMENT_SIZE) /* number */
#define QLNXR_MAX_SQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
QLNXR_SQE_ELEMENT_SIZE) /* number */
#define QLNXR_MAX_SQE ((QLNXR_MAX_SQ_PBL_ENTRIES) * (RDMA_RING_PAGE_SIZE) / \
(QLNXR_SQE_ELEMENT_SIZE) / (QLNXR_MAX_SQE_ELEMENTS_PER_SQE))
/* RQ */
#define QLNXR_MAX_RQ_PBL (0x2000) /* 2^13 bytes */
#define QLNXR_MAX_RQ_PBL_ENTRIES (0x10000 / sizeof(void *)) /* number */
#define QLNXR_RQE_ELEMENT_SIZE (sizeof(struct rdma_rq_sge)) /* bytes */
#define QLNXR_MAX_RQE_ELEMENTS_PER_RQE (RDMA_MAX_SGE_PER_RQ_WQE) /* number */
#define QLNXR_MAX_RQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
QLNXR_RQE_ELEMENT_SIZE) /* number */
#define QLNXR_MAX_RQE ((QLNXR_MAX_RQ_PBL_ENTRIES) * (RDMA_RING_PAGE_SIZE) / \
(QLNXR_RQE_ELEMENT_SIZE) / (QLNXR_MAX_RQE_ELEMENTS_PER_RQE))
/* CQE Limitation
* Although the FW supports a two-layer PBL, we use a single layer since it is
* more than enough. For that layer we use a maximum size of 512 kB, again
* because that reaches the maximum number of page pointers. Note the '-1' in
* the calculation, which comes from having a u16 for the number of pages, i.e.
* 0xffff is the maximum number of pages (in a single layer).
*/
#define QLNXR_CQE_SIZE (sizeof(union rdma_cqe))
#define QLNXR_MAX_CQE_PBL_SIZE (512*1024) /* 512kB */
#define QLNXR_MAX_CQE_PBL_ENTRIES (((QLNXR_MAX_CQE_PBL_SIZE) / \
sizeof(u64)) - 1) /* 64k -1 */
#define QLNXR_MAX_CQES ((u32)((QLNXR_MAX_CQE_PBL_ENTRIES) * (ECORE_CHAIN_PAGE_SIZE)\
/ QLNXR_CQE_SIZE)) /* 8M -4096/32 = 8,388,480 */
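/*
* Worked example of the bound above, assuming ECORE_CHAIN_PAGE_SIZE is
* 4096 bytes and a 32-byte CQE (per the inline comments):
*	QLNXR_MAX_CQE_PBL_ENTRIES = 512 KB / 8 - 1 = 65535 pages
*	QLNXR_MAX_CQES = 65535 * 4096 / 32 = 8,388,480 CQEs
*/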
/* CNQ size Limitation
 * The maximum CNQ size is not reachable because the FW supports a chain of u16
 * (specifically 64k-1). The FW can buffer CNQ elements to avoid an overflow, at
 * the expense of performance. Hence we set it to an arbitrarily smaller value
 * than the maximum.
 */
#define QLNXR_ROCE_MAX_CNQ_SIZE (0x4000) /* 2^14 */
#define QLNXR_MAX_PORT (1)
#define QLNXR_PORT (1)
#define QLNXR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
#define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)
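/* Example: convert_to_64bit(0x89abcdef, 0x01234567) == 0x0123456789abcdefULL;
 * e.g. for rebuilding a 64-bit QP handle from the lo/hi halves passed in from
 * user space.
 */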
/* The following number is used to determine if a handle received from the FW
 * actually points to a CQ/QP.
 */
#define QLNXR_CQ_MAGIC_NUMBER (0x11223344)
#define QLNXR_QP_MAGIC_NUMBER (0x77889900)
/* Fast path debug prints */
#define FP_DP_VERBOSE(...)
/* #define FP_DP_VERBOSE(...) DP_VERBOSE(__VA_ARGS__) */
#define FW_PAGE_SIZE (RDMA_RING_PAGE_SIZE)
#define QLNXR_MSG_INIT 0x10000
#define QLNXR_MSG_FAIL 0x10000
#define QLNXR_MSG_CQ 0x20000
#define QLNXR_MSG_RQ 0x40000
#define QLNXR_MSG_SQ 0x80000
#define QLNXR_MSG_QP (QLNXR_MSG_SQ | QLNXR_MSG_RQ)
#define QLNXR_MSG_MR 0x100000
#define QLNXR_MSG_GSI 0x200000
#define QLNXR_MSG_MISC 0x400000
#define QLNXR_MSG_SRQ 0x800000
#define QLNXR_MSG_IWARP 0x1000000
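/* These bits select per-component verbosity through qlnxr_dev.dp_module; a
 * debug print is emitted only when its QLNXR_MSG_* bit is set there (a sketch
 * of the intent, assuming the DP_VERBOSE()-style filtering used elsewhere by
 * the ecore/qlnx debug macros).
 */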
#define QLNXR_ROCE_PKEY_MAX 1
#define QLNXR_ROCE_PKEY_TABLE_LEN 1
#define QLNXR_ROCE_PKEY_DEFAULT 0xffff
#define QLNXR_MAX_SGID 128 /* TBD - add more source gids... */
#define QLNXR_ENET_STATE_BIT (0)
#define QLNXR_MAX_MSIX (16)
struct qlnxr_cnq {
struct qlnxr_dev *dev;
struct ecore_chain pbl;
struct ecore_sb_info *sb;
char name[32];
u64 n_comp;
__le16 *hw_cons_ptr;
u8 index;
int irq_rid;
struct resource *irq;
void *irq_handle;
};
struct qlnxr_device_attr {
/* Vendor specific information */
u32 vendor_id;
u32 vendor_part_id;
u32 hw_ver;
u64 fw_ver;
u64 node_guid; /* node GUID */
u64 sys_image_guid; /* System image GUID */
u8 max_cnq;
u8 max_sge; /* Maximum # of scatter/gather entries
* per Work Request supported
*/
u16 max_inline;
u32 max_sqe; /* Maximum number of outstanding send work
 * requests supported on any Work Queue
 */
u32 max_rqe; /* Maximum number of outstanding receive work
 * requests supported on any Work Queue
 */
u8 max_qp_resp_rd_atomic_resc; /* Maximum number of RDMA Reads
* & atomic operations that can
* be outstanding per QP
*/
u8 max_qp_req_rd_atomic_resc; /* The maximum depth per QP for
* initiation of RDMA Read
* & atomic operations
*/
u64 max_dev_resp_rd_atomic_resc;
u32 max_cq;
u32 max_qp;
u32 max_mr; /* Maximum # of MRs supported */
u64 max_mr_size; /* Size (in bytes) of largest contiguous memory
* block that can be registered by this device
*/
u32 max_cqe;
u32 max_mw; /* Maximum # of memory windows supported */
u32 max_fmr;
u32 max_mr_mw_fmr_pbl;
u64 max_mr_mw_fmr_size;
u32 max_pd; /* Maximum # of protection domains supported */
u32 max_ah;
u8 max_pkey;
u32 max_srq; /* Maximum number of SRQs */
u32 max_srq_wr; /* Maximum number of WRs per SRQ */
u8 max_srq_sge; /* Maximum number of SGE per WQE */
u8 max_stats_queues; /* Maximum number of statistics queues */
u32 dev_caps;
/* Ability to support RNR-NAK generation */
#define QLNXR_ROCE_DEV_CAP_RNR_NAK_MASK 0x1
#define QLNXR_ROCE_DEV_CAP_RNR_NAK_SHIFT 0
/* Ability to support port shutdown */
#define QLNXR_ROCE_DEV_CAP_SHUTDOWN_PORT_MASK 0x1
#define QLNXR_ROCE_DEV_CAP_SHUTDOWN_PORT_SHIFT 1
/* Ability to support port active event */
#define QLNXR_ROCE_DEV_CAP_PORT_ACTIVE_EVENT_MASK 0x1
#define QLNXR_ROCE_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT 2
/* Ability to support port change event */
#define QLNXR_ROCE_DEV_CAP_PORT_CHANGE_EVENT_MASK 0x1
#define QLNXR_ROCE_DEV_CAP_PORT_CHANGE_EVENT_SHIFT 3
/* Ability to support system image GUID */
#define QLNXR_ROCE_DEV_CAP_SYS_IMAGE_MASK 0x1
#define QLNXR_ROCE_DEV_CAP_SYS_IMAGE_SHIFT 4
/* Ability to support a bad P_Key counter */
#define QLNXR_ROCE_DEV_CAP_BAD_PKEY_CNT_MASK 0x1
#define QLNXR_ROCE_DEV_CAP_BAD_PKEY_CNT_SHIFT 5
/* Ability to support atomic operations */
#define QLNXR_ROCE_DEV_CAP_ATOMIC_OP_MASK 0x1
#define QLNXR_ROCE_DEV_CAP_ATOMIC_OP_SHIFT 6
#define QLNXR_ROCE_DEV_CAP_RESIZE_CQ_MASK 0x1
#define QLNXR_ROCE_DEV_CAP_RESIZE_CQ_SHIFT 7
/* Ability to support modifying the maximum number of
* outstanding work requests per QP
*/
#define QLNXR_ROCE_DEV_CAP_RESIZE_MAX_WR_MASK 0x1
#define QLNXR_ROCE_DEV_CAP_RESIZE_MAX_WR_SHIFT 8
/* Ability to support automatic path migration */
#define QLNXR_ROCE_DEV_CAP_AUTO_PATH_MIG_MASK 0x1
#define QLNXR_ROCE_DEV_CAP_AUTO_PATH_MIG_SHIFT 9
/* Ability to support the base memory management extensions */
#define QLNXR_ROCE_DEV_CAP_BASE_MEMORY_EXT_MASK 0x1
#define QLNXR_ROCE_DEV_CAP_BASE_MEMORY_EXT_SHIFT 10
#define QLNXR_ROCE_DEV_CAP_BASE_QUEUE_EXT_MASK 0x1
#define QLNXR_ROCE_DEV_CAP_BASE_QUEUE_EXT_SHIFT 11
/* Ability to support multiple page sizes per memory region */
#define QLNXR_ROCE_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK 0x1
#define QLNXR_ROCE_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT 12
/* Ability to support a block list physical buffer list */
#define QLNXR_ROCE_DEV_CAP_BLOCK_MODE_MASK 0x1
#define QLNXR_ROCE_DEV_CAP_BLOCK_MODE_SHIFT 13
/* Ability to support zero-based virtual addresses */
#define QLNXR_ROCE_DEV_CAP_ZBVA_MASK 0x1
#define QLNXR_ROCE_DEV_CAP_ZBVA_SHIFT 14
/* Ability to support local invalidate fencing */
#define QLNXR_ROCE_DEV_CAP_LOCAL_INV_FENCE_MASK 0x1
#define QLNXR_ROCE_DEV_CAP_LOCAL_INV_FENCE_SHIFT 15
/* Ability to support loopback on QP */
#define QLNXR_ROCE_DEV_CAP_LB_INDICATOR_MASK 0x1
#define QLNXR_ROCE_DEV_CAP_LB_INDICATOR_SHIFT 16
u64 page_size_caps;
u8 dev_ack_delay;
u32 reserved_lkey; /* Value of reserved L_key */
u32 bad_pkey_counter;/* Bad P_key counter support
* indicator
*/
struct ecore_rdma_events events;
};
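/* The dev_caps MASK/SHIFT pairs above describe single-bit capability fields.
 * A minimal sketch of testing one (the "attr" pointer is hypothetical, not
 * part of the driver API):
 *
 *   int atomics = (attr->dev_caps >> QLNXR_ROCE_DEV_CAP_ATOMIC_OP_SHIFT) &
 *                 QLNXR_ROCE_DEV_CAP_ATOMIC_OP_MASK;
 */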
struct qlnxr_dev {
struct ib_device ibdev;
qlnx_host_t *ha;
struct ecore_dev *cdev;
/* Added to extend Applications Support */
struct pci_dev *pdev;
uint32_t dp_module;
uint8_t dp_level;
void *rdma_ctx;
struct mtx idr_lock;
struct idr qpidr;
uint32_t wq_multiplier;
int num_cnq;
struct ecore_sb_info sb_array[QLNXR_MAX_MSIX];
struct qlnxr_cnq cnq_array[QLNXR_MAX_MSIX];
int sb_start;
int gsi_qp_created;
struct qlnxr_cq *gsi_sqcq;
struct qlnxr_cq *gsi_rqcq;
struct qlnxr_qp *gsi_qp;
/* TBD: we'll need an array of these probably per DPI... */
void __iomem *db_addr;
uint64_t db_phys_addr;
uint32_t db_size;
uint16_t dpi;
uint64_t guid;
enum ib_atomic_cap atomic_cap;
union ib_gid sgid_tbl[QLNXR_MAX_SGID];
struct mtx sgid_lock;
struct notifier_block nb_inet;
struct notifier_block nb_inet6;
uint8_t mr_key;
struct list_head entry;
struct dentry *dbgfs;
uint8_t gsi_ll2_mac_address[ETH_ALEN];
uint8_t gsi_ll2_handle;
unsigned long enet_state;
struct workqueue_struct *iwarp_wq;
volatile uint32_t pd_count;
struct qlnxr_device_attr attr;
uint8_t user_dpm_enabled;
};
typedef struct qlnxr_dev qlnxr_dev_t;
struct qlnxr_pd {
struct ib_pd ibpd;
u32 pd_id;
struct qlnxr_ucontext *uctx;
};
struct qlnxr_ucontext {
struct ib_ucontext ibucontext;
struct qlnxr_dev *dev;
struct qlnxr_pd *pd;
u64 dpi_addr;
u64 dpi_phys_addr;
u32 dpi_size;
u16 dpi;
struct list_head mm_head;
struct mutex mm_list_lock;
};
struct qlnxr_dev_attr {
struct ib_device_attr ib_attr;
};
struct qlnxr_dma_mem {
void *va;
dma_addr_t pa;
u32 size;
};
struct qlnxr_pbl {
struct list_head list_entry;
void *va;
dma_addr_t pa;
};
struct qlnxr_queue_info {
void *va;
dma_addr_t dma;
u32 size;
u16 len;
u16 entry_size; /* Size of an element in the queue */
u16 id; /* qid, where to ring the doorbell. */
u16 head, tail;
bool created;
};
struct qlnxr_eq {
struct qlnxr_queue_info q;
u32 vector;
int cq_cnt;
struct qlnxr_dev *dev;
char irq_name[32];
};
struct qlnxr_mq {
struct qlnxr_queue_info sq;
struct qlnxr_queue_info cq;
bool rearm_cq;
};
struct phy_info {
u16 auto_speeds_supported;
u16 fixed_speeds_supported;
u16 phy_type;
u16 interface_type;
};
union db_prod64 {
struct rdma_pwm_val32_data data;
u64 raw;
};
enum qlnxr_cq_type {
QLNXR_CQ_TYPE_GSI,
QLNXR_CQ_TYPE_KERNEL,
QLNXR_CQ_TYPE_USER
};
struct qlnxr_pbl_info {
u32 num_pbls;
u32 num_pbes;
u32 pbl_size;
u32 pbe_size;
bool two_layered;
};
struct qlnxr_userq {
struct ib_umem *umem;
struct qlnxr_pbl_info pbl_info;
struct qlnxr_pbl *pbl_tbl;
u64 buf_addr;
size_t buf_len;
};
struct qlnxr_cq {
struct ib_cq ibcq; /* must be first */
enum qlnxr_cq_type cq_type;
uint32_t sig;
uint16_t icid;
/* relevant to cqs created from kernel space only (ULPs) */
spinlock_t cq_lock;
uint8_t arm_flags;
struct ecore_chain pbl;
void __iomem *db_addr; /* db address for cons update*/
union db_prod64 db;
uint8_t pbl_toggle;
union rdma_cqe *latest_cqe;
union rdma_cqe *toggle_cqe;
/* TODO: remove since it is redundant with 32 bit chains */
uint32_t cq_cons;
/* relevant to cqs created from user space only (applications) */
struct qlnxr_userq q;
/* destroy-IRQ handler race prevention */
uint8_t destroyed;
uint16_t cnq_notif;
};
struct qlnxr_ah {
struct ib_ah ibah;
struct ib_ah_attr attr;
};
union db_prod32 {
struct rdma_pwm_val16_data data;
u32 raw;
};
struct qlnxr_qp_hwq_info {
/* WQE Elements*/
struct ecore_chain pbl;
u64 p_phys_addr_tbl;
u32 max_sges;
/* WQE */
u16 prod; /* WQE prod index for SW ring */
u16 cons; /* WQE cons index for SW ring */
u16 wqe_cons;
u16 gsi_cons; /* filled in by GSI implementation */
u16 max_wr;
/* DB */
void __iomem *db; /* Doorbell address */
union db_prod32 db_data; /* Doorbell data */
/* Required for iwarp_only */
void __iomem *iwarp_db2; /* Doorbell address */
union db_prod32 iwarp_db2_data; /* Doorbell data */
};
#define QLNXR_INC_SW_IDX(p_info, index) \
do { \
p_info->index = (p_info->index + 1) & \
ecore_chain_get_capacity(p_info->pbl); \
} while (0)
struct qlnxr_srq_hwq_info {
u32 max_sges;
u32 max_wr;
struct ecore_chain pbl;
u64 p_phys_addr_tbl;
u32 wqe_prod; /* WQE prod index in HW ring */
u32 sge_prod; /* SGE prod index in HW ring */
u32 wr_prod_cnt; /* wr producer count */
u32 wr_cons_cnt; /* wr consumer count */
u32 num_elems;
u32 *virt_prod_pair_addr; /* producer pair virtual address */
dma_addr_t phy_prod_pair_addr; /* producer pair physical address */
};
struct qlnxr_srq {
struct ib_srq ibsrq;
struct qlnxr_dev *dev;
/* relevant to srqs created from user space only (applications) */
struct qlnxr_userq usrq;
struct qlnxr_srq_hwq_info hw_srq;
struct ib_umem *prod_umem;
u16 srq_id;
/* lock to protect srq recv post */
spinlock_t lock;
};
enum qlnxr_qp_err_bitmap {
QLNXR_QP_ERR_SQ_FULL = 1 << 0,
QLNXR_QP_ERR_RQ_FULL = 1 << 1,
QLNXR_QP_ERR_BAD_SR = 1 << 2,
QLNXR_QP_ERR_BAD_RR = 1 << 3,
QLNXR_QP_ERR_SQ_PBL_FULL = 1 << 4,
QLNXR_QP_ERR_RQ_PBL_FULL = 1 << 5,
};
struct mr_info {
struct qlnxr_pbl *pbl_table;
struct qlnxr_pbl_info pbl_info;
struct list_head free_pbl_list;
struct list_head inuse_pbl_list;
u32 completed;
u32 completed_handled;
};
#if __FreeBSD_version < 1102000
#define DEFINE_IB_FAST_REG
#else
#define DEFINE_ALLOC_MR
#endif
#ifdef DEFINE_IB_FAST_REG
struct qlnxr_fast_reg_page_list {
struct ib_fast_reg_page_list ibfrpl;
struct qlnxr_dev *dev;
struct mr_info info;
};
#endif
struct qlnxr_qp {
struct ib_qp ibqp; /* must be first */
struct qlnxr_dev *dev;
struct qlnxr_iw_ep *ep;
struct qlnxr_qp_hwq_info sq;
struct qlnxr_qp_hwq_info rq;
u32 max_inline_data;
#if __FreeBSD_version >= 1100000
spinlock_t q_lock ____cacheline_aligned;
#else
spinlock_t q_lock;
#endif
struct qlnxr_cq *sq_cq;
struct qlnxr_cq *rq_cq;
struct qlnxr_srq *srq;
enum ecore_roce_qp_state state; /* QP state */
u32 id;
struct qlnxr_pd *pd;
enum ib_qp_type qp_type;
struct ecore_rdma_qp *ecore_qp;
u32 qp_id;
u16 icid;
u16 mtu;
int sgid_idx;
u32 rq_psn;
u32 sq_psn;
u32 qkey;
u32 dest_qp_num;
u32 sig; /* unique signature to identify valid QP */
/* relevant to qps created from kernel space only (ULPs) */
u8 prev_wqe_size;
u16 wqe_cons;
u32 err_bitmap;
bool signaled;
/* SQ shadow */
struct {
u64 wr_id;
enum ib_wc_opcode opcode;
u32 bytes_len;
u8 wqe_size;
bool signaled;
dma_addr_t icrc_mapping;
u32 *icrc;
#ifdef DEFINE_IB_FAST_REG
struct qlnxr_fast_reg_page_list *frmr;
#endif
struct qlnxr_mr *mr;
} *wqe_wr_id;
/* RQ shadow */
struct {
u64 wr_id;
struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE];
uint8_t wqe_size;
/* for GSI only */
u8 smac[ETH_ALEN];
u16 vlan_id;
int rc;
} *rqe_wr_id;
/* relevant to qps created from user space only (applications) */
struct qlnxr_userq usq;
struct qlnxr_userq urq;
atomic_t refcnt;
bool destroyed;
};
enum qlnxr_mr_type {
QLNXR_MR_USER,
QLNXR_MR_KERNEL,
QLNXR_MR_DMA,
QLNXR_MR_FRMR
};
struct qlnxr_mr {
struct ib_mr ibmr;
struct ib_umem *umem;
struct ecore_rdma_register_tid_in_params hw_mr;
enum qlnxr_mr_type type;
struct qlnxr_dev *dev;
struct mr_info info;
u64 *pages;
u32 npages;
u64 *iova_start; /* valid only for kernel_mr */
};
struct qlnxr_mm {
struct {
u64 phy_addr;
unsigned long len;
} key;
struct list_head entry;
};
struct qlnxr_iw_listener {
struct qlnxr_dev *dev;
struct iw_cm_id *cm_id;
int backlog;
void *ecore_handle;
};
struct qlnxr_iw_ep {
struct qlnxr_dev *dev;
struct iw_cm_id *cm_id;
struct qlnxr_qp *qp;
void *ecore_context;
u8 during_connect;
};
static inline void
qlnxr_inc_sw_cons(struct qlnxr_qp_hwq_info *info)
{
info->cons = (info->cons + 1) % info->max_wr;
info->wqe_cons++;
}
static inline void
qlnxr_inc_sw_prod(struct qlnxr_qp_hwq_info *info)
{
info->prod = (info->prod + 1) % info->max_wr;
}
static inline struct qlnxr_dev *
get_qlnxr_dev(struct ib_device *ibdev)
{
return container_of(ibdev, struct qlnxr_dev, ibdev);
}
static inline struct qlnxr_ucontext *
get_qlnxr_ucontext(struct ib_ucontext *ibucontext)
{
return container_of(ibucontext, struct qlnxr_ucontext, ibucontext);
}
static inline struct qlnxr_pd *
get_qlnxr_pd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct qlnxr_pd, ibpd);
}
static inline struct qlnxr_cq *
get_qlnxr_cq(struct ib_cq *ibcq)
{
return container_of(ibcq, struct qlnxr_cq, ibcq);
}
static inline struct qlnxr_qp *
get_qlnxr_qp(struct ib_qp *ibqp)
{
return container_of(ibqp, struct qlnxr_qp, ibqp);
}
static inline struct qlnxr_mr *
get_qlnxr_mr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct qlnxr_mr, ibmr);
}
static inline struct qlnxr_ah *
get_qlnxr_ah(struct ib_ah *ibah)
{
return container_of(ibah, struct qlnxr_ah, ibah);
}
static inline struct qlnxr_srq *
get_qlnxr_srq(struct ib_srq *ibsrq)
{
return container_of(ibsrq, struct qlnxr_srq, ibsrq);
}
static inline bool qlnxr_qp_has_srq(struct qlnxr_qp *qp)
{
return !!qp->srq;
}
static inline bool qlnxr_qp_has_sq(struct qlnxr_qp *qp)
{
if (qp->qp_type == IB_QPT_GSI)
return 0;
return 1;
}
static inline bool qlnxr_qp_has_rq(struct qlnxr_qp *qp)
{
if (qp->qp_type == IB_QPT_GSI || qlnxr_qp_has_srq(qp))
return 0;
return 1;
}
#ifdef DEFINE_IB_FAST_REG
static inline struct qlnxr_fast_reg_page_list *get_qlnxr_frmr_list(
struct ib_fast_reg_page_list *ifrpl)
{
return container_of(ifrpl, struct qlnxr_fast_reg_page_list, ibfrpl);
}
#endif
#define SET_FIELD2(value, name, flag) \
do { \
(value) |= ((flag) << (name ## _SHIFT)); \
} while (0)
#define QLNXR_RESP_IMM (RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
RDMA_CQE_RESPONDER_IMM_FLG_SHIFT)
#define QLNXR_RESP_RDMA (RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \
RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT)
#define QLNXR_RESP_INV (RDMA_CQE_RESPONDER_INV_FLG_MASK << \
RDMA_CQE_RESPONDER_INV_FLG_SHIFT)
#define QLNXR_RESP_RDMA_IMM (QLNXR_RESP_IMM | QLNXR_RESP_RDMA)
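/* Sketch of matching these aggregate flags against the flags byte of a
 * responder CQE (the "resp" pointer is illustrative only, not a definitive
 * code path of this driver):
 *
 *   if ((resp->flags & QLNXR_RESP_RDMA_IMM) == QLNXR_RESP_RDMA_IMM)
 *           ;       // RDMA WRITE with immediate completed
 *   else if (resp->flags & QLNXR_RESP_IMM)
 *           ;       // SEND with immediate completed
 */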
static inline int
qlnxr_get_dmac(struct qlnxr_dev *dev, struct ib_ah_attr *ah_attr, u8 *mac_addr)
{
#ifdef DEFINE_NO_IP_BASED_GIDS
u8 *guid = &ah_attr->grh.dgid.raw[8]; /* GID's 64 MSBs are the GUID */
#endif
union ib_gid zero_sgid = { { 0 } };
struct in6_addr in6;
if (!memcmp(&ah_attr->grh.dgid, &zero_sgid, sizeof(union ib_gid))) {
memset(mac_addr, 0x00, ETH_ALEN);
return -EINVAL;
}
memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
#ifdef DEFINE_NO_IP_BASED_GIDS
/* get the MAC address from the GUID i.e. EUI-64 to MAC address */
mac_addr[0] = guid[0] ^ 2; /* toggle the local/universal bit to local */
mac_addr[1] = guid[1];
mac_addr[2] = guid[2];
mac_addr[3] = guid[5];
mac_addr[4] = guid[6];
mac_addr[5] = guid[7];
#else
memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
#endif
return 0;
}
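/* Example of the EUI-64 -> MAC mapping in the DEFINE_NO_IP_BASED_GIDS branch
 * above: GUID bytes 02:0e:1e:ff:fe:aa:bb:cc yield MAC 00:0e:1e:aa:bb:cc (the
 * ff:fe filler in bytes 3-4 is dropped and the universal/local bit is toggled
 * back).
 */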
extern int qlnx_rdma_ll2_set_mac_filter(void *rdma_ctx, uint8_t *old_mac_address,
uint8_t *new_mac_address);
#define QLNXR_ROCE_PKEY_MAX 1
#define QLNXR_ROCE_PKEY_TABLE_LEN 1
#define QLNXR_ROCE_PKEY_DEFAULT 0xffff
#if __FreeBSD_version < 1100000
#define DEFINE_IB_AH_ATTR_WITH_DMAC (0)
#define DEFINE_IB_UMEM_WITH_CHUNK (1)
#else
#define DEFINE_IB_AH_ATTR_WITH_DMAC (1)
#endif
#define QLNX_IS_IWARP(rdev) IS_IWARP(ECORE_LEADING_HWFN(rdev->cdev))
#define QLNX_IS_ROCE(rdev) IS_ROCE(ECORE_LEADING_HWFN(rdev->cdev))
#define MAX_RXMIT_CONNS 16
#endif /* #ifndef __QLNX_DEF_H_ */

File diff suppressed because it is too large


@ -0,0 +1,675 @@
/*
* Copyright (c) 2018-2019 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __QLNXR_ROCE_H__
#define __QLNXR_ROCE_H__
/*
* roce completion notification queue element
*/
struct roce_cnqe {
struct regpair cq_handle;
};
struct roce_cqe_responder {
struct regpair srq_wr_id;
struct regpair qp_handle;
__le32 imm_data_or_inv_r_Key;
__le32 length;
__le32 reserved0;
__le16 rq_cons;
u8 flags;
#define ROCE_CQE_RESPONDER_TOGGLE_BIT_MASK 0x1
#define ROCE_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0
#define ROCE_CQE_RESPONDER_TYPE_MASK 0x3
#define ROCE_CQE_RESPONDER_TYPE_SHIFT 1
#define ROCE_CQE_RESPONDER_INV_FLG_MASK 0x1
#define ROCE_CQE_RESPONDER_INV_FLG_SHIFT 3
#define ROCE_CQE_RESPONDER_IMM_FLG_MASK 0x1
#define ROCE_CQE_RESPONDER_IMM_FLG_SHIFT 4
#define ROCE_CQE_RESPONDER_RDMA_FLG_MASK 0x1
#define ROCE_CQE_RESPONDER_RDMA_FLG_SHIFT 5
#define ROCE_CQE_RESPONDER_RESERVED2_MASK 0x3
#define ROCE_CQE_RESPONDER_RESERVED2_SHIFT 6
u8 status;
};
struct roce_cqe_requester {
__le16 sq_cons;
__le16 reserved0;
__le32 reserved1;
struct regpair qp_handle;
struct regpair reserved2;
__le32 reserved3;
__le16 reserved4;
u8 flags;
#define ROCE_CQE_REQUESTER_TOGGLE_BIT_MASK 0x1
#define ROCE_CQE_REQUESTER_TOGGLE_BIT_SHIFT 0
#define ROCE_CQE_REQUESTER_TYPE_MASK 0x3
#define ROCE_CQE_REQUESTER_TYPE_SHIFT 1
#define ROCE_CQE_REQUESTER_RESERVED5_MASK 0x1F
#define ROCE_CQE_REQUESTER_RESERVED5_SHIFT 3
u8 status;
};
struct roce_cqe_common {
struct regpair reserved0;
struct regpair qp_handle;
__le16 reserved1[7];
u8 flags;
#define ROCE_CQE_COMMON_TOGGLE_BIT_MASK 0x1
#define ROCE_CQE_COMMON_TOGGLE_BIT_SHIFT 0
#define ROCE_CQE_COMMON_TYPE_MASK 0x3
#define ROCE_CQE_COMMON_TYPE_SHIFT 1
#define ROCE_CQE_COMMON_RESERVED2_MASK 0x1F
#define ROCE_CQE_COMMON_RESERVED2_SHIFT 3
u8 status;
};
/*
* roce completion queue element
*/
union roce_cqe {
struct roce_cqe_responder resp;
struct roce_cqe_requester req;
struct roce_cqe_common cmn;
};
/*
* CQE requester status enumeration
*/
enum roce_cqe_requester_status_enum {
ROCE_CQE_REQ_STS_OK,
ROCE_CQE_REQ_STS_BAD_RESPONSE_ERR,
ROCE_CQE_REQ_STS_LOCAL_LENGTH_ERR,
ROCE_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR,
ROCE_CQE_REQ_STS_LOCAL_PROTECTION_ERR,
ROCE_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR,
ROCE_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR,
ROCE_CQE_REQ_STS_REMOTE_ACCESS_ERR,
ROCE_CQE_REQ_STS_REMOTE_OPERATION_ERR,
ROCE_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
ROCE_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
MAX_ROCE_CQE_REQUESTER_STATUS_ENUM
};
/*
* CQE responder status enumeration
*/
enum roce_cqe_responder_status_enum {
ROCE_CQE_RESP_STS_OK,
ROCE_CQE_RESP_STS_LOCAL_ACCESS_ERR,
ROCE_CQE_RESP_STS_LOCAL_LENGTH_ERR,
ROCE_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR,
ROCE_CQE_RESP_STS_LOCAL_PROTECTION_ERR,
ROCE_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR,
ROCE_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR,
ROCE_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR,
MAX_ROCE_CQE_RESPONDER_STATUS_ENUM
};
/*
* CQE type enumeration
*/
enum roce_cqe_type {
ROCE_CQE_TYPE_REQUESTER,
ROCE_CQE_TYPE_RESPONDER_RQ,
ROCE_CQE_TYPE_RESPONDER_SRQ,
ROCE_CQE_TYPE_INVALID,
MAX_ROCE_CQE_TYPE
};
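/* Minimal sketch of classifying a CQE with the structures above (the "cqe"
 * pointer is illustrative; toggle-bit and ownership handling of the real poll
 * loop are intentionally omitted):
 *
 *   u8 type = (cqe->cmn.flags >> ROCE_CQE_COMMON_TYPE_SHIFT) &
 *             ROCE_CQE_COMMON_TYPE_MASK;
 *   if (type == ROCE_CQE_TYPE_REQUESTER)
 *           ;       // complete a send WR using cqe->req
 *   else if (type == ROCE_CQE_TYPE_RESPONDER_RQ)
 *           ;       // complete a receive WR using cqe->resp
 */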
/*
* memory window type enumeration
*/
enum roce_mw_type {
ROCE_MW_TYPE_1,
ROCE_MW_TYPE_2A,
MAX_ROCE_MW_TYPE
};
struct roce_rq_sge {
struct regpair addr;
__le32 length;
__le32 flags;
#define ROCE_RQ_SGE_L_KEY_MASK 0x3FFFFFF
#define ROCE_RQ_SGE_L_KEY_SHIFT 0
#define ROCE_RQ_SGE_NUM_SGES_MASK 0x7
#define ROCE_RQ_SGE_NUM_SGES_SHIFT 26
#define ROCE_RQ_SGE_RESERVED0_MASK 0x7
#define ROCE_RQ_SGE_RESERVED0_SHIFT 29
};
struct roce_sq_atomic_wqe {
struct regpair remote_va;
__le32 xrc_srq;
u8 req_type;
u8 flags;
#define ROCE_SQ_ATOMIC_WQE_COMP_FLG_MASK 0x1
#define ROCE_SQ_ATOMIC_WQE_COMP_FLG_SHIFT 0
#define ROCE_SQ_ATOMIC_WQE_RD_FENCE_FLG_MASK 0x1
#define ROCE_SQ_ATOMIC_WQE_RD_FENCE_FLG_SHIFT 1
#define ROCE_SQ_ATOMIC_WQE_INV_FENCE_FLG_MASK 0x1
#define ROCE_SQ_ATOMIC_WQE_INV_FENCE_FLG_SHIFT 2
#define ROCE_SQ_ATOMIC_WQE_SE_FLG_MASK 0x1
#define ROCE_SQ_ATOMIC_WQE_SE_FLG_SHIFT 3
#define ROCE_SQ_ATOMIC_WQE_INLINE_FLG_MASK 0x1
#define ROCE_SQ_ATOMIC_WQE_INLINE_FLG_SHIFT 4
#define ROCE_SQ_ATOMIC_WQE_RESERVED0_MASK 0x7
#define ROCE_SQ_ATOMIC_WQE_RESERVED0_SHIFT 5
u8 reserved1;
u8 prev_wqe_size;
struct regpair swap_data;
__le32 r_key;
__le32 reserved2;
struct regpair cmp_data;
struct regpair reserved3;
};
/*
* First element (16 bytes) of atomic wqe
*/
struct roce_sq_atomic_wqe_1st {
struct regpair remote_va;
__le32 xrc_srq;
u8 req_type;
u8 flags;
#define ROCE_SQ_ATOMIC_WQE_1ST_COMP_FLG_MASK 0x1
#define ROCE_SQ_ATOMIC_WQE_1ST_COMP_FLG_SHIFT 0
#define ROCE_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_MASK 0x1
#define ROCE_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_SHIFT 1
#define ROCE_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_MASK 0x1
#define ROCE_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define ROCE_SQ_ATOMIC_WQE_1ST_SE_FLG_MASK 0x1
#define ROCE_SQ_ATOMIC_WQE_1ST_SE_FLG_SHIFT 3
#define ROCE_SQ_ATOMIC_WQE_1ST_INLINE_FLG_MASK 0x1
#define ROCE_SQ_ATOMIC_WQE_1ST_INLINE_FLG_SHIFT 4
#define ROCE_SQ_ATOMIC_WQE_1ST_RESERVED0_MASK 0x7
#define ROCE_SQ_ATOMIC_WQE_1ST_RESERVED0_SHIFT 5
u8 reserved1;
u8 prev_wqe_size;
};
/*
* Second element (16 bytes) of atomic wqe
*/
struct roce_sq_atomic_wqe_2nd {
struct regpair swap_data;
__le32 r_key;
__le32 reserved2;
};
/*
* Third element (16 bytes) of atomic wqe
*/
struct roce_sq_atomic_wqe_3rd {
struct regpair cmp_data;
struct regpair reserved3;
};
struct roce_sq_bind_wqe {
struct regpair addr;
__le32 l_key;
u8 req_type;
u8 flags;
#define ROCE_SQ_BIND_WQE_COMP_FLG_MASK 0x1
#define ROCE_SQ_BIND_WQE_COMP_FLG_SHIFT 0
#define ROCE_SQ_BIND_WQE_RD_FENCE_FLG_MASK 0x1
#define ROCE_SQ_BIND_WQE_RD_FENCE_FLG_SHIFT 1
#define ROCE_SQ_BIND_WQE_INV_FENCE_FLG_MASK 0x1
#define ROCE_SQ_BIND_WQE_INV_FENCE_FLG_SHIFT 2
#define ROCE_SQ_BIND_WQE_SE_FLG_MASK 0x1
#define ROCE_SQ_BIND_WQE_SE_FLG_SHIFT 3
#define ROCE_SQ_BIND_WQE_INLINE_FLG_MASK 0x1
#define ROCE_SQ_BIND_WQE_INLINE_FLG_SHIFT 4
#define ROCE_SQ_BIND_WQE_RESERVED0_MASK 0x7
#define ROCE_SQ_BIND_WQE_RESERVED0_SHIFT 5
u8 access_ctrl;
#define ROCE_SQ_BIND_WQE_REMOTE_READ_MASK 0x1
#define ROCE_SQ_BIND_WQE_REMOTE_READ_SHIFT 0
#define ROCE_SQ_BIND_WQE_REMOTE_WRITE_MASK 0x1
#define ROCE_SQ_BIND_WQE_REMOTE_WRITE_SHIFT 1
#define ROCE_SQ_BIND_WQE_ENABLE_ATOMIC_MASK 0x1
#define ROCE_SQ_BIND_WQE_ENABLE_ATOMIC_SHIFT 2
#define ROCE_SQ_BIND_WQE_LOCAL_READ_MASK 0x1
#define ROCE_SQ_BIND_WQE_LOCAL_READ_SHIFT 3
#define ROCE_SQ_BIND_WQE_LOCAL_WRITE_MASK 0x1
#define ROCE_SQ_BIND_WQE_LOCAL_WRITE_SHIFT 4
#define ROCE_SQ_BIND_WQE_RESERVED1_MASK 0x7
#define ROCE_SQ_BIND_WQE_RESERVED1_SHIFT 5
u8 prev_wqe_size;
u8 bind_ctrl;
#define ROCE_SQ_BIND_WQE_ZERO_BASED_MASK 0x1
#define ROCE_SQ_BIND_WQE_ZERO_BASED_SHIFT 0
#define ROCE_SQ_BIND_WQE_MW_TYPE_MASK 0x1
#define ROCE_SQ_BIND_WQE_MW_TYPE_SHIFT 1
#define ROCE_SQ_BIND_WQE_RESERVED2_MASK 0x3F
#define ROCE_SQ_BIND_WQE_RESERVED2_SHIFT 2
u8 reserved3[2];
u8 length_hi;
__le32 length_lo;
__le32 parent_l_key;
__le32 reserved6;
};
/*
* First element (16 bytes) of bind wqe
*/
struct roce_sq_bind_wqe_1st {
struct regpair addr;
__le32 l_key;
u8 req_type;
u8 flags;
#define ROCE_SQ_BIND_WQE_1ST_COMP_FLG_MASK 0x1
#define ROCE_SQ_BIND_WQE_1ST_COMP_FLG_SHIFT 0
#define ROCE_SQ_BIND_WQE_1ST_RD_FENCE_FLG_MASK 0x1
#define ROCE_SQ_BIND_WQE_1ST_RD_FENCE_FLG_SHIFT 1
#define ROCE_SQ_BIND_WQE_1ST_INV_FENCE_FLG_MASK 0x1
#define ROCE_SQ_BIND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define ROCE_SQ_BIND_WQE_1ST_SE_FLG_MASK 0x1
#define ROCE_SQ_BIND_WQE_1ST_SE_FLG_SHIFT 3
#define ROCE_SQ_BIND_WQE_1ST_INLINE_FLG_MASK 0x1
#define ROCE_SQ_BIND_WQE_1ST_INLINE_FLG_SHIFT 4
#define ROCE_SQ_BIND_WQE_1ST_RESERVED0_MASK 0x7
#define ROCE_SQ_BIND_WQE_1ST_RESERVED0_SHIFT 5
u8 access_ctrl;
#define ROCE_SQ_BIND_WQE_1ST_REMOTE_READ_MASK 0x1
#define ROCE_SQ_BIND_WQE_1ST_REMOTE_READ_SHIFT 0
#define ROCE_SQ_BIND_WQE_1ST_REMOTE_WRITE_MASK 0x1
#define ROCE_SQ_BIND_WQE_1ST_REMOTE_WRITE_SHIFT 1
#define ROCE_SQ_BIND_WQE_1ST_ENABLE_ATOMIC_MASK 0x1
#define ROCE_SQ_BIND_WQE_1ST_ENABLE_ATOMIC_SHIFT 2
#define ROCE_SQ_BIND_WQE_1ST_LOCAL_READ_MASK 0x1
#define ROCE_SQ_BIND_WQE_1ST_LOCAL_READ_SHIFT 3
#define ROCE_SQ_BIND_WQE_1ST_LOCAL_WRITE_MASK 0x1
#define ROCE_SQ_BIND_WQE_1ST_LOCAL_WRITE_SHIFT 4
#define ROCE_SQ_BIND_WQE_1ST_RESERVED1_MASK 0x7
#define ROCE_SQ_BIND_WQE_1ST_RESERVED1_SHIFT 5
u8 prev_wqe_size;
};
/*
* Second element (16 bytes) of bind wqe
*/
struct roce_sq_bind_wqe_2nd {
u8 bind_ctrl;
#define ROCE_SQ_BIND_WQE_2ND_ZERO_BASED_MASK 0x1
#define ROCE_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT 0
#define ROCE_SQ_BIND_WQE_2ND_MW_TYPE_MASK 0x1
#define ROCE_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT 1
#define ROCE_SQ_BIND_WQE_2ND_RESERVED2_MASK 0x3F
#define ROCE_SQ_BIND_WQE_2ND_RESERVED2_SHIFT 2
u8 reserved3[2];
u8 length_hi;
__le32 length_lo;
__le32 parent_l_key;
__le32 reserved6;
};
/*
* Structure with only the SQ WQE common fields. Its size is that of one SQ element (16B)
*/
struct roce_sq_common_wqe {
__le32 reserved1[3];
u8 req_type;
u8 flags;
#define ROCE_SQ_COMMON_WQE_COMP_FLG_MASK 0x1
#define ROCE_SQ_COMMON_WQE_COMP_FLG_SHIFT 0
#define ROCE_SQ_COMMON_WQE_RD_FENCE_FLG_MASK 0x1
#define ROCE_SQ_COMMON_WQE_RD_FENCE_FLG_SHIFT 1
#define ROCE_SQ_COMMON_WQE_INV_FENCE_FLG_MASK 0x1
#define ROCE_SQ_COMMON_WQE_INV_FENCE_FLG_SHIFT 2
#define ROCE_SQ_COMMON_WQE_SE_FLG_MASK 0x1
#define ROCE_SQ_COMMON_WQE_SE_FLG_SHIFT 3
#define ROCE_SQ_COMMON_WQE_INLINE_FLG_MASK 0x1
#define ROCE_SQ_COMMON_WQE_INLINE_FLG_SHIFT 4
#define ROCE_SQ_COMMON_WQE_RESERVED0_MASK 0x7
#define ROCE_SQ_COMMON_WQE_RESERVED0_SHIFT 5
u8 reserved2;
u8 prev_wqe_size;
};
struct roce_sq_fmr_wqe {
struct regpair addr;
__le32 l_key;
u8 req_type;
u8 flags;
#define ROCE_SQ_FMR_WQE_COMP_FLG_MASK 0x1
#define ROCE_SQ_FMR_WQE_COMP_FLG_SHIFT 0
#define ROCE_SQ_FMR_WQE_RD_FENCE_FLG_MASK 0x1
#define ROCE_SQ_FMR_WQE_RD_FENCE_FLG_SHIFT 1
#define ROCE_SQ_FMR_WQE_INV_FENCE_FLG_MASK 0x1
#define ROCE_SQ_FMR_WQE_INV_FENCE_FLG_SHIFT 2
#define ROCE_SQ_FMR_WQE_SE_FLG_MASK 0x1
#define ROCE_SQ_FMR_WQE_SE_FLG_SHIFT 3
#define ROCE_SQ_FMR_WQE_INLINE_FLG_MASK 0x1
#define ROCE_SQ_FMR_WQE_INLINE_FLG_SHIFT 4
#define ROCE_SQ_FMR_WQE_RESERVED0_MASK 0x7
#define ROCE_SQ_FMR_WQE_RESERVED0_SHIFT 5
u8 access_ctrl;
#define ROCE_SQ_FMR_WQE_REMOTE_READ_MASK 0x1
#define ROCE_SQ_FMR_WQE_REMOTE_READ_SHIFT 0
#define ROCE_SQ_FMR_WQE_REMOTE_WRITE_MASK 0x1
#define ROCE_SQ_FMR_WQE_REMOTE_WRITE_SHIFT 1
#define ROCE_SQ_FMR_WQE_ENABLE_ATOMIC_MASK 0x1
#define ROCE_SQ_FMR_WQE_ENABLE_ATOMIC_SHIFT 2
#define ROCE_SQ_FMR_WQE_LOCAL_READ_MASK 0x1
#define ROCE_SQ_FMR_WQE_LOCAL_READ_SHIFT 3
#define ROCE_SQ_FMR_WQE_LOCAL_WRITE_MASK 0x1
#define ROCE_SQ_FMR_WQE_LOCAL_WRITE_SHIFT 4
#define ROCE_SQ_FMR_WQE_RESERVED1_MASK 0x7
#define ROCE_SQ_FMR_WQE_RESERVED1_SHIFT 5
u8 prev_wqe_size;
u8 fmr_ctrl;
#define ROCE_SQ_FMR_WQE_PAGE_SIZE_LOG_MASK 0x1F
#define ROCE_SQ_FMR_WQE_PAGE_SIZE_LOG_SHIFT 0
#define ROCE_SQ_FMR_WQE_ZERO_BASED_MASK 0x1
#define ROCE_SQ_FMR_WQE_ZERO_BASED_SHIFT 5
#define ROCE_SQ_FMR_WQE_BIND_EN_MASK 0x1
#define ROCE_SQ_FMR_WQE_BIND_EN_SHIFT 6
#define ROCE_SQ_FMR_WQE_RESERVED2_MASK 0x1
#define ROCE_SQ_FMR_WQE_RESERVED2_SHIFT 7
u8 reserved3[2];
u8 length_hi;
__le32 length_lo;
struct regpair pbl_addr;
};
/*
* First element (16 bytes) of fmr wqe
*/
struct roce_sq_fmr_wqe_1st {
struct regpair addr;
__le32 l_key;
u8 req_type;
u8 flags;
#define ROCE_SQ_FMR_WQE_1ST_COMP_FLG_MASK 0x1
#define ROCE_SQ_FMR_WQE_1ST_COMP_FLG_SHIFT 0
#define ROCE_SQ_FMR_WQE_1ST_RD_FENCE_FLG_MASK 0x1
#define ROCE_SQ_FMR_WQE_1ST_RD_FENCE_FLG_SHIFT 1
#define ROCE_SQ_FMR_WQE_1ST_INV_FENCE_FLG_MASK 0x1
#define ROCE_SQ_FMR_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define ROCE_SQ_FMR_WQE_1ST_SE_FLG_MASK 0x1
#define ROCE_SQ_FMR_WQE_1ST_SE_FLG_SHIFT 3
#define ROCE_SQ_FMR_WQE_1ST_INLINE_FLG_MASK 0x1
#define ROCE_SQ_FMR_WQE_1ST_INLINE_FLG_SHIFT 4
#define ROCE_SQ_FMR_WQE_1ST_RESERVED0_MASK 0x7
#define ROCE_SQ_FMR_WQE_1ST_RESERVED0_SHIFT 5
u8 access_ctrl;
#define ROCE_SQ_FMR_WQE_1ST_REMOTE_READ_MASK 0x1
#define ROCE_SQ_FMR_WQE_1ST_REMOTE_READ_SHIFT 0
#define ROCE_SQ_FMR_WQE_1ST_REMOTE_WRITE_MASK 0x1
#define ROCE_SQ_FMR_WQE_1ST_REMOTE_WRITE_SHIFT 1
#define ROCE_SQ_FMR_WQE_1ST_ENABLE_ATOMIC_MASK 0x1
#define ROCE_SQ_FMR_WQE_1ST_ENABLE_ATOMIC_SHIFT 2
#define ROCE_SQ_FMR_WQE_1ST_LOCAL_READ_MASK 0x1
#define ROCE_SQ_FMR_WQE_1ST_LOCAL_READ_SHIFT 3
#define ROCE_SQ_FMR_WQE_1ST_LOCAL_WRITE_MASK 0x1
#define ROCE_SQ_FMR_WQE_1ST_LOCAL_WRITE_SHIFT 4
#define ROCE_SQ_FMR_WQE_1ST_RESERVED1_MASK 0x7
#define ROCE_SQ_FMR_WQE_1ST_RESERVED1_SHIFT 5
u8 prev_wqe_size;
};
/*
* Second element (16 bytes) of fmr wqe
*/
struct roce_sq_fmr_wqe_2nd {
u8 fmr_ctrl;
#define ROCE_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_MASK 0x1F
#define ROCE_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_SHIFT 0
#define ROCE_SQ_FMR_WQE_2ND_ZERO_BASED_MASK 0x1
#define ROCE_SQ_FMR_WQE_2ND_ZERO_BASED_SHIFT 5
#define ROCE_SQ_FMR_WQE_2ND_BIND_EN_MASK 0x1
#define ROCE_SQ_FMR_WQE_2ND_BIND_EN_SHIFT 6
#define ROCE_SQ_FMR_WQE_2ND_RESERVED2_MASK 0x1
#define ROCE_SQ_FMR_WQE_2ND_RESERVED2_SHIFT 7
u8 reserved3[2];
u8 length_hi;
__le32 length_lo;
struct regpair pbl_addr;
};
struct roce_sq_local_inv_wqe {
struct regpair reserved;
__le32 inv_l_key;
u8 req_type;
u8 flags;
#define ROCE_SQ_LOCAL_INV_WQE_COMP_FLG_MASK 0x1
#define ROCE_SQ_LOCAL_INV_WQE_COMP_FLG_SHIFT 0
#define ROCE_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_MASK 0x1
#define ROCE_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_SHIFT 1
#define ROCE_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_MASK 0x1
#define ROCE_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_SHIFT 2
#define ROCE_SQ_LOCAL_INV_WQE_SE_FLG_MASK 0x1
#define ROCE_SQ_LOCAL_INV_WQE_SE_FLG_SHIFT 3
#define ROCE_SQ_LOCAL_INV_WQE_INLINE_FLG_MASK 0x1
#define ROCE_SQ_LOCAL_INV_WQE_INLINE_FLG_SHIFT 4
#define ROCE_SQ_LOCAL_INV_WQE_RESERVED0_MASK 0x7
#define ROCE_SQ_LOCAL_INV_WQE_RESERVED0_SHIFT 5
u8 reserved1;
u8 prev_wqe_size;
};
struct roce_sq_rdma_wqe {
__le32 imm_data;
__le32 length;
__le32 xrc_srq;
u8 req_type;
u8 flags;
#define ROCE_SQ_RDMA_WQE_COMP_FLG_MASK 0x1
#define ROCE_SQ_RDMA_WQE_COMP_FLG_SHIFT 0
#define ROCE_SQ_RDMA_WQE_RD_FENCE_FLG_MASK 0x1
#define ROCE_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT 1
#define ROCE_SQ_RDMA_WQE_INV_FENCE_FLG_MASK 0x1
#define ROCE_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT 2
#define ROCE_SQ_RDMA_WQE_SE_FLG_MASK 0x1
#define ROCE_SQ_RDMA_WQE_SE_FLG_SHIFT 3
#define ROCE_SQ_RDMA_WQE_INLINE_FLG_MASK 0x1
#define ROCE_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4
#define ROCE_SQ_RDMA_WQE_RESERVED0_MASK 0x7
#define ROCE_SQ_RDMA_WQE_RESERVED0_SHIFT 5
u8 wqe_size;
u8 prev_wqe_size;
struct regpair remote_va;
__le32 r_key;
__le32 reserved1;
};
/*
* First element (16 bytes) of rdma wqe
*/
struct roce_sq_rdma_wqe_1st {
__le32 imm_data;
__le32 length;
__le32 xrc_srq;
u8 req_type;
u8 flags;
#define ROCE_SQ_RDMA_WQE_1ST_COMP_FLG_MASK 0x1
#define ROCE_SQ_RDMA_WQE_1ST_COMP_FLG_SHIFT 0
#define ROCE_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_MASK 0x1
#define ROCE_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_SHIFT 1
#define ROCE_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_MASK 0x1
#define ROCE_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define ROCE_SQ_RDMA_WQE_1ST_SE_FLG_MASK 0x1
#define ROCE_SQ_RDMA_WQE_1ST_SE_FLG_SHIFT 3
#define ROCE_SQ_RDMA_WQE_1ST_INLINE_FLG_MASK 0x1
#define ROCE_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT 4
#define ROCE_SQ_RDMA_WQE_1ST_RESERVED0_MASK 0x7
#define ROCE_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT 5
u8 wqe_size;
u8 prev_wqe_size;
};
/*
* Second element (16 bytes) of rdma wqe
*/
struct roce_sq_rdma_wqe_2nd {
struct regpair remote_va;
__le32 r_key;
__le32 reserved1;
};
/*
* SQ WQE req type enumeration
*/
enum roce_sq_req_type {
ROCE_SQ_REQ_TYPE_SEND,
ROCE_SQ_REQ_TYPE_SEND_WITH_IMM,
ROCE_SQ_REQ_TYPE_SEND_WITH_INVALIDATE,
ROCE_SQ_REQ_TYPE_RDMA_WR,
ROCE_SQ_REQ_TYPE_RDMA_WR_WITH_IMM,
ROCE_SQ_REQ_TYPE_RDMA_RD,
ROCE_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP,
ROCE_SQ_REQ_TYPE_ATOMIC_ADD,
ROCE_SQ_REQ_TYPE_LOCAL_INVALIDATE,
ROCE_SQ_REQ_TYPE_FAST_MR,
ROCE_SQ_REQ_TYPE_BIND,
ROCE_SQ_REQ_TYPE_INVALID,
MAX_ROCE_SQ_REQ_TYPE
};
struct roce_sq_send_wqe {
__le32 inv_key_or_imm_data;
__le32 length;
__le32 xrc_srq;
u8 req_type;
u8 flags;
#define ROCE_SQ_SEND_WQE_COMP_FLG_MASK 0x1
#define ROCE_SQ_SEND_WQE_COMP_FLG_SHIFT 0
#define ROCE_SQ_SEND_WQE_RD_FENCE_FLG_MASK 0x1
#define ROCE_SQ_SEND_WQE_RD_FENCE_FLG_SHIFT 1
#define ROCE_SQ_SEND_WQE_INV_FENCE_FLG_MASK 0x1
#define ROCE_SQ_SEND_WQE_INV_FENCE_FLG_SHIFT 2
#define ROCE_SQ_SEND_WQE_SE_FLG_MASK 0x1
#define ROCE_SQ_SEND_WQE_SE_FLG_SHIFT 3
#define ROCE_SQ_SEND_WQE_INLINE_FLG_MASK 0x1
#define ROCE_SQ_SEND_WQE_INLINE_FLG_SHIFT 4
#define ROCE_SQ_SEND_WQE_RESERVED0_MASK 0x7
#define ROCE_SQ_SEND_WQE_RESERVED0_SHIFT 5
u8 wqe_size;
u8 prev_wqe_size;
};
struct roce_sq_sge {
__le32 length;
struct regpair addr;
__le32 l_key;
};
struct roce_srq_prod {
__le16 prod;
};
struct roce_srq_sge {
struct regpair addr;
__le32 length;
__le32 l_key;
struct regpair wr_id;
u8 flags;
#define ROCE_SRQ_SGE_NUM_SGES_MASK 0x3
#define ROCE_SRQ_SGE_NUM_SGES_SHIFT 0
#define ROCE_SRQ_SGE_RESERVED0_MASK 0x3F
#define ROCE_SRQ_SGE_RESERVED0_SHIFT 2
u8 reserved1;
__le16 reserved2;
__le32 reserved3;
};
/*
* RoCE doorbell data for SQ and RQ
*/
struct roce_pwm_val16_data {
__le16 icid;
__le16 prod_val;
};
union roce_pwm_val16_data_union {
struct roce_pwm_val16_data as_struct;
__le32 as_dword;
};
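/* Sketch of composing the SQ/RQ doorbell payload (illustrative only; the
 * "qp_icid" and "sq_prod" values are placeholders, and the actual doorbell
 * write is left to the driver):
 *
 *   union roce_pwm_val16_data_union db;
 *   db.as_struct.icid = cpu_to_le16(qp_icid);
 *   db.as_struct.prod_val = cpu_to_le16(sq_prod);
 *   // db.as_dword is then written to the doorbell BAR
 */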
/*
* RoCE doorbell data for CQ
*/
struct roce_pwm_val32_data {
__le16 icid;
u8 agg_flags;
u8 params;
#define ROCE_PWM_VAL32_DATA_AGG_CMD_MASK 0x3
#define ROCE_PWM_VAL32_DATA_AGG_CMD_SHIFT 0
#define ROCE_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1
#define ROCE_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
#define ROCE_PWM_VAL32_DATA_RESERVED_MASK 0x1F
#define ROCE_PWM_VAL32_DATA_RESERVED_SHIFT 3
__le32 cq_cons_val;
};
union roce_pwm_val32_data_union {
struct roce_pwm_val32_data as_struct;
struct regpair as_repair;
};
#endif /* __QLNXR_ROCE_H__ */


@ -0,0 +1,112 @@
/*
* Copyright (c) 2018-2019 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __QLNXR_USER_H__
#define __QLNXR_USER_H__
#define QLNXR_ABI_VERSION (7)
#define QLNXR_BE_ROCE_ABI_VERSION (1)
/* user/kernel communication data structures. */
struct qlnxr_alloc_ucontext_resp {
u64 db_pa;
u32 db_size;
uint32_t max_send_wr;
uint32_t max_recv_wr;
uint32_t max_srq_wr;
uint32_t sges_per_send_wr;
uint32_t sges_per_recv_wr;
uint32_t sges_per_srq_wr;
int max_cqes;
uint8_t dpm_enabled;
uint8_t wids_enabled;
uint16_t wid_count;
};
struct qlnxr_alloc_pd_ureq {
u64 rsvd1;
};
struct qlnxr_alloc_pd_uresp {
u32 pd_id;
};
struct qlnxr_create_cq_ureq {
uint64_t addr; /* user space virtual address of CQ buffer */
size_t len; /* size of CQ buffer */
};
struct qlnxr_create_cq_uresp {
u32 db_offset;
u16 icid;
};
struct qlnxr_create_qp_ureq {
u32 qp_handle_hi;
u32 qp_handle_lo;
/* SQ */
uint64_t sq_addr; /* user space virtual address of SQ buffer */
size_t sq_len; /* length of SQ buffer */
/* RQ */
uint64_t rq_addr; /* user space virtual address of RQ buffer */
size_t rq_len; /* length of RQ buffer */
};
struct qlnxr_create_qp_uresp {
u32 qp_id;
int atomic_supported;
/* SQ*/
u32 sq_db_offset;
u16 sq_icid;
/* RQ */
u32 rq_db_offset;
u16 rq_icid;
u32 rq_db2_offset;
};
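/* Typical exchange for a user-space QP (a sketch of the ABI above, not a
 * normative sequence): the user library allocates the SQ/RQ buffers, passes
 * their virtual addresses and lengths in qlnxr_create_qp_ureq via ib_udata,
 * and the kernel replies with the firmware icids and the doorbell offsets to
 * map in qlnxr_create_qp_uresp.
 */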
struct qlnxr_create_srq_ureq {
/* user space virtual address of producer pair */
uint64_t prod_pair_addr;
uint64_t srq_addr; /* user space virtual address of SRQ buffer */
size_t srq_len; /* length of SRQ buffer */
};
struct qlnxr_create_srq_uresp {
u16 srq_id;
};
#endif /* #ifndef __QLNXR_USER_H__ */

File diff suppressed because it is too large


@ -0,0 +1,267 @@
/*
* Copyright (c) 2018-2019 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __QLNXR_VERBS_H__
#define __QLNXR_VERBS_H__
extern int qlnxr_iw_query_gid(struct ib_device *,
uint8_t port,
int index,
union ib_gid *gid);
extern int qlnxr_query_gid(struct ib_device *,
u8 port,
int index,
union ib_gid *gid);
extern struct ib_srq *qlnxr_create_srq(struct ib_pd *,
struct ib_srq_init_attr *,
struct ib_udata *);
extern int qlnxr_destroy_srq(struct ib_srq *);
extern int qlnxr_modify_srq(struct ib_srq *,
struct ib_srq_attr *,
enum ib_srq_attr_mask,
struct ib_udata *);
extern int qlnxr_query_srq(struct ib_srq *,
struct ib_srq_attr *);
extern int qlnxr_post_srq_recv(struct ib_srq *,
struct ib_recv_wr *,
struct ib_recv_wr **bad_recv_wr);
#if __FreeBSD_version < 1102000
extern int qlnxr_query_device(struct ib_device *, struct ib_device_attr *);
#else
extern int qlnxr_query_device(struct ib_device *, struct ib_device_attr *,
struct ib_udata *);
extern int qlnxr_get_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable);
#endif
extern int qlnxr_query_port(struct ib_device *,
u8 port,
struct ib_port_attr *props);
extern int qlnxr_modify_port(struct ib_device *,
u8 port,
int mask,
struct ib_port_modify *props);
extern enum rdma_link_layer qlnxr_link_layer(struct ib_device *device,
uint8_t port_num);
struct ib_pd *qlnxr_alloc_pd(struct ib_device *,
struct ib_ucontext *,
struct ib_udata *);
extern int qlnxr_dealloc_pd(struct ib_pd *pd);
#if __FreeBSD_version >= 1102000
extern struct ib_cq *qlnxr_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *ib_ctx,
struct ib_udata *udata);
#else
#if __FreeBSD_version >= 1100000
extern struct ib_cq *qlnxr_create_cq(struct ib_device *ibdev,
struct ib_cq_init_attr *attr,
struct ib_ucontext *ib_ctx,
struct ib_udata *udata);
#else
extern struct ib_cq *qlnxr_create_cq(struct ib_device *ibdev,
int cqe,
int comp_vector,
struct ib_ucontext *ib_ctx,
struct ib_udata *udata);
#endif
#endif /* #if __FreeBSD_version >= 1102000 */
extern int qlnxr_destroy_cq(struct ib_cq *);
extern int qlnxr_resize_cq(struct ib_cq *,
int cqe,
struct ib_udata *);
extern int qlnxr_poll_cq(struct ib_cq *,
int num_entries,
struct ib_wc *wc);
extern struct ib_qp *qlnxr_create_qp(struct ib_pd *,
struct ib_qp_init_attr *attrs,
struct ib_udata *);
extern int qlnxr_modify_qp(struct ib_qp *,
struct ib_qp_attr *attr,
int attr_mask,
struct ib_udata *udata);
extern int qlnxr_query_qp(struct ib_qp *,
struct ib_qp_attr *qp_attr,
int qp_attr_mask,
struct ib_qp_init_attr *);
extern int qlnxr_destroy_qp(struct ib_qp *);
extern int qlnxr_query_pkey(struct ib_device *,
u8 port,
u16 index,
u16 *pkey);
#if __FreeBSD_version >= 1102000
extern struct ib_ah *qlnxr_create_ah(struct ib_pd *ibpd,
struct ib_ah_attr *attr, struct ib_udata *udata);
#else
extern struct ib_ah *qlnxr_create_ah(struct ib_pd *ibpd,
struct ib_ah_attr *attr);
#endif /* #if __FreeBSD_version >= 1102000 */
extern int qlnxr_destroy_ah(struct ib_ah *ibah);
extern int qlnxr_query_ah(struct ib_ah *ibah,
struct ib_ah_attr *attr);
extern int qlnxr_modify_ah(struct ib_ah *ibah,
struct ib_ah_attr *attr);
#if __FreeBSD_version >= 1102000
extern int qlnxr_process_mad(struct ib_device *ibdev,
int process_mad_flags,
u8 port_num,
const struct ib_wc *in_wc,
const struct ib_grh *in_grh,
const struct ib_mad_hdr *mad_hdr,
size_t in_mad_size,
struct ib_mad_hdr *out_mad,
size_t *out_mad_size,
u16 *out_mad_pkey_index);
#else
extern int qlnxr_process_mad(struct ib_device *ibdev,
int process_mad_flags,
u8 port_num,
struct ib_wc *in_wc,
struct ib_grh *in_grh,
struct ib_mad *in_mad,
struct ib_mad *out_mad);
#endif /* #if __FreeBSD_version >= 1102000 */
extern int qlnxr_post_send(struct ib_qp *,
struct ib_send_wr *,
struct ib_send_wr **bad_wr);
extern int qlnxr_post_recv(struct ib_qp *,
struct ib_recv_wr *,
struct ib_recv_wr **bad_wr);
extern int qlnxr_arm_cq(struct ib_cq *,
enum ib_cq_notify_flags flags);
extern struct ib_mr *qlnxr_get_dma_mr(struct ib_pd *,
int acc);
#if __FreeBSD_version < 1102000
extern struct ib_mr *qlnxr_reg_kernel_mr(struct ib_pd *,
struct ib_phys_buf *buffer_list,
int num_phys_buf,
int acc,
u64 *iova_start);
#endif /* #if __FreeBSD_version < 1102000 */
extern int qlnxr_dereg_mr(struct ib_mr *);
#if __FreeBSD_version >= 1102000
extern struct ib_mr *qlnxr_reg_user_mr(struct ib_pd *,
u64 start,
u64 length,
u64 virt,
int acc,
struct ib_udata *);
#else
extern struct ib_mr *qlnxr_reg_user_mr(struct ib_pd *,
u64 start,
u64 length,
u64 virt,
int acc,
struct ib_udata *,
int mr_id);
#endif /* #if __FreeBSD_version >= 1102000 */
#if __FreeBSD_version >= 1102000
extern struct ib_mr *qlnxr_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type, u32 max_num_sg);
extern int qlnxr_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
#else
extern struct ib_mr *qlnxr_alloc_frmr(struct ib_pd *pd,
int max_page_list_len);
extern struct ib_fast_reg_page_list *qlnxr_alloc_frmr_page_list(
struct ib_device *ibdev,
int page_list_len);
extern void qlnxr_free_frmr_page_list(struct ib_fast_reg_page_list *page_list);
#endif /* #if __FreeBSD_version >= 1102000 */
extern struct ib_ucontext *qlnxr_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata);
extern int qlnxr_dealloc_ucontext(struct ib_ucontext *ibctx);
extern int qlnxr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
extern int qlnxr_iw_connect(struct iw_cm_id *cm_id,
struct iw_cm_conn_param *conn_param);
extern int qlnxr_iw_create_listen(struct iw_cm_id *cm_id, int backlog);
void qlnxr_iw_destroy_listen(struct iw_cm_id *cm_id);
extern int qlnxr_iw_accept(struct iw_cm_id *cm_id,
struct iw_cm_conn_param *conn_param);
extern int qlnxr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
extern void qlnxr_iw_qp_add_ref(struct ib_qp *qp);
extern void qlnxr_iw_qp_rem_ref(struct ib_qp *qp);
extern struct ib_qp *qlnxr_iw_get_qp(struct ib_device *dev, int qpn);
#endif /* #ifndef __QLNXR_VERBS_H__ */


@ -37,5 +37,6 @@ SYSDIR?=${SRCTOP}/sys
SUBDIR=qlnxe
SUBDIR+=qlnxev
SUBDIR+=qlnxr
.include <bsd.subdir.mk>


@ -47,6 +47,17 @@ SRCS+=ecore_mng_tlv.c
SRCS+=ecore_sriov.c
SRCS+=ecore_vf.c
#roce/iwarp files. Compilation can be turned off if roce/iwarp are not required.
# In other words if you don't need RDMA please comment out SRCS adds for
# ecore_rdma.c ecore_roce.c ecore_iwarp.c ecore_ooo.c ecore_ll2.c qlnx_rdma.c
SRCS+=ecore_rdma.c
SRCS+=ecore_roce.c
SRCS+=ecore_iwarp.c
SRCS+=ecore_ooo.c
SRCS+=ecore_ll2.c
SRCS+=qlnx_rdma.c
SRCS+=qlnx_ioctl.c
SRCS+=qlnx_os.c
@ -75,4 +86,13 @@ CFLAGS+= -I${SRCTOP}/sys/compat/linuxkpi/common/include
CFLAGS += -DCONFIG_ECORE_SRIOV
# For roce/iwarp files. Compilation can be turned off if roce/iwarp are not required.
# In other words if you don't need RDMA please comment out the CFLAGS which define
# CONFIG_ECORE_LL2 CONFIG_ECORE_ROCE CONFIG_ECORE_IWARP QLNX_ENABLE_IWARP
CFLAGS += -DCONFIG_ECORE_LL2
CFLAGS += -DCONFIG_ECORE_ROCE
CFLAGS += -DCONFIG_ECORE_IWARP
CFLAGS += -DCONFIG_ECORE_RDMA
CFLAGS += -DQLNX_ENABLE_IWARP
CWARNFLAGS+= -Wno-cast-qual


@ -0,0 +1,85 @@
#/*
# * Copyright (c) 2017-2018 Cavium, Inc.
# * All rights reserved.
# *
# * Redistribution and use in source and binary forms, with or without
# * modification, are permitted provided that the following conditions
# * are met:
# *
# * 1. Redistributions of source code must retain the above copyright
# * notice, this list of conditions and the following disclaimer.
# * 2. Redistributions in binary form must reproduce the above copyright
# * notice, this list of conditions and the following disclaimer in the
# * documentation and/or other materials provided with the distribution.
# *
# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# * POSSIBILITY OF SUCH DAMAGE.
# */
#/*
# * File : Makefile
# * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
# */
#
# $FreeBSD$
#
#.PATH: ${.CURDIR}
#OFEDDIR= /usr/src/sys
#ETHDRVR=${.CURDIR}/../qlnxe
.PATH: ${SRCTOP}/sys/dev/qlnx/qlnxr
OFEDDIR=${SRCTOP}/sys
ETHDRVR=${SRCTOP}/sys/dev/qlnx/qlnxe
KMOD= qlnxr
SRCS= device_if.h bus_if.h vnode_if.h pci_if.h \
opt_inet.h opt_inet6.h \
qlnxr_os.c\
qlnxr_cm.c\
qlnxr_verbs.c
.include <bsd.kmod.mk>
CFLAGS+= -I${.CURDIR}
CFLAGS+= -I${ETHDRVR}
CFLAGS+= -I${OFEDDIR}/ofed/include
CFLAGS+= -I${OFEDDIR}/ofed/include/uapi
CFLAGS+= -I${OFEDDIR}/compat/linuxkpi/common/include
CFLAGS+= -DLINUX_TYPES_DEFINED
CFLAGS+= -DCONFIG_INFINIBAND_USER_MEM
CFLAGS+= -DINET6 -DINET
#CFLAGS+= -DDEFINE_NO_IP_BASED_GIDS
CWARNEXTRA += -Wno-cast-qual
CWARNEXTRA += -Wno-unused-function
CWARNEXTRA += -Wno-gnu-variable-sized-type-not-at-end
CWARNEXTRA += -Wno-missing-prototypes
CWARNEXTRA += -Wno-constant-conversion
CWARNEXTRA += -Wno-format
CWARNEXTRA += -Wno-shift-sign-overflow
CWARNEXTRA += -Wno-empty-body
CFLAGS += -DQLNX_DEBUG
CFLAGS += -DECORE_PACKAGE
CFLAGS += -DCONFIG_ECORE_L2
CFLAGS += -DCONFIG_ECORE_LL2
CFLAGS += -DCONFIG_ECORE_ROCE
CFLAGS += -DCONFIG_ECORE_IWARP
CFLAGS += -DCONFIG_ECORE_RDMA
CFLAGS += -DECORE_CONFIG_DIRECT_HWFN
CFLAGS += -g -fno-inline
CFLAGS += -DQLNX_RDMA
CFLAGS+= -Wno-cast-qual -Wno-pointer-arith