
Update mlx5ib(4) to match Linux 4.9 and the new ibcore APIs.

Sponsored by:	Mellanox Technologies
Hans Petter Selasky 2017-11-10 15:02:17 +00:00
parent f819030092
commit 8e6e287f8d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/bsd_rdma_4_9/; revision=325665
16 changed files with 7186 additions and 3614 deletions


@ -4623,6 +4623,8 @@ dev/mlx5/mlx5_ib/mlx5_ib_cq.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_doorbell.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_gsi.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_mad.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_main.c optional mlx5ib pci ofed \
@ -4633,10 +4635,10 @@ dev/mlx5/mlx5_ib/mlx5_ib_mr.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_qp.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_roce.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_srq.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_virt.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_alloc.c optional mlx5 pci \
compile-with "${OFED_C}"

File diff suppressed because it is too large.


@ -27,15 +27,11 @@
#include "mlx5_ib.h"
#define IPV6_DEFAULT_HOPLIMIT 64
struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
struct ib_ah_attr *ah_attr,
struct mlx5_ib_ah *ah, enum rdma_link_layer ll)
static struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
struct mlx5_ib_ah *ah,
struct ib_ah_attr *ah_attr,
enum rdma_link_layer ll)
{
int err;
int gid_type;
if (ah_attr->ah_flags & IB_AH_GRH) {
memcpy(ah->av.rgid, &ah_attr->grh.dgid, 16);
ah->av.grh_gid_fl = cpu_to_be32(ah_attr->grh.flow_label |
@ -48,21 +44,12 @@ struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
ah->av.stat_rate_sl = (ah_attr->static_rate << 4);
if (ll == IB_LINK_LAYER_ETHERNET) {
err = mlx5_get_roce_gid_type(dev, ah_attr->port_num,
ah_attr->grh.sgid_index,
&gid_type);
if (err)
return ERR_PTR(err);
memcpy(ah->av.rmac, ah_attr->dmac, sizeof(ah_attr->dmac));
ah->av.udp_sport = mlx5_get_roce_udp_sport(
dev,
ah_attr->port_num,
ah_attr->grh.sgid_index,
0);
ah->av.udp_sport =
mlx5_get_roce_udp_sport(dev,
ah_attr->port_num,
ah_attr->grh.sgid_index);
ah->av.stat_rate_sl |= (ah_attr->sl & 0x7) << 1;
ah->av.hop_limit = ah_attr->grh.hop_limit;
/* TODO: initialize other eth fields */
} else {
ah->av.rlid = cpu_to_be16(ah_attr->dlid);
ah->av.fl_mlid = ah_attr->src_path_bits & 0x7f;
@ -77,22 +64,17 @@ struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
struct mlx5_ib_ah *ah;
struct mlx5_ib_dev *dev = to_mdev(pd->device);
enum rdma_link_layer ll;
struct ib_ah *ret = ERR_PTR(-EINVAL);
ll = pd->device->get_link_layer(pd->device, ah_attr->port_num);
if (ll == IB_LINK_LAYER_ETHERNET && !(ah_attr->ah_flags & IB_AH_GRH))
return ERR_PTR(-EINVAL);
ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah)
return ERR_PTR(-ENOMEM);
ll = pd->device->get_link_layer(pd->device, ah_attr->port_num);
if (ll == IB_LINK_LAYER_ETHERNET && !(ah_attr->ah_flags & IB_AH_GRH))
goto err_kfree_ah;
return create_ib_ah(dev, ah_attr, ah, ll); /* never fails */
err_kfree_ah:
kfree(ah);
return ret;
return create_ib_ah(dev, ah, ah_attr, ll); /* never fails */
}
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)


@ -28,8 +28,8 @@
#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
#include "user.h"
static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
{
@ -96,15 +96,18 @@ static void *next_cqe_sw(struct mlx5_ib_cq *cq)
static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
switch (wq->swr_ctx[idx].wr_data) {
switch (wq->wr_data[idx]) {
case MLX5_IB_WR_UMR:
return 0;
case IB_WR_LOCAL_INV:
return IB_WC_LOCAL_INV;
case IB_WR_FAST_REG_MR:
return IB_WC_FAST_REG_MR;
case IB_WR_REG_MR:
return IB_WC_REG_MR;
default:
printf("mlx5_ib: WARN: ""unknown completion status\n");
pr_warn("unknown completion status\n");
return 0;
}
}
@ -121,7 +124,6 @@ static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
break;
case MLX5_OPCODE_SEND_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
case MLX5_OPCODE_NOP:
case MLX5_OPCODE_SEND:
case MLX5_OPCODE_SEND_INVAL:
wc->opcode = IB_WC_SEND;
@ -146,9 +148,6 @@ static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
wc->opcode = IB_WC_MASKED_FETCH_ADD;
wc->byte_len = 8;
break;
case MLX5_OPCODE_BIND_MW:
wc->opcode = IB_WC_BIND_MW;
break;
case MLX5_OPCODE_UMR:
wc->opcode = get_umr_comp(wq, idx);
break;
@ -163,14 +162,12 @@ enum {
static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
struct mlx5_ib_qp *qp)
{
enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
struct mlx5_ib_srq *srq;
struct mlx5_ib_wq *wq;
u16 wqe_ctr;
u8 g;
#if defined(DX_ROCE_V1_5) || defined(DX_WINDOWS)
u8 udp_header_valid;
#endif
if (qp->ibqp.srq || qp->ibqp.xrcd) {
struct mlx5_core_srq *msrq = NULL;
@ -191,7 +188,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
}
} else {
wq = &qp->rq;
wc->wr_id = wq->rwr_ctx[wq->tail & (wq->wqe_cnt - 1)].wrid;
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
++wq->tail;
}
wc->byte_len = be32_to_cpu(cqe->byte_cnt);
@ -204,7 +201,10 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
break;
case MLX5_CQE_RESP_SEND:
wc->opcode = IB_WC_RECV;
wc->wc_flags = 0;
wc->wc_flags = IB_WC_IP_CSUM_OK;
if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
(cqe->hds_ip_ext & CQE_L4_OK))))
wc->wc_flags = 0;
break;
case MLX5_CQE_RESP_SEND_IMM:
wc->opcode = IB_WC_RECV;
@ -223,14 +223,30 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
wc->dlid_path_bits = cqe->ml_path;
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
wc->wc_flags |= g ? IB_WC_GRH : 0;
wc->pkey_index = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;
if (unlikely(is_qp1(qp->ibqp.qp_type))) {
u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;
#if defined(DX_ROCE_V1_5) || defined(DX_WINDOWS)
udp_header_valid = wc->sl & 0x8;
if (udp_header_valid)
wc->wc_flags |= IB_WC_WITH_UDP_HDR;
ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
&wc->pkey_index);
} else {
wc->pkey_index = 0;
}
#endif
if (ll != IB_LINK_LAYER_ETHERNET)
return;
switch (wc->sl & 0x3) {
case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
wc->network_hdr_type = RDMA_NETWORK_IB;
break;
case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
wc->network_hdr_type = RDMA_NETWORK_IPV6;
break;
case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
wc->network_hdr_type = RDMA_NETWORK_IPV4;
break;
}
wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
}
static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
@ -240,7 +256,9 @@ static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
mlx5_ib_warn(dev, "dump error cqe\n");
for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4)
printf("mlx5_ib: INFO: ""%08x %08x %08x %08x\n", be32_to_cpu(p[0]), be32_to_cpu(p[1]), be32_to_cpu(p[2]), be32_to_cpu(p[3]));
pr_info("%08x %08x %08x %08x\n", be32_to_cpu(p[0]),
be32_to_cpu(p[1]), be32_to_cpu(p[2]),
be32_to_cpu(p[3]));
}
static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
@ -302,14 +320,14 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
dump_cqe(dev, cqe);
}
static int is_atomic_response(struct mlx5_ib_qp *qp, u16 idx)
static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx)
{
/* TBD: waiting decision
*/
return 0;
}
static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, u16 idx)
static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx)
{
struct mlx5_wqe_data_seg *dpseg;
void *addr;
@ -317,12 +335,12 @@ static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, u16 idx)
dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
sizeof(struct mlx5_wqe_raddr_seg) +
sizeof(struct mlx5_wqe_atomic_seg);
addr = (void *)(uintptr_t)be64_to_cpu(dpseg->addr);
addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr);
return addr;
}
static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
u16 idx)
uint16_t idx)
{
void *addr;
int byte_count;
@ -335,10 +353,10 @@ static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
addr = mlx5_get_atomic_laddr(qp, idx);
if (byte_count == 4) {
*(u32 *)addr = be32_to_cpu(*((__be32 *)addr));
*(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr));
} else {
for (i = 0; i < byte_count; i += 8) {
*(u64 *)addr = be64_to_cpu(*((__be64 *)addr));
*(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr));
addr += 8;
}
}
@ -357,9 +375,9 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
if (idx == head)
break;
tail = qp->sq.swr_ctx[idx].w_list.next;
tail = qp->sq.w_list[idx].next;
} while (1);
tail = qp->sq.swr_ctx[idx].w_list.next;
tail = qp->sq.w_list[idx].next;
qp->sq.last_poll = tail;
}
@ -368,12 +386,44 @@ static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
mlx5_buf_free(dev->mdev, &buf->buf);
}
static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
struct ib_sig_err *item)
{
u16 syndrome = be16_to_cpu(cqe->syndrome);
#define GUARD_ERR (1 << 13)
#define APPTAG_ERR (1 << 12)
#define REFTAG_ERR (1 << 11)
if (syndrome & GUARD_ERR) {
item->err_type = IB_SIG_BAD_GUARD;
item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
} else
if (syndrome & REFTAG_ERR) {
item->err_type = IB_SIG_BAD_REFTAG;
item->expected = be32_to_cpu(cqe->expected_reftag);
item->actual = be32_to_cpu(cqe->actual_reftag);
} else
if (syndrome & APPTAG_ERR) {
item->err_type = IB_SIG_BAD_APPTAG;
item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
} else {
pr_err("Got signature completion error with bad syndrome %04x\n",
syndrome);
}
item->sig_err_offset = be64_to_cpu(cqe->err_offset);
item->key = be32_to_cpu(cqe->mkey);
}
static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries,
struct ib_wc *wc, int *npolled)
{
struct mlx5_ib_wq *wq;
unsigned cur;
unsigned idx;
unsigned int cur;
unsigned int idx;
int np;
int i;
@ -386,14 +436,14 @@ static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries,
for (i = 0; i < cur && np < num_entries; i++) {
idx = wq->last_poll & (wq->wqe_cnt - 1);
wc->wr_id = wq->swr_ctx[idx].wrid;
wc->wr_id = wq->wrid[idx];
wc->status = IB_WC_WR_FLUSH_ERR;
wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
wq->tail++;
np++;
wc->qp = &qp->ibqp;
wc++;
wq->last_poll = wq->swr_ctx[idx].w_list.next;
wq->last_poll = wq->w_list[idx].next;
}
*npolled = np;
}
@ -402,7 +452,7 @@ static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries,
struct ib_wc *wc, int *npolled)
{
struct mlx5_ib_wq *wq;
unsigned cur;
unsigned int cur;
int np;
int i;
@ -414,7 +464,7 @@ static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries,
return;
for (i = 0; i < cur && np < num_entries; i++) {
wc->wr_id = wq->rwr_ctx[wq->tail & (wq->wqe_cnt - 1)].wrid;
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
wc->status = IB_WC_WR_FLUSH_ERR;
wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
wq->tail++;
@ -445,11 +495,6 @@ static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
}
}
static inline u32 mlx5_ib_base_mkey(const u32 key)
{
return key & 0xffffff00u;
}
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
struct mlx5_ib_qp **cur_qp,
struct ib_wc *wc)
@ -460,11 +505,11 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
struct mlx5_core_qp *mqp;
struct mlx5_ib_wq *wq;
struct mlx5_sig_err_cqe *sig_err_cqe;
struct mlx5_core_mr *mmr;
struct mlx5_core_mr *mmkey;
struct mlx5_ib_mr *mr;
unsigned long flags;
u8 opcode;
u32 qpn;
uint8_t opcode;
uint32_t qpn;
u16 wqe_ctr;
void *cqe;
int idx;
@ -503,12 +548,6 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
* from the table.
*/
mqp = __mlx5_qp_lookup(dev->mdev, qpn);
if (unlikely(!mqp)) {
mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
cq->mcq.cqn, qpn);
return -EINVAL;
}
*cur_qp = to_mibqp(mqp);
}
@ -520,13 +559,9 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
idx = wqe_ctr & (wq->wqe_cnt - 1);
handle_good_req(wc, cqe64, wq, idx);
handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
wc->wr_id = wq->swr_ctx[idx].wrid;
wq->tail = wq->swr_ctx[idx].wqe_head + 1;
if (unlikely(wq->swr_ctx[idx].w_list.opcode &
MLX5_OPCODE_SIGNATURE_CANCELED))
wc->status = IB_WC_GENERAL_ERR;
else
wc->status = IB_WC_SUCCESS;
wc->wr_id = wq->wrid[idx];
wq->tail = wq->wqe_head[idx] + 1;
wc->status = IB_WC_SUCCESS;
break;
case MLX5_CQE_RESP_WR_IMM:
case MLX5_CQE_RESP_SEND:
@ -550,8 +585,8 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
wq = &(*cur_qp)->sq;
wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
idx = wqe_ctr & (wq->wqe_cnt - 1);
wc->wr_id = wq->swr_ctx[idx].wrid;
wq->tail = wq->swr_ctx[idx].wqe_head + 1;
wc->wr_id = wq->wrid[idx];
wq->tail = wq->wqe_head[idx] + 1;
} else {
struct mlx5_ib_srq *srq;
@ -562,7 +597,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
mlx5_ib_free_srq_wqe(srq, wqe_ctr);
} else {
wq = &(*cur_qp)->rq;
wc->wr_id = wq->rwr_ctx[wq->tail & (wq->wqe_cnt - 1)].wrid;
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
++wq->tail;
}
}
@ -571,20 +606,19 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
spin_lock_irqsave(&dev->mdev->priv.mr_table.lock, flags);
mmr = __mlx5_mr_lookup(dev->mdev,
mlx5_ib_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
if (unlikely(!mmr)) {
spin_unlock_irqrestore(&dev->mdev->priv.mr_table.lock, flags);
mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
return -EINVAL;
}
mr = to_mibmr(mmr);
mmkey = __mlx5_mr_lookup(dev->mdev,
mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
mr = to_mibmr(mmkey);
get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
mr->sig->sig_err_exists = true;
mr->sig->sigerr_count++;
mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR\n", cq->mcq.cqn);
mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
cq->mcq.cqn, mr->sig->err_item.key,
mr->sig->err_item.err_type,
(long long)mr->sig->err_item.sig_err_offset,
mr->sig->err_item.expected,
mr->sig->err_item.actual);
spin_unlock_irqrestore(&dev->mdev->priv.mr_table.lock, flags);
goto repoll;
@ -593,6 +627,28 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
return 0;
}
static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
struct ib_wc *wc)
{
struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
struct mlx5_ib_wc *soft_wc, *next;
int npolled = 0;
list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
if (npolled >= num_entries)
break;
mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
cq->mcq.cqn);
wc[npolled++] = soft_wc->wc;
list_del(&soft_wc->list);
kfree(soft_wc);
}
return npolled;
}
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
struct mlx5_ib_cq *cq = to_mcq(ibcq);
@ -600,8 +656,8 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
struct mlx5_core_dev *mdev = dev->mdev;
unsigned long flags;
int soft_polled = 0;
int npolled;
int err = 0;
spin_lock_irqsave(&cq->lock, flags);
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@ -609,9 +665,11 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
goto out;
}
for (npolled = 0; npolled < num_entries; npolled++) {
err = mlx5_poll_one(cq, &cur_qp, wc + npolled);
if (err)
if (unlikely(!list_empty(&cq->wc_list)))
soft_polled = poll_soft_wc(cq, num_entries, wc);
for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
break;
}
@ -620,26 +678,33 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
out:
spin_unlock_irqrestore(&cq->lock, flags);
if (err == 0 || err == -EAGAIN)
return npolled;
else
return err;
return soft_polled + npolled;
}
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
struct mlx5_ib_cq *cq = to_mcq(ibcq);
void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
unsigned long irq_flags;
int ret = 0;
spin_lock_irqsave(&cq->lock, irq_flags);
if (cq->notify_flags != IB_CQ_NEXT_COMP)
cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;
mlx5_cq_arm(&to_mcq(ibcq)->mcq,
if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
ret = 1;
spin_unlock_irqrestore(&cq->lock, irq_flags);
mlx5_cq_arm(&cq->mcq,
(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
uar_page,
MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
to_mcq(ibcq)->mcq.cons_index);
return 0;
return ret;
}
static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
@ -648,11 +713,9 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
int err;
err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
PAGE_SIZE * 2, &buf->buf);
if (err) {
mlx5_ib_err(dev, "alloc failed\n");
2 * PAGE_SIZE, &buf->buf);
if (err)
return err;
}
buf->cqe_size = cqe_size;
buf->nent = nent;
@ -662,38 +725,32 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
struct ib_ucontext *context, struct mlx5_ib_cq *cq,
int entries, struct mlx5_create_cq_mbox_in **cqb,
int entries, u32 **cqb,
int *cqe_size, int *index, int *inlen)
{
struct mlx5_exp_ib_create_cq ucmd;
struct mlx5_ib_create_cq ucmd;
size_t ucmdlen;
int page_shift;
__be64 *pas;
int npages;
int ncont;
void *cqc;
int err;
memset(&ucmd, 0, sizeof(ucmd));
ucmdlen =
(udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
sizeof(struct mlx5_ib_create_cq)) ?
(sizeof(struct mlx5_ib_create_cq) - sizeof(ucmd.reserved)) :
sizeof(struct mlx5_ib_create_cq);
sizeof(ucmd)) ? (sizeof(ucmd) -
sizeof(ucmd.reserved)) : sizeof(ucmd);
if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
mlx5_ib_err(dev, "copy failed\n");
if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
return -EFAULT;
}
if (ucmdlen == sizeof(ucmd) && ucmd.reserved != 0) {
mlx5_ib_err(dev, "command corrupted\n");
if (ucmdlen == sizeof(ucmd) &&
ucmd.reserved != 0)
return -EINVAL;
}
if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128) {
mlx5_ib_warn(dev, "wrong CQE size %d\n", ucmd.cqe_size);
if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
return -EINVAL;
}
*cqe_size = ucmd.cqe_size;
@ -707,42 +764,31 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
&cq->db);
if (err) {
mlx5_ib_warn(dev, "map failed\n");
if (err)
goto err_umem;
}
mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,
&ncont, NULL);
mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
(unsigned long long)ucmd.buf_addr, entries * ucmd.cqe_size,
npages, page_shift, ncont);
(long long)ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont;
*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
*cqb = mlx5_vzalloc(*inlen);
if (!*cqb) {
err = -ENOMEM;
goto err_db;
}
mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0);
(*cqb)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);
cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
MLX5_SET(cqc, cqc, log_page_size,
page_shift - MLX5_ADAPTER_PAGE_SHIFT);
*index = to_mucontext(context)->uuari.uars[0].index;
if (*cqe_size == 64 && MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
if (ucmd.exp_data.cqe_comp_en == 1 &&
(ucmd.exp_data.comp_mask & MLX5_EXP_CREATE_CQ_MASK_CQE_COMP_EN)) {
MLX5_SET(cqc, &(*cqb)->ctx, cqe_compression_en, 1);
if (ucmd.exp_data.cqe_comp_recv_type ==
MLX5_IB_CQE_FORMAT_CSUM &&
(ucmd.exp_data.comp_mask &
MLX5_EXP_CREATE_CQ_MASK_CQE_COMP_RECV_TYPE))
MLX5_SET(cqc, &(*cqb)->ctx, mini_cqe_res_format,
MLX5_IB_CQE_FORMAT_CSUM);
}
}
return 0;
err_db:
@ -755,7 +801,6 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
{
mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
ib_umem_release(cq->buf.umem);
}
@ -775,9 +820,10 @@ static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
int entries, int cqe_size,
struct mlx5_create_cq_mbox_in **cqb,
int *index, int *inlen)
u32 **cqb, int *index, int *inlen)
{
__be64 *pas;
void *cqc;
int err;
err = mlx5_db_alloc(dev->mdev, &cq->db);
@ -794,15 +840,21 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
init_cq_buf(cq, &cq->buf);
*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
*cqb = mlx5_vzalloc(*inlen);
if (!*cqb) {
err = -ENOMEM;
goto err_buf;
}
mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);
(*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
mlx5_fill_page_array(&cq->buf.buf, pas);
cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
MLX5_SET(cqc, cqc, log_page_size,
cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
*index = dev->mdev->priv.uuari.uars[0].index;
return 0;
@ -821,32 +873,42 @@ static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
mlx5_db_free(dev->mdev, &cq->db);
}
static void notify_soft_wc_handler(struct work_struct *work)
{
struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
notify_work);
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
struct ib_cq_init_attr *attr,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata)
{
struct mlx5_create_cq_mbox_in *cqb = NULL;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_cq *cq;
int entries = attr->cqe;
int vector = attr->comp_vector;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_cq *cq;
int uninitialized_var(index);
int uninitialized_var(inlen);
u32 *cqb = NULL;
void *cqc;
int cqe_size;
int irqn;
unsigned int irqn;
int eqn;
int err;
if (entries < 0 || roundup_pow_of_two(entries + 1) >
(1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
mlx5_ib_warn(dev, "wrong entries number %d(%ld), max %d\n",
entries, roundup_pow_of_two(entries + 1),
1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
if (entries < 0 ||
(entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
return ERR_PTR(-EINVAL);
}
if (check_cq_create_flags(attr->flags))
return ERR_PTR(-EOPNOTSUPP);
entries = roundup_pow_of_two(entries + 1);
if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
return ERR_PTR(-EINVAL);
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
if (!cq)
@ -857,7 +919,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
spin_lock_init(&cq->lock);
cq->resize_buf = NULL;
cq->resize_umem = NULL;
cq->create_flags = attr->flags;
INIT_LIST_HEAD(&cq->list_send_qp);
INIT_LIST_HEAD(&cq->list_recv_qp);
@ -867,24 +929,32 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
if (err)
goto err_create;
} else {
cqe_size = (cache_line_size() >= 128 ? 128 : 64);
cqe_size = cache_line_size() == 128 ? 128 : 64;
err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
&index, &inlen);
if (err)
goto err_create;
INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
}
cq->cqe_size = cqe_size;
cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
if (err)
goto err_cqb;
cqb->ctx.c_eqn = cpu_to_be16(eqn);
cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);
cq->cqe_size = cqe_size;
err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
MLX5_SET(cqc, cqc, uar_page, index);
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
MLX5_SET(cqc, cqc, oi, 1);
err = mlx5_core_create_cq(dev->mdev, &cq->mcq,
(struct mlx5_create_cq_mbox_in *)cqb, inlen);
if (err)
goto err_cqb;
@ -893,6 +963,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
cq->mcq.comp = mlx5_ib_cq_comp;
cq->mcq.event = mlx5_ib_cq_event;
INIT_LIST_HEAD(&cq->wc_list);
if (context)
if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
err = -EFAULT;
@ -1006,44 +1078,17 @@ void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
spin_unlock_irq(&cq->lock);
}
int mlx5_ib_modify_cq(struct ib_cq *cq, struct ib_cq_attr *attr, int cq_attr_mask)
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
struct mlx5_modify_cq_mbox_in *in;
struct mlx5_ib_dev *dev = to_mdev(cq->device);
struct mlx5_ib_cq *mcq = to_mcq(cq);
u16 cq_count = attr->moderation.cq_count;
u16 cq_period = attr->moderation.cq_period;
int err;
u32 fsel = 0;
in = kzalloc(sizeof(*in), GFP_KERNEL);
if (!in)
return -ENOMEM;
if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
return -ENOSYS;
in->cqn = cpu_to_be32(mcq->mcq.cqn);
if (cq_attr_mask & IB_CQ_MODERATION) {
if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
fsel |= (MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
if (cq_period & 0xf000) {
/* A value higher than 0xfff is required, better
* use the largest value possible. */
cq_period = 0xfff;
printf("mlx5_ib: INFO: ""period supported is limited to 12 bits\n");
}
in->ctx.cq_period = cpu_to_be16(cq_period);
in->ctx.cq_max_count = cpu_to_be16(cq_count);
} else {
err = -ENOSYS;
goto out;
}
}
in->field_select = cpu_to_be32(fsel);
err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
out:
kfree(in);
err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
cq_period, cq_count);
if (err)
mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
@ -1163,7 +1208,8 @@ static int copy_resize_cqes(struct mlx5_ib_cq *cq)
}
if (scqe == start_cqe) {
printf("mlx5_ib: WARN: ""resize CQ failed to get resize CQE, CQN 0x%x\n", cq->mcq.cqn);
pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
cq->mcq.cqn);
return -ENOMEM;
}
}
@ -1175,28 +1221,32 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
struct mlx5_ib_cq *cq = to_mcq(ibcq);
struct mlx5_modify_cq_mbox_in *in;
void *cqc;
u32 *in;
int err;
int npas;
__be64 *pas;
int page_shift;
int inlen;
int uninitialized_var(cqe_size);
unsigned long flags;
if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
mlx5_ib_warn(dev, "Firmware does not support resize CQ\n");
pr_info("Firmware does not support resize CQ\n");
return -ENOSYS;
}
if (entries < 1 || roundup_pow_of_two(entries + 1) >
(1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
mlx5_ib_warn(dev, "wrong entries number %d(%ld), max %d\n",
entries, roundup_pow_of_two(entries + 1),
if (entries < 1 ||
entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
entries,
1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
return -EINVAL;
}
entries = roundup_pow_of_two(entries + 1);
if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
return -EINVAL;
if (entries == ibcq->cqe + 1)
return 0;
@ -1214,39 +1264,45 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
}
}
if (err) {
mlx5_ib_warn(dev, "resize failed: %d\n", err);
if (err)
goto ex;
}
inlen = sizeof(*in) + npas * sizeof(in->pas[0]);
inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;
in = mlx5_vzalloc(inlen);
if (!in) {
err = -ENOMEM;
goto ex_resize;
}
pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
if (udata)
mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
in->pas, 0);
pas, 0);
else
mlx5_fill_page_array(&cq->resize_buf->buf, in->pas);
mlx5_fill_page_array(&cq->resize_buf->buf, pas);
in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE |
MLX5_MODIFY_CQ_MASK_PG_OFFSET |
MLX5_MODIFY_CQ_MASK_PG_SIZE);
in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
in->ctx.page_offset = 0;
in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24);
in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
in->cqn = cpu_to_be32(cq->mcq.cqn);
MLX5_SET(modify_cq_in, in,
modify_field_select_resize_field_select.resize_field_select.resize_field_select,
MLX5_MODIFY_CQ_MASK_LOG_SIZE |
MLX5_MODIFY_CQ_MASK_PG_OFFSET |
MLX5_MODIFY_CQ_MASK_PG_SIZE);
err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
if (err) {
mlx5_ib_warn(dev, "modify cq failed: %d\n", err);
cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
MLX5_SET(cqc, cqc, log_page_size,
page_shift - MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);
err = mlx5_core_modify_cq(dev->mdev, &cq->mcq,
(struct mlx5_modify_cq_mbox_in *)in, inlen);
if (err)
goto ex_alloc;
}
if (udata) {
cq->ibcq.cqe = entries - 1;
@ -1301,3 +1357,27 @@ int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
cq = to_mcq(ibcq);
return cq->cqe_size;
}
/* Called from atomic context */
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
{
struct mlx5_ib_wc *soft_wc;
struct mlx5_ib_cq *cq = to_mcq(ibcq);
unsigned long flags;
soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
if (!soft_wc)
return -ENOMEM;
soft_wc->wc = *wc;
spin_lock_irqsave(&cq->lock, flags);
list_add_tail(&soft_wc->list, &cq->wc_list);
if (cq->notify_flags == IB_CQ_NEXT_COMP ||
wc->status != IB_WC_SUCCESS) {
cq->notify_flags = 0;
schedule_work(&cq->notify_work);
}
spin_unlock_irqrestore(&cq->lock, flags);
return 0;
}
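
The software-completion path added above (mlx5_ib_generate_wc() queues an entry on cq->wc_list, poll_soft_wc() drains that list, and mlx5_ib_poll_cq() returns soft_polled + npolled) can be modelled in isolation. The following is only a stand-alone userspace sketch of that merge logic; the simplified structures and the fake hardware queue are invented for illustration and are not the driver's types.

/* Stand-alone model of the soft/hard completion merge done by the new
 * mlx5_ib_poll_cq(): software-generated completions queued on a list are
 * drained first, then hardware CQEs fill the remaining wc[] slots.
 * All types here are simplified stand-ins, not the driver's structures. */
#include <stdio.h>
#include <stdlib.h>

struct wc { int wr_id; };                /* stand-in for struct ib_wc */

struct soft_wc {                         /* stand-in for struct mlx5_ib_wc */
    struct wc wc;
    struct soft_wc *next;
};

struct cq {
    struct soft_wc *wc_list;             /* software-generated completions */
    int hw_pending;                      /* fake count of hardware CQEs ready */
};

static int poll_soft_wc(struct cq *cq, int num_entries, struct wc *wc)
{
    int npolled = 0;

    while (cq->wc_list && npolled < num_entries) {
        struct soft_wc *soft = cq->wc_list;

        wc[npolled++] = soft->wc;
        cq->wc_list = soft->next;
        free(soft);
    }
    return npolled;
}

static int poll_cq(struct cq *cq, int num_entries, struct wc *wc)
{
    int soft_polled = 0;
    int npolled;

    if (cq->wc_list)
        soft_polled = poll_soft_wc(cq, num_entries, wc);

    /* Hardware completions only get the slots left over after the software
     * ones, mirroring the num_entries - soft_polled bound above. */
    for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
        if (!cq->hw_pending)
            break;
        wc[soft_polled + npolled].wr_id = 1000 + cq->hw_pending--;
    }
    return soft_polled + npolled;
}

int main(void)
{
    struct soft_wc *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));
    struct cq cq = { .wc_list = a, .hw_pending = 3 };
    struct wc wc[4];
    int n, i;

    if (!a || !b)
        return 1;
    a->wc.wr_id = 1; a->next = b;
    b->wc.wr_id = 2; b->next = NULL;

    n = poll_cq(&cq, 4, wc);             /* expect two soft + two hardware */
    for (i = 0; i < n; i++)
        printf("wc[%d].wr_id = %d\n", i, wc[i].wr_id);
    return 0;
}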


@ -34,11 +34,11 @@
struct mlx5_ib_user_db_page {
struct list_head list;
struct ib_umem *umem;
uintptr_t user_virt;
unsigned long user_virt;
int refcnt;
};
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, uintptr_t virt,
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
struct mlx5_db *db)
{
struct mlx5_ib_user_db_page *page;


@ -0,0 +1,536 @@
/*-
* Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include "mlx5_ib.h"
struct mlx5_ib_gsi_wr {
struct ib_cqe cqe;
struct ib_wc wc;
int send_flags;
bool completed:1;
};
struct mlx5_ib_gsi_qp {
struct ib_qp ibqp;
struct ib_qp *rx_qp;
u8 port_num;
struct ib_qp_cap cap;
enum ib_sig_type sq_sig_type;
/* Serialize qp state modifications */
struct mutex mutex;
struct ib_cq *cq;
struct mlx5_ib_gsi_wr *outstanding_wrs;
u32 outstanding_pi, outstanding_ci;
int num_qps;
/* Protects access to the tx_qps. Post send operations synchronize
* with tx_qp creation in setup_qp(). Also protects the
* outstanding_wrs array and indices.
*/
spinlock_t lock;
struct ib_qp **tx_qps;
};
static struct mlx5_ib_gsi_qp *gsi_qp(struct ib_qp *qp)
{
return container_of(qp, struct mlx5_ib_gsi_qp, ibqp);
}
static bool mlx5_ib_deth_sqpn_cap(struct mlx5_ib_dev *dev)
{
return MLX5_CAP_GEN(dev->mdev, set_deth_sqpn);
}
/* Call with gsi->lock locked */
static void generate_completions(struct mlx5_ib_gsi_qp *gsi)
{
struct ib_cq *gsi_cq = gsi->ibqp.send_cq;
struct mlx5_ib_gsi_wr *wr;
u32 index;
for (index = gsi->outstanding_ci; index != gsi->outstanding_pi;
index++) {
wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr];
if (!wr->completed)
break;
if (gsi->sq_sig_type == IB_SIGNAL_ALL_WR ||
wr->send_flags & IB_SEND_SIGNALED)
WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc));
wr->completed = false;
}
gsi->outstanding_ci = index;
}
static void handle_single_completion(struct ib_cq *cq, struct ib_wc *wc)
{
struct mlx5_ib_gsi_qp *gsi = cq->cq_context;
struct mlx5_ib_gsi_wr *wr =
container_of(wc->wr_cqe, struct mlx5_ib_gsi_wr, cqe);
u64 wr_id;
unsigned long flags;
spin_lock_irqsave(&gsi->lock, flags);
wr->completed = true;
wr_id = wr->wc.wr_id;
wr->wc = *wc;
wr->wc.wr_id = wr_id;
wr->wc.qp = &gsi->ibqp;
generate_completions(gsi);
spin_unlock_irqrestore(&gsi->lock, flags);
}
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_gsi_qp *gsi;
struct ib_qp_init_attr hw_init_attr = *init_attr;
const u8 port_num = init_attr->port_num;
const int num_pkeys = pd->device->attrs.max_pkeys;
const int num_qps = mlx5_ib_deth_sqpn_cap(dev) ? num_pkeys : 0;
int ret;
mlx5_ib_dbg(dev, "creating GSI QP\n");
if (port_num > ARRAY_SIZE(dev->devr.ports) || port_num < 1) {
mlx5_ib_warn(dev,
"invalid port number %d during GSI QP creation\n",
port_num);
return ERR_PTR(-EINVAL);
}
gsi = kzalloc(sizeof(*gsi), GFP_KERNEL);
if (!gsi)
return ERR_PTR(-ENOMEM);
gsi->tx_qps = kcalloc(num_qps, sizeof(*gsi->tx_qps), GFP_KERNEL);
if (!gsi->tx_qps) {
ret = -ENOMEM;
goto err_free;
}
gsi->outstanding_wrs = kcalloc(init_attr->cap.max_send_wr,
sizeof(*gsi->outstanding_wrs),
GFP_KERNEL);
if (!gsi->outstanding_wrs) {
ret = -ENOMEM;
goto err_free_tx;
}
mutex_init(&gsi->mutex);
mutex_lock(&dev->devr.mutex);
if (dev->devr.ports[port_num - 1].gsi) {
mlx5_ib_warn(dev, "GSI QP already exists on port %d\n",
port_num);
ret = -EBUSY;
goto err_free_wrs;
}
gsi->num_qps = num_qps;
spin_lock_init(&gsi->lock);
gsi->cap = init_attr->cap;
gsi->sq_sig_type = init_attr->sq_sig_type;
gsi->ibqp.qp_num = 1;
gsi->port_num = port_num;
gsi->cq = ib_alloc_cq(pd->device, gsi, init_attr->cap.max_send_wr, 0,
IB_POLL_SOFTIRQ);
if (IS_ERR(gsi->cq)) {
mlx5_ib_warn(dev, "unable to create send CQ for GSI QP. error %ld\n",
PTR_ERR(gsi->cq));
ret = PTR_ERR(gsi->cq);
goto err_free_wrs;
}
hw_init_attr.qp_type = MLX5_IB_QPT_HW_GSI;
hw_init_attr.send_cq = gsi->cq;
if (num_qps) {
hw_init_attr.cap.max_send_wr = 0;
hw_init_attr.cap.max_send_sge = 0;
hw_init_attr.cap.max_inline_data = 0;
}
gsi->rx_qp = ib_create_qp(pd, &hw_init_attr);
if (IS_ERR(gsi->rx_qp)) {
mlx5_ib_warn(dev, "unable to create hardware GSI QP. error %ld\n",
PTR_ERR(gsi->rx_qp));
ret = PTR_ERR(gsi->rx_qp);
goto err_destroy_cq;
}
dev->devr.ports[init_attr->port_num - 1].gsi = gsi;
mutex_unlock(&dev->devr.mutex);
return &gsi->ibqp;
err_destroy_cq:
ib_free_cq(gsi->cq);
err_free_wrs:
mutex_unlock(&dev->devr.mutex);
kfree(gsi->outstanding_wrs);
err_free_tx:
kfree(gsi->tx_qps);
err_free:
kfree(gsi);
return ERR_PTR(ret);
}
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
const int port_num = gsi->port_num;
int qp_index;
int ret;
mlx5_ib_dbg(dev, "destroying GSI QP\n");
mutex_lock(&dev->devr.mutex);
ret = ib_destroy_qp(gsi->rx_qp);
if (ret) {
mlx5_ib_warn(dev, "unable to destroy hardware GSI QP. error %d\n",
ret);
mutex_unlock(&dev->devr.mutex);
return ret;
}
dev->devr.ports[port_num - 1].gsi = NULL;
mutex_unlock(&dev->devr.mutex);
gsi->rx_qp = NULL;
for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) {
if (!gsi->tx_qps[qp_index])
continue;
WARN_ON_ONCE(ib_destroy_qp(gsi->tx_qps[qp_index]));
gsi->tx_qps[qp_index] = NULL;
}
ib_free_cq(gsi->cq);
kfree(gsi->outstanding_wrs);
kfree(gsi->tx_qps);
kfree(gsi);
return 0;
}
static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
{
struct ib_pd *pd = gsi->rx_qp->pd;
struct ib_qp_init_attr init_attr = {
.event_handler = gsi->rx_qp->event_handler,
.qp_context = gsi->rx_qp->qp_context,
.send_cq = gsi->cq,
.recv_cq = gsi->rx_qp->recv_cq,
.cap = {
.max_send_wr = gsi->cap.max_send_wr,
.max_send_sge = gsi->cap.max_send_sge,
.max_inline_data = gsi->cap.max_inline_data,
},
.sq_sig_type = gsi->sq_sig_type,
.qp_type = IB_QPT_UD,
.create_flags = mlx5_ib_create_qp_sqpn_qp1(),
};
return ib_create_qp(pd, &init_attr);
}
static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp,
u16 qp_index)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct ib_qp_attr attr;
int mask;
int ret;
mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY | IB_QP_PORT;
attr.qp_state = IB_QPS_INIT;
attr.pkey_index = qp_index;
attr.qkey = IB_QP1_QKEY;
attr.port_num = gsi->port_num;
ret = ib_modify_qp(qp, &attr, mask);
if (ret) {
mlx5_ib_err(dev, "could not change QP%d state to INIT: %d\n",
qp->qp_num, ret);
return ret;
}
attr.qp_state = IB_QPS_RTR;
ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
if (ret) {
mlx5_ib_err(dev, "could not change QP%d state to RTR: %d\n",
qp->qp_num, ret);
return ret;
}
attr.qp_state = IB_QPS_RTS;
attr.sq_psn = 0;
ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
if (ret) {
mlx5_ib_err(dev, "could not change QP%d state to RTS: %d\n",
qp->qp_num, ret);
return ret;
}
return 0;
}
static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
{
struct ib_device *device = gsi->rx_qp->device;
struct mlx5_ib_dev *dev = to_mdev(device);
struct ib_qp *qp;
unsigned long flags;
u16 pkey;
int ret;
ret = ib_query_pkey(device, gsi->port_num, qp_index, &pkey);
if (ret) {
mlx5_ib_warn(dev, "unable to read P_Key at port %d, index %d\n",
gsi->port_num, qp_index);
return;
}
if (!pkey) {
mlx5_ib_dbg(dev, "invalid P_Key at port %d, index %d. Skipping.\n",
gsi->port_num, qp_index);
return;
}
spin_lock_irqsave(&gsi->lock, flags);
qp = gsi->tx_qps[qp_index];
spin_unlock_irqrestore(&gsi->lock, flags);
if (qp) {
mlx5_ib_dbg(dev, "already existing GSI TX QP at port %d, index %d. Skipping\n",
gsi->port_num, qp_index);
return;
}
qp = create_gsi_ud_qp(gsi);
if (IS_ERR(qp)) {
mlx5_ib_warn(dev, "unable to create hardware UD QP for GSI: %ld\n",
PTR_ERR(qp));
return;
}
ret = modify_to_rts(gsi, qp, qp_index);
if (ret)
goto err_destroy_qp;
spin_lock_irqsave(&gsi->lock, flags);
WARN_ON_ONCE(gsi->tx_qps[qp_index]);
gsi->tx_qps[qp_index] = qp;
spin_unlock_irqrestore(&gsi->lock, flags);
return;
err_destroy_qp:
WARN_ON_ONCE(qp);
}
static void setup_qps(struct mlx5_ib_gsi_qp *gsi)
{
u16 qp_index;
for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
setup_qp(gsi, qp_index);
}
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
int attr_mask)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
int ret;
mlx5_ib_dbg(dev, "modifying GSI QP to state %d\n", attr->qp_state);
mutex_lock(&gsi->mutex);
ret = ib_modify_qp(gsi->rx_qp, attr, attr_mask);
if (ret) {
mlx5_ib_warn(dev, "unable to modify GSI rx QP: %d\n", ret);
goto unlock;
}
if (to_mqp(gsi->rx_qp)->state == IB_QPS_RTS)
setup_qps(gsi);
unlock:
mutex_unlock(&gsi->mutex);
return ret;
}
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr)
{
struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
int ret;
mutex_lock(&gsi->mutex);
ret = ib_query_qp(gsi->rx_qp, qp_attr, qp_attr_mask, qp_init_attr);
qp_init_attr->cap = gsi->cap;
mutex_unlock(&gsi->mutex);
return ret;
}
/* Call with gsi->lock locked */
static int mlx5_ib_add_outstanding_wr(struct mlx5_ib_gsi_qp *gsi,
struct ib_ud_wr *wr, struct ib_wc *wc)
{
struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
struct mlx5_ib_gsi_wr *gsi_wr;
if (gsi->outstanding_pi == gsi->outstanding_ci + gsi->cap.max_send_wr) {
mlx5_ib_warn(dev, "no available GSI work request.\n");
return -ENOMEM;
}
gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi %
gsi->cap.max_send_wr];
gsi->outstanding_pi++;
if (!wc) {
memset(&gsi_wr->wc, 0, sizeof(gsi_wr->wc));
gsi_wr->wc.pkey_index = wr->pkey_index;
gsi_wr->wc.wr_id = wr->wr.wr_id;
} else {
gsi_wr->wc = *wc;
gsi_wr->completed = true;
}
gsi_wr->cqe.done = &handle_single_completion;
wr->wr.wr_cqe = &gsi_wr->cqe;
return 0;
}
/* Call with gsi->lock locked */
static int mlx5_ib_gsi_silent_drop(struct mlx5_ib_gsi_qp *gsi,
struct ib_ud_wr *wr)
{
struct ib_wc wc = {
{ .wr_id = wr->wr.wr_id },
.status = IB_WC_SUCCESS,
.opcode = IB_WC_SEND,
.qp = &gsi->ibqp,
};
int ret;
ret = mlx5_ib_add_outstanding_wr(gsi, wr, &wc);
if (ret)
return ret;
generate_completions(gsi);
return 0;
}
/* Call with gsi->lock locked */
static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr)
{
struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
int qp_index = wr->pkey_index;
if (!mlx5_ib_deth_sqpn_cap(dev))
return gsi->rx_qp;
if (qp_index >= gsi->num_qps)
return NULL;
return gsi->tx_qps[qp_index];
}
int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
struct ib_qp *tx_qp;
unsigned long flags;
int ret;
for (; wr; wr = wr->next) {
struct ib_ud_wr cur_wr = *ud_wr(wr);
cur_wr.wr.next = NULL;
spin_lock_irqsave(&gsi->lock, flags);
tx_qp = get_tx_qp(gsi, &cur_wr);
if (!tx_qp) {
ret = mlx5_ib_gsi_silent_drop(gsi, &cur_wr);
if (ret)
goto err;
spin_unlock_irqrestore(&gsi->lock, flags);
continue;
}
ret = mlx5_ib_add_outstanding_wr(gsi, &cur_wr, NULL);
if (ret)
goto err;
ret = ib_post_send(tx_qp, &cur_wr.wr, bad_wr);
if (ret) {
/* Undo the effect of adding the outstanding wr */
gsi->outstanding_pi = (gsi->outstanding_pi - 1) %
gsi->cap.max_send_wr;
goto err;
}
spin_unlock_irqrestore(&gsi->lock, flags);
}
return 0;
err:
spin_unlock_irqrestore(&gsi->lock, flags);
*bad_wr = wr;
return ret;
}
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr)
{
struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
return ib_post_recv(gsi->rx_qp, wr, bad_wr);
}
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi)
{
if (!gsi)
return;
mutex_lock(&gsi->mutex);
setup_qps(gsi);
mutex_unlock(&gsi->mutex);
}
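
The GSI implementation above tracks posted sends in outstanding_wrs[], a ring of cap.max_send_wr entries addressed by the free-running outstanding_pi/outstanding_ci counters, and generate_completions() reports completions strictly in posting order, stopping at the first entry that has not completed yet. Below is a minimal stand-alone model of that ordering; the fixed ring size, the printf reporting, and the manually toggled completion flags are illustrative only.

/* Toy model of the GSI outstanding-WR ring: completions may arrive out of
 * order (the per-P_Key TX QPs complete independently), but they are reported
 * toward the GSI CQ strictly in posting order, as generate_completions() does. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_SEND_WR 4                       /* stands in for gsi->cap.max_send_wr */

struct gsi_wr {
    unsigned long wr_id;
    bool completed;
};

static struct gsi_wr outstanding_wrs[MAX_SEND_WR];
static unsigned int outstanding_pi, outstanding_ci;

static int post_wr(unsigned long wr_id)
{
    if (outstanding_pi == outstanding_ci + MAX_SEND_WR)
        return -1;                          /* ring full (-ENOMEM in the driver) */
    outstanding_wrs[outstanding_pi++ % MAX_SEND_WR] =
        (struct gsi_wr){ .wr_id = wr_id, .completed = false };
    return 0;
}

static void generate_completions(void)
{
    unsigned int index;

    for (index = outstanding_ci; index != outstanding_pi; index++) {
        struct gsi_wr *wr = &outstanding_wrs[index % MAX_SEND_WR];

        if (!wr->completed)
            break;                          /* preserve posting order */
        printf("report completion for wr_id %lu\n", wr->wr_id);
        wr->completed = false;              /* slot may be reused */
    }
    outstanding_ci = index;
}

int main(void)
{
    post_wr(10); post_wr(11); post_wr(12);

    outstanding_wrs[1].completed = true;    /* wr 11 completes first... */
    generate_completions();                 /* ...but nothing is reported yet */

    outstanding_wrs[0].completed = true;    /* now wr 10 completes */
    generate_completions();                 /* reports 10 and 11, in order */
    return 0;
}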


@ -25,11 +25,11 @@
* $FreeBSD$
*/
#include <dev/mlx5/vport.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_pma.h>
#include "mlx5_ib.h"
#include <dev/mlx5/vport.h>
enum {
MLX5_IB_VENDOR_CLASS1 = 0x9,
@ -37,8 +37,8 @@ enum {
};
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
void *in_mad, void *response_mad)
u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const void *in_mad, void *response_mad)
{
u8 op_modifier = 0;
@ -54,8 +54,8 @@ int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
}
static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
struct ib_mad *in_mad, struct ib_mad *out_mad)
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
u16 slid;
int err;
@ -106,89 +106,148 @@ static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
}
static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
struct mlx5_vport_counters *vc)
void *out)
{
pma_cnt_ext->port_xmit_data = cpu_to_be64((vc->transmitted_ib_unicast.octets +
vc->transmitted_ib_multicast.octets) >> 2);
pma_cnt_ext->port_rcv_data = cpu_to_be64((vc->received_ib_unicast.octets +
vc->received_ib_multicast.octets) >> 2);
pma_cnt_ext->port_xmit_packets = cpu_to_be64(vc->transmitted_ib_unicast.packets +
vc->transmitted_ib_multicast.packets);
pma_cnt_ext->port_rcv_packets = cpu_to_be64(vc->received_ib_unicast.packets +
vc->received_ib_multicast.packets);
pma_cnt_ext->port_unicast_xmit_packets = cpu_to_be64(vc->transmitted_ib_unicast.packets);
pma_cnt_ext->port_unicast_rcv_packets = cpu_to_be64(vc->received_ib_unicast.packets);
pma_cnt_ext->port_multicast_xmit_packets = cpu_to_be64(vc->transmitted_ib_multicast.packets);
pma_cnt_ext->port_multicast_rcv_packets = cpu_to_be64(vc->received_ib_multicast.packets);
#define MLX5_SUM_CNT(p, cntr1, cntr2) \
(MLX5_GET64(query_vport_counter_out, p, cntr1) + \
MLX5_GET64(query_vport_counter_out, p, cntr2))
pma_cnt_ext->port_xmit_data =
cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
transmitted_ib_multicast.octets) >> 2);
pma_cnt_ext->port_rcv_data =
cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
received_ib_multicast.octets) >> 2);
pma_cnt_ext->port_xmit_packets =
cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.packets,
transmitted_ib_multicast.packets));
pma_cnt_ext->port_rcv_packets =
cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.packets,
received_ib_multicast.packets));
pma_cnt_ext->port_unicast_xmit_packets =
MLX5_GET64_BE(query_vport_counter_out,
out, transmitted_ib_unicast.packets);
pma_cnt_ext->port_unicast_rcv_packets =
MLX5_GET64_BE(query_vport_counter_out,
out, received_ib_unicast.packets);
pma_cnt_ext->port_multicast_xmit_packets =
MLX5_GET64_BE(query_vport_counter_out,
out, transmitted_ib_multicast.packets);
pma_cnt_ext->port_multicast_rcv_packets =
MLX5_GET64_BE(query_vport_counter_out,
out, received_ib_multicast.packets);
}
static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
struct mlx5_vport_counters *vc)
void *out)
{
ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
(vc->transmitted_ib_unicast.octets +
vc->transmitted_ib_multicast.octets) >> 2);
ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
(vc->received_ib_unicast.octets +
vc->received_ib_multicast.octets) >> 2);
ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
vc->transmitted_ib_unicast.packets +
vc->transmitted_ib_multicast.packets);
ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
vc->received_ib_unicast.packets +
vc->received_ib_multicast.packets);
/* Traffic counters will be reported in
* their 64bit form via ib_pma_portcounters_ext by default.
*/
void *out_pma = MLX5_ADDR_OF(ppcnt_reg, out,
counter_set);
#define MLX5_ASSIGN_PMA_CNTR(counter_var, counter_name) { \
counter_var = MLX5_GET_BE(typeof(counter_var), \
ib_port_cntrs_grp_data_layout, \
out_pma, counter_name); \
}
MLX5_ASSIGN_PMA_CNTR(pma_cnt->symbol_error_counter,
symbol_error_counter);
MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_error_recovery_counter,
link_error_recovery_counter);
MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_downed_counter,
link_downed_counter);
MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_errors,
port_rcv_errors);
MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_remphys_errors,
port_rcv_remote_physical_errors);
MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_switch_relay_errors,
port_rcv_switch_relay_errors);
MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_discards,
port_xmit_discards);
MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_constraint_errors,
port_xmit_constraint_errors);
MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_constraint_errors,
port_rcv_constraint_errors);
MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_overrun_errors,
link_overrun_errors);
MLX5_ASSIGN_PMA_CNTR(pma_cnt->vl15_dropped,
vl_15_dropped);
}
static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
struct ib_mad *in_mad, struct ib_mad *out_mad)
const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_vport_counters *vc;
int err;
int ext;
void *out_cnt;
vc = kzalloc(sizeof(*vc), GFP_KERNEL);
if (!vc)
return -ENOMEM;
/* Declaring support of extended counters */
if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
struct ib_class_port_info cpi = {};
ext = in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT;
err = mlx5_get_vport_counters(dev->mdev, port_num, vc);
if (!err) {
if (ext) {
struct ib_pma_portcounters_ext *pma_cnt_ext =
(struct ib_pma_portcounters_ext *)(out_mad->data + 40);
pma_cnt_ext_assign(pma_cnt_ext, vc);
} else {
struct ib_pma_portcounters *pma_cnt =
(struct ib_pma_portcounters *)(out_mad->data + 40);
ASSIGN_16BIT_COUNTER(pma_cnt->port_rcv_errors,
(u16)vc->received_errors.packets);
pma_cnt_assign(pma_cnt, vc);
}
err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
kfree(vc);
return err;
if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
struct ib_pma_portcounters_ext *pma_cnt_ext =
(struct ib_pma_portcounters_ext *)(out_mad->data + 40);
int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
out_cnt = mlx5_vzalloc(sz);
if (!out_cnt)
return IB_MAD_RESULT_FAILURE;
err = mlx5_core_query_vport_counter(dev->mdev, 0, 0,
port_num, out_cnt, sz);
if (!err)
pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
} else {
struct ib_pma_portcounters *pma_cnt =
(struct ib_pma_portcounters *)(out_mad->data + 40);
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
out_cnt = mlx5_vzalloc(sz);
if (!out_cnt)
return IB_MAD_RESULT_FAILURE;
err = mlx5_core_query_ib_ppcnt(dev->mdev, port_num,
out_cnt, sz);
if (!err)
pma_cnt_assign(pma_cnt, out_cnt);
}
kvfree(out_cnt);
if (err)
return IB_MAD_RESULT_FAILURE;
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
struct ib_mad *in_mad, struct ib_mad *out_mad)
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad_hdr *in, size_t in_mad_size,
struct ib_mad_hdr *out, size_t *out_mad_size,
u16 *out_mad_pkey_index)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
*out_mad_size != sizeof(*out_mad)))
return IB_MAD_RESULT_FAILURE;
memset(out_mad->data, 0, sizeof(out_mad->data));
if (MLX5_CAP_GEN(mdev, vport_counters) &&
in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
/* TBD: read error counters from the PPCNT */
return process_pma_cmd(ibdev, port_num, in_mad, out_mad);
} else {
return process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
@ -225,7 +284,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
return err;
}
int mlx5_query_smp_attr_node_info_mad_ifc(struct ib_device *ibdev,
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
struct ib_smp *out_mad)
{
struct ib_smp *in_mad = NULL;
@ -245,7 +304,7 @@ int mlx5_query_smp_attr_node_info_mad_ifc(struct ib_device *ibdev,
return err;
}
int mlx5_query_system_image_guid_mad_ifc(struct ib_device *ibdev,
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
__be64 *sys_image_guid)
{
struct ib_smp *out_mad = NULL;
@ -255,7 +314,7 @@ int mlx5_query_system_image_guid_mad_ifc(struct ib_device *ibdev,
if (!out_mad)
return -ENOMEM;
err = mlx5_query_smp_attr_node_info_mad_ifc(ibdev, out_mad);
err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
if (err)
goto out;
@ -267,7 +326,7 @@ int mlx5_query_system_image_guid_mad_ifc(struct ib_device *ibdev,
return err;
}
int mlx5_query_max_pkeys_mad_ifc(struct ib_device *ibdev,
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
u16 *max_pkeys)
{
struct ib_smp *out_mad = NULL;
@ -277,7 +336,7 @@ int mlx5_query_max_pkeys_mad_ifc(struct ib_device *ibdev,
if (!out_mad)
return -ENOMEM;
err = mlx5_query_smp_attr_node_info_mad_ifc(ibdev, out_mad);
err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
if (err)
goto out;
@ -289,7 +348,7 @@ int mlx5_query_max_pkeys_mad_ifc(struct ib_device *ibdev,
return err;
}
int mlx5_query_vendor_id_mad_ifc(struct ib_device *ibdev,
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
u32 *vendor_id)
{
struct ib_smp *out_mad = NULL;
@ -299,7 +358,7 @@ int mlx5_query_vendor_id_mad_ifc(struct ib_device *ibdev,
if (!out_mad)
return -ENOMEM;
err = mlx5_query_smp_attr_node_info_mad_ifc(ibdev, out_mad);
err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
if (err)
goto out;
@ -311,7 +370,7 @@ int mlx5_query_vendor_id_mad_ifc(struct ib_device *ibdev,
return err;
}
int mlx5_query_node_desc_mad_ifc(struct mlx5_ib_dev *dev, char *node_desc)
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
@ -329,14 +388,14 @@ int mlx5_query_node_desc_mad_ifc(struct mlx5_ib_dev *dev, char *node_desc)
if (err)
goto out;
memcpy(node_desc, out_mad->data, 64);
memcpy(node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
out:
kfree(in_mad);
kfree(out_mad);
return err;
}
int mlx5_query_node_guid_mad_ifc(struct mlx5_ib_dev *dev, u64 *node_guid)
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
@ -354,14 +413,14 @@ int mlx5_query_node_guid_mad_ifc(struct mlx5_ib_dev *dev, u64 *node_guid)
if (err)
goto out;
memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
memcpy(node_guid, out_mad->data + 12, 8);
out:
kfree(in_mad);
kfree(out_mad);
return err;
}
int mlx5_query_pkey_mad_ifc(struct ib_device *ibdev, u8 port, u16 index,
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey)
{
struct ib_smp *in_mad = NULL;
@ -390,7 +449,7 @@ int mlx5_query_pkey_mad_ifc(struct ib_device *ibdev, u8 port, u16 index,
return err;
}
int mlx5_query_gids_mad_ifc(struct ib_device *ibdev, u8 port, int index,
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid)
{
struct ib_smp *in_mad = NULL;
@ -430,7 +489,7 @@ int mlx5_query_gids_mad_ifc(struct ib_device *ibdev, u8 port, int index,
return err;
}
int mlx5_query_port_mad_ifc(struct ib_device *ibdev, u8 port,
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
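
One detail worth spelling out from pma_cnt_ext_assign() above: the PMA PortXmitData/PortRcvData attributes are defined as octet counts divided by four (that is, in 32-bit words), which is why the driver sums the unicast and multicast octet counters and shifts the result right by two (MLX5_SUM_CNT(...) >> 2). A small stand-alone example of that conversion, with made-up sample values:

/* PortXmitData is the transmitted octet count divided by four, so the sum of
 * the unicast and multicast octet counters is shifted right by two before it
 * is reported, exactly as the >> 2 above does.  Sample values are arbitrary. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t tx_unicast_octets   = 1000000;    /* made-up counter values */
    uint64_t tx_multicast_octets = 24000;

    uint64_t port_xmit_data = (tx_unicast_octets + tx_multicast_octets) >> 2;

    printf("port_xmit_data = %" PRIu64 " (32-bit words)\n", port_xmit_data);
    return 0;
}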

File diff suppressed because it is too large.


@ -27,10 +27,9 @@
#include <linux/module.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
CTASSERT(sizeof(uintptr_t) == sizeof(unsigned long));
/* @umem: umem object to scan
* @addr: ib virtual address requested by the user
* @count: number of PAGE_SIZE pages covered by umem
@ -38,7 +37,6 @@ CTASSERT(sizeof(uintptr_t) == sizeof(unsigned long));
* @ncont: number of compound pages
* @order: log2 of the number of compound pages
*/
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
int *ncont, int *order)
{
@ -55,29 +53,38 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
int entry;
unsigned long page_shift = ilog2(umem->page_size);
/* With ODP we must always match OS page size. */
if (umem->odp_data) {
*count = ib_umem_page_count(umem);
*shift = PAGE_SHIFT;
*ncont = *count;
if (order)
*order = ilog2(roundup_pow_of_two(*count));
return;
}
addr = addr >> page_shift;
tmp = (uintptr_t)addr;
m = find_first_bit(&tmp, 8 * sizeof(tmp));
tmp = (unsigned long)addr;
m = find_first_bit(&tmp, BITS_PER_LONG);
skip = 1 << m;
mask = skip - 1;
i = 0;
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
len = sg_dma_len(sg) >> page_shift;
pfn = sg_dma_address(sg) >> page_shift;
for (k = 0; k < len; k++) {
if (!(i & mask)) {
tmp = (uintptr_t)pfn;
m = min_t(unsigned long, m,
find_first_bit(&tmp, 8 * sizeof(tmp)));
tmp = (unsigned long)pfn;
m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG));
skip = 1 << m;
mask = skip - 1;
base = pfn;
p = 0;
} else {
if (base + p != pfn) {
tmp = (uintptr_t)p;
m = find_first_bit(&tmp, 8 * sizeof(tmp));
tmp = (unsigned long)p;
m = find_first_bit(&tmp, BITS_PER_LONG);
skip = 1 << m;
mask = skip - 1;
base = pfn;
@ -108,6 +115,20 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
*count = i;
}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
{
u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
if (umem_dma & ODP_READ_ALLOWED_BIT)
mtt_entry |= MLX5_IB_MTT_READ;
if (umem_dma & ODP_WRITE_ALLOWED_BIT)
mtt_entry |= MLX5_IB_MTT_WRITE;
return mtt_entry;
}
#endif
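umem_dma_to_mtt() above repacks the per-page ODP bookkeeping bits that ride in the low bits of the DMA address into the read/write access bits the HCA expects in an MTT entry. A self-contained sketch of that kind of flag translation; the bit positions and macro names below are made up for illustration, only the shape of the conversion matches the driver's ODP_*_ALLOWED_BIT and MLX5_IB_MTT_* handling:

#include <stdio.h>
#include <stdint.h>

#define EX_SW_READ	(1ULL << 0)	/* software bookkeeping bits ...  */
#define EX_SW_WRITE	(1ULL << 1)	/* ... stashed in the DMA address */
#define EX_ADDR_MASK	(~0xfULL)
#define EX_HW_READ	(1ULL << 2)	/* bits the HCA expects in an MTT */
#define EX_HW_WRITE	(1ULL << 3)

static uint64_t dma_to_mtt(uint64_t dma)
{
	uint64_t mtt = dma & EX_ADDR_MASK;

	if (dma & EX_SW_READ)
		mtt |= EX_HW_READ;
	if (dma & EX_SW_WRITE)
		mtt |= EX_HW_WRITE;
	return mtt;
}

int main(void)
{
	uint64_t dma = 0x7f0000001000ULL | EX_SW_READ | EX_SW_WRITE;

	printf("dma 0x%llx -> mtt 0x%llx\n",
	    (unsigned long long)dma, (unsigned long long)dma_to_mtt(dma));
	return 0;
}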
/*
* Populate the given array with bus addresses from the umem.
*
@ -121,8 +142,8 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
* access_flags - access flags to set on all present pages.
*               use enum mlx5_ib_mtt_access_flags for this.
*/
static void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
int page_shift, size_t offset,
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
int page_shift, size_t offset, size_t num_pages,
__be64 *pas, int access_flags)
{
unsigned long umem_page_shift = ilog2(umem->page_size);
@ -134,6 +155,21 @@ static void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem
int len;
struct scatterlist *sg;
int entry;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
const bool odp = umem->odp_data != NULL;
if (odp) {
WARN_ON(shift != 0);
WARN_ON(access_flags != (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));
for (i = 0; i < num_pages; ++i) {
dma_addr_t pa = umem->odp_data->dma_list[offset + i];
pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
}
return;
}
#endif
i = 0;
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
@ -146,12 +182,10 @@ static void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem
pas[i >> shift] = cpu_to_be64(cur);
mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
i >> shift, (unsigned long long)
be64_to_cpu(pas[i >> shift]));
i >> shift, (long long)be64_to_cpu(pas[i >> shift]));
} else
mlx5_ib_dbg(dev, "=====> 0x%llx\n",
(unsigned long long)
(base + (k << umem_page_shift)));
(long long)(base + (k << umem_page_shift)));
i++;
}
}
@ -161,10 +195,9 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
int page_shift, __be64 *pas, int access_flags)
{
return __mlx5_ib_populate_pas(dev, umem, page_shift, 0,
pas,
ib_umem_num_pages(umem), pas,
access_flags);
}
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
{
u64 page_size;
@ -182,6 +215,6 @@ int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
if (buf_off & off_mask)
return -EINVAL;
*offset = (u32)(buf_off >> ilog2(off_size));
*offset = buf_off >> ilog2(off_size);
return 0;
}
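mlx5_ib_get_buf_offset() reports where the user buffer starts inside its (possibly large) hardware page: the hardware field only has 64 positions per page, so the start must be a multiple of page_size/64 and the offset is returned in those units. A hedged userland sketch of the arithmetic; the names and the 64 KB example page are assumptions, not the driver's constants:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

static int buf_offset(uint64_t addr, int page_shift, uint32_t *offset)
{
	uint64_t page_size = 1ULL << page_shift;
	uint64_t off_size = page_size >> 6;	/* 64 slots per page */
	uint64_t buf_off = addr & (page_size - 1);

	if (buf_off & (off_size - 1))
		return -EINVAL;		/* not representable in 6 bits */
	*offset = (uint32_t)(buf_off / off_size);
	return 0;
}

int main(void)
{
	uint32_t off;

	if (buf_offset(0x20800, 16, &off) == 0)	/* 64 KB page, +0x800 in */
		printf("offset slot %u\n", (unsigned)off);	/* 0x800/0x400 = 2 */
	return 0;
}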

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,252 +0,0 @@
/*-
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <dev/mlx5/vport.h>
#include <net/ipv6.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
struct net_device *mlx5_ib_get_netdev(struct ib_device *ib_dev, u8 port)
{
struct mlx5_ib_dev *dev = to_mdev(ib_dev);
return mlx5_get_protocol_dev(dev->mdev, MLX5_INTERFACE_PROTOCOL_ETH);
}
static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid,
struct net_device *ndev,
void *mlx5_addr)
{
#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
char *mlx5_addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
source_l3_address);
void *mlx5_addr_mac = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
source_mac_47_32);
union ib_gid zgid;
u16 vtag;
memset(&zgid, 0, sizeof(zgid));
if (0 == memcmp(gid, &zgid, sizeof(zgid)))
return;
ether_addr_copy(mlx5_addr_mac, IF_LLADDR(ndev));
if (VLAN_TAG(ndev, &vtag) == 0) {
MLX5_SET_RA(mlx5_addr, vlan_valid, 1);
MLX5_SET_RA(mlx5_addr, vlan_id, vtag);
}
#ifndef MLX5_USE_ROCE_VERSION_2
MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1);
memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
#else
MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2);
if (ipv6_addr_v4mapped((void *)gid)) {
MLX5_SET_RA(mlx5_addr, roce_l3_type,
MLX5_ROCE_L3_TYPE_IPV4);
memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
} else {
MLX5_SET_RA(mlx5_addr, roce_l3_type,
MLX5_ROCE_L3_TYPE_IPV6);
memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
}
#endif
}
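ib_gid_to_mlx5_roce_addr() above picks the RoCE v2 L3 type by testing whether the GID is an IPv4-mapped IPv6 address (::ffff:a.b.c.d), in which case only the last four bytes are programmed as an IPv4 source address. A standalone sketch of that check (the helper and the sample GID are invented for the example):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* ::ffff:a.b.c.d - ten zero bytes, then 0xff 0xff, then the IPv4 address. */
static int gid_is_v4mapped(const uint8_t gid[16])
{
	static const uint8_t prefix[12] =
	    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff };

	return memcmp(gid, prefix, sizeof(prefix)) == 0;
}

int main(void)
{
	uint8_t gid[16] = { 0 };

	gid[10] = 0xff; gid[11] = 0xff;
	gid[12] = 192; gid[13] = 0; gid[14] = 2; gid[15] = 1;	/* 192.0.2.1 */

	if (gid_is_v4mapped(gid))
		printf("program RoCE v2 entry as IPv4, copy gid[12..15]\n");
	else
		printf("program RoCE v2 entry as IPv6, copy all 16 bytes\n");
	return 0;
}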
int modify_gid_roce(struct ib_device *ib_dev, u8 port, unsigned int index,
const union ib_gid *gid, struct net_device *ndev)
{
struct mlx5_ib_dev *dev = to_mdev(ib_dev);
u32 in[MLX5_ST_SZ_DW(set_roce_address_in)];
u32 out[MLX5_ST_SZ_DW(set_roce_address_out)];
void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
memset(in, 0, sizeof(in));
ib_gid_to_mlx5_roce_addr(gid, ndev, in_addr);
MLX5_SET(set_roce_address_in, in, roce_address_index, index);
MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
memset(out, 0, sizeof(out));
return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}
static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
u8 *active_width)
{
switch (eth_proto_oper) {
case MLX5_PROT_MASK(MLX5_1000BASE_CX_SGMII):
case MLX5_PROT_MASK(MLX5_1000BASE_KX):
case MLX5_PROT_MASK(MLX5_100BASE_TX):
case MLX5_PROT_MASK(MLX5_1000BASE_T):
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_SDR;
break;
case MLX5_PROT_MASK(MLX5_10GBASE_T):
case MLX5_PROT_MASK(MLX5_10GBASE_CX4):
case MLX5_PROT_MASK(MLX5_10GBASE_KX4):
case MLX5_PROT_MASK(MLX5_10GBASE_KR):
case MLX5_PROT_MASK(MLX5_10GBASE_CR):
case MLX5_PROT_MASK(MLX5_10GBASE_SR):
case MLX5_PROT_MASK(MLX5_10GBASE_ER):
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_QDR;
break;
case MLX5_PROT_MASK(MLX5_25GBASE_CR):
case MLX5_PROT_MASK(MLX5_25GBASE_KR):
case MLX5_PROT_MASK(MLX5_25GBASE_SR):
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_EDR;
break;
case MLX5_PROT_MASK(MLX5_40GBASE_CR4):
case MLX5_PROT_MASK(MLX5_40GBASE_KR4):
case MLX5_PROT_MASK(MLX5_40GBASE_SR4):
case MLX5_PROT_MASK(MLX5_40GBASE_LR4):
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_QDR;
break;
case MLX5_PROT_MASK(MLX5_50GBASE_CR2):
case MLX5_PROT_MASK(MLX5_50GBASE_KR2):
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_FDR;
break;
case MLX5_PROT_MASK(MLX5_56GBASE_R4):
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_FDR;
break;
case MLX5_PROT_MASK(MLX5_100GBASE_CR4):
case MLX5_PROT_MASK(MLX5_100GBASE_SR4):
case MLX5_PROT_MASK(MLX5_100GBASE_KR4):
case MLX5_PROT_MASK(MLX5_100GBASE_LR4):
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_EDR;
break;
default:
return -EINVAL;
}
return 0;
}
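translate_eth_proto_oper() collapses the negotiated Ethernet protocol into the (active_width, active_speed) pair that ib_port_attr reports; the product of lane count and per-lane rate gives the nominal link rate, which is how 25GBASE-* ends up as 1X EDR and 100GBASE-*4 as 4X EDR. A short sketch of that bookkeeping with simplified stand-in enums (the names and approximate per-lane rates below are not the ibcore definitions):

#include <stdio.h>

enum ex_width { EX_WIDTH_1X = 1, EX_WIDTH_4X = 4 };
enum ex_speed { EX_QDR = 10, EX_FDR = 14, EX_EDR = 25 };	/* approx Gb/s per lane */

struct ex_link { enum ex_width width; enum ex_speed speed; };

static int nominal_gbps(struct ex_link l)
{
	return (int)l.width * (int)l.speed;
}

int main(void)
{
	struct ex_link l25  = { EX_WIDTH_1X, EX_EDR };	/* e.g. 25GBASE-CR   */
	struct ex_link l100 = { EX_WIDTH_4X, EX_EDR };	/* e.g. 100GBASE-CR4 */

	printf("25G port  -> %d Gb/s nominal\n", nominal_gbps(l25));
	printf("100G port -> %d Gb/s nominal\n", nominal_gbps(l100));
	return 0;
}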
static int mlx5_query_roce_port_ptys(struct ib_device *ib_dev,
struct ib_port_attr *props, u8 port)
{
struct mlx5_ib_dev *dev = to_mdev(ib_dev);
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_ptys_reg *ptys;
int err;
ptys = kzalloc(sizeof(*ptys), GFP_KERNEL);
if (!ptys)
return -ENOMEM;
ptys->proto_mask |= MLX5_PTYS_EN;
ptys->local_port = port;
err = mlx5_core_access_ptys(mdev, ptys, 0);
if (err)
goto out;
err = translate_eth_proto_oper(ptys->eth_proto_oper,
&props->active_speed,
&props->active_width);
out:
kfree(ptys);
return err;
}
int mlx5_query_port_roce(struct ib_device *ib_dev, u8 port,
struct ib_port_attr *props)
{
struct net_device *netdev = mlx5_ib_get_netdev(ib_dev, port);
struct mlx5_ib_dev *dev = to_mdev(ib_dev);
enum ib_mtu netdev_ib_mtu;
memset(props, 0, sizeof(*props));
props->port_cap_flags |= IB_PORT_CM_SUP;
props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
roce_address_table_size);
props->max_mtu = IB_MTU_4096;
props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
props->pkey_tbl_len = 1;
props->state = IB_PORT_DOWN;
props->phys_state = 3;
if (mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev,
(u16 *)&props->qkey_viol_cntr))
printf("mlx5_ib: WARN: ""%s failed to query qkey violations counter\n", __func__);
if (!netdev)
return 0;
if (netif_running(netdev) && netif_carrier_ok(netdev)) {
props->state = IB_PORT_ACTIVE;
props->phys_state = 5;
}
netdev_ib_mtu = iboe_get_mtu(netdev->if_mtu);
props->active_mtu = min(props->max_mtu, netdev_ib_mtu);
mlx5_query_roce_port_ptys(ib_dev, props, port);
return 0;
}
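mlx5_query_port_roce() above derives the IB view of an Ethernet port from the netdev: carrier up maps to IB_PORT_ACTIVE, and the Ethernet MTU is clamped to the closest standard IB MTU via iboe_get_mtu(). A simplified sketch of that clamping; the helper below ignores the RoCE header overhead that the in-tree iboe_get_mtu() also subtracts before comparing:

#include <stdio.h>

enum ex_ib_mtu { EX_MTU_256 = 256, EX_MTU_512 = 512, EX_MTU_1024 = 1024,
		 EX_MTU_2048 = 2048, EX_MTU_4096 = 4096 };

/* Largest standard IB MTU that still fits in the Ethernet MTU. */
static enum ex_ib_mtu eth_to_ib_mtu(int eth_mtu)
{
	if (eth_mtu >= 4096) return EX_MTU_4096;
	if (eth_mtu >= 2048) return EX_MTU_2048;
	if (eth_mtu >= 1024) return EX_MTU_1024;
	if (eth_mtu >= 512)  return EX_MTU_512;
	return EX_MTU_256;
}

int main(void)
{
	printf("eth MTU 1500 -> IB MTU %d\n", eth_to_ib_mtu(1500));	/* 1024 */
	printf("eth MTU 9000 -> IB MTU %d\n", eth_to_ib_mtu(9000));	/* 4096 */
	return 0;
}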
__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port,
int index, __be16 ah_s_udp_port)
{
#ifndef MLX5_USE_ROCE_VERSION_2
return 0;
#else
return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
#endif
}
int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port,
int index, int *gid_type)
{
union ib_gid gid;
int ret;
ret = ib_get_cached_gid(&dev->ib_dev, port, index, &gid);
if (!ret)
*gid_type = -1;
return ret;
}

View File

@ -33,7 +33,6 @@
#include <rdma/ib_user_verbs.h>
#include "mlx5_ib.h"
#include "user.h"
/* not supported currently */
static int srq_signature;
@ -59,7 +58,8 @@ static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, int type)
event.event = IB_EVENT_SRQ_ERR;
break;
default:
printf("mlx5_ib: WARN: ""mlx5_ib: Unexpected event type %d on SRQ %06x\n", type, srq->srqn);
pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n",
type, srq->srqn);
return;
}
@ -69,31 +69,39 @@ static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, int type)
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
struct mlx5_create_srq_mbox_in **in,
struct ib_udata *udata, int buf_size, int *inlen)
struct ib_udata *udata, int buf_size, int *inlen,
int type)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_create_srq ucmd;
struct mlx5_ib_create_srq ucmd = {};
size_t ucmdlen;
void *xsrqc;
int err;
int npages;
int page_shift;
int ncont;
int drv_data = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
u32 offset;
u32 uidx = MLX5_IB_DEFAULT_UIDX;
ucmdlen = (drv_data < sizeof(ucmd)) ?
drv_data : sizeof(ucmd);
ucmdlen = min(udata->inlen, sizeof(ucmd));
if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
mlx5_ib_err(dev, "failed copy udata\n");
mlx5_ib_dbg(dev, "failed copy udata\n");
return -EFAULT;
}
if (ucmdlen == sizeof(ucmd) &&
ucmd.reserved1 != 0) {
mlx5_ib_warn(dev, "corrupted ucmd\n");
if (ucmd.reserved0 || ucmd.reserved1)
return -EINVAL;
if (udata->inlen > sizeof(ucmd) &&
!ib_is_udata_cleared(udata, sizeof(ucmd),
udata->inlen - sizeof(ucmd)))
return -EINVAL;
if (type == IB_SRQT_XRC) {
err = get_srq_user_index(to_mucontext(pd->uobject->context),
&ucmd, udata->inlen, &uidx);
if (err)
return err;
}
srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
@ -101,7 +109,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
0, 0);
if (IS_ERR(srq->umem)) {
mlx5_ib_warn(dev, "failed umem get, size %d\n", buf_size);
mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
err = PTR_ERR(srq->umem);
return err;
}
@ -118,7 +126,6 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
*in = mlx5_vzalloc(*inlen);
if (!(*in)) {
mlx5_ib_err(dev, "failed allocate mbox\n");
err = -ENOMEM;
goto err_umem;
}
@ -128,21 +135,18 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
ucmd.db_addr, &srq->db);
if (err) {
mlx5_ib_warn(dev, "map doorbell failed\n");
mlx5_ib_dbg(dev, "map doorbell failed\n");
goto err_in;
}
(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
(*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
if (MLX5_CAP_GEN(dev->mdev, cqe_version)) {
xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
xrc_srq_context_entry);
/* 0xffffff means we ask to work with cqe version 0 */
if (drv_data > offsetof(struct mlx5_ib_create_srq, uidx))
MLX5_SET(xrc_srqc, xsrqc, user_index, ucmd.uidx);
else
MLX5_SET(xrc_srqc, xsrqc, user_index, 0xffffff);
if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
type == IB_SRQT_XRC) {
void *xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
xrc_srq_context_entry);
MLX5_SET(xrc_srqc, xsrqc, user_index, uidx);
}
return 0;
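The reworked create_srq_user() follows the usual forward-compatibility rule for uverbs commands: copy at most sizeof(ucmd) bytes, reject non-zero reserved fields, and only accept a larger input if every byte past the known structure is zero (the ib_is_udata_cleared() check). A compact userland sketch of that pattern; the struct and helper names are invented for the example:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <errno.h>

struct ex_create_cmd {		/* what this kernel version understands */
	uint64_t buf_addr;
	uint64_t db_addr;
	uint32_t flags;
	uint32_t reserved;
};

/* Accept newer userspace only if the extra bytes it sent are all zero. */
static int ex_parse_cmd(const void *udata, size_t inlen,
    struct ex_create_cmd *cmd)
{
	const uint8_t *tail = (const uint8_t *)udata + sizeof(*cmd);
	size_t i;

	memset(cmd, 0, sizeof(*cmd));
	memcpy(cmd, udata, inlen < sizeof(*cmd) ? inlen : sizeof(*cmd));

	if (cmd->reserved != 0)
		return -EINVAL;
	for (i = 0; inlen > sizeof(*cmd) && i < inlen - sizeof(*cmd); i++)
		if (tail[i] != 0)
			return -EINVAL;
	return 0;
}

int main(void)
{
	struct ex_create_cmd cmd;
	uint8_t blob[sizeof(cmd) + 8] = { 0 };	/* "newer" userspace, zeroed tail */

	printf("parse: %d\n", ex_parse_cmd(blob, sizeof(blob), &cmd));
	return 0;
}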
@ -158,13 +162,13 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
struct mlx5_create_srq_mbox_in **in, int buf_size,
int *inlen)
int *inlen, int type)
{
int err;
int i;
struct mlx5_wqe_srq_next_seg *next;
int page_shift;
void *xsrqc;
int npages;
err = mlx5_db_alloc(dev->mdev, &srq->db);
if (err) {
@ -172,8 +176,8 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
return err;
}
if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
mlx5_ib_err(dev, "buf alloc failed\n");
if (mlx5_buf_alloc(dev->mdev, buf_size, 2 * PAGE_SIZE, &srq->buf)) {
mlx5_ib_dbg(dev, "buf alloc failed\n");
err = -ENOMEM;
goto err_db;
}
@ -189,10 +193,12 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
cpu_to_be16((i + 1) & (srq->msrq.max - 1));
}
*inlen = sizeof(**in) + sizeof(*(*in)->pas) * srq->buf.npages;
npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));
mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n",
buf_size, page_shift, srq->buf.npages, npages);
*inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages;
*in = mlx5_vzalloc(*inlen);
if (!*in) {
mlx5_ib_err(dev, "failed allocate mbox\n");
err = -ENOMEM;
goto err_buf;
}
@ -200,6 +206,8 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
mlx5_ib_dbg(dev, "kmalloc failed %lu\n",
(unsigned long)(srq->msrq.max * sizeof(u64)));
err = -ENOMEM;
goto err_in;
}
@ -207,11 +215,11 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
if (MLX5_CAP_GEN(dev->mdev, cqe_version)) {
xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
xrc_srq_context_entry);
/* 0xffffff means we ask to work with cqe version 0 */
MLX5_SET(xrc_srqc, xsrqc, user_index, 0xffffff);
if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
type == IB_SRQT_XRC) {
void *xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
xrc_srq_context_entry);
MLX5_SET(xrc_srqc, xsrqc, user_index, MLX5_IB_DEFAULT_UIDX);
}
return 0;
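create_srq_kernel() above now sizes the create mailbox by hardware pages rather than CPU pages: when the chosen page_shift is larger than PAGE_SHIFT, DIV_ROUND_UP folds several CPU pages into a single PAS entry. A small sketch of that sizing under the assumption of 4 KB CPU pages (macro and function names are illustrative):

#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SHIFT		12			/* 4 KB CPU pages (assumed) */
#define EX_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Number of 64-bit PAS slots needed when the buffer is described to
 * firmware with 2^hw_page_shift hardware pages. */
static size_t pas_entries(size_t cpu_pages, int hw_page_shift)
{
	return EX_DIV_ROUND_UP(cpu_pages,
	    (size_t)1 << (hw_page_shift - EX_PAGE_SHIFT));
}

int main(void)
{
	/* 32 CPU pages (128 KB) described with 64 KB hardware pages -> 2 PAS. */
	printf("%zu PAS entries\n", pas_entries(32, 16));
	return 0;
}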
@ -258,9 +266,9 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
/* Sanity check SRQ size before proceeding */
if (init_attr->attr.max_wr >= max_srq_wqes) {
mlx5_ib_warn(dev, "max_wr %d, cap %d\n",
init_attr->attr.max_wr,
max_srq_wqes);
mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
init_attr->attr.max_wr,
max_srq_wqes);
return ERR_PTR(-EINVAL);
}
@ -286,9 +294,9 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
srq->msrq.max_avail_gather);
if (pd->uobject)
err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen);
err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen, init_attr->srq_type);
else
err = create_srq_kernel(dev, srq, &in, buf_size, &inlen);
err = create_srq_kernel(dev, srq, &in, buf_size, &inlen, init_attr->srq_type);
if (err) {
mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
@ -315,7 +323,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen, is_xrc);
kvfree(in);
if (err) {
mlx5_ib_warn(dev, "create SRQ failed, err %d\n", err);
mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
goto err_usr_kern_srq;
}
@ -326,7 +334,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
if (pd->uobject)
if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
mlx5_ib_err(dev, "copy to user failed\n");
mlx5_ib_dbg(dev, "copy to user failed\n");
err = -EFAULT;
goto err_core;
}
@ -450,7 +458,6 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
err = -EIO;
*bad_wr = wr;
nreq = 0;
goto out;
}

View File

@ -0,0 +1,54 @@
/*-
* Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <linux/module.h>
#include <dev/mlx5/vport.h>
#include "mlx5_ib.h"
int mlx5_ib_get_vf_config(struct ib_device *device, int vf, u8 port,
struct ifla_vf_info *info)
{
return -EOPNOTSUPP;
}
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
u8 port, int state)
{
return -EOPNOTSUPP;
}
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
u8 port, struct ifla_vf_stats *stats)
{
return -EOPNOTSUPP;
}
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
u64 guid, int type)
{
return -EOPNOTSUPP;
}

View File

@ -1,318 +0,0 @@
/*-
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef MLX5_IB_USER_H
#define MLX5_IB_USER_H
#include <linux/types.h>
enum {
MLX5_QP_FLAG_SIGNATURE = 1 << 0,
};
enum {
MLX5_SRQ_FLAG_SIGNATURE = 1 << 0,
};
enum {
MLX5_WQ_FLAG_SIGNATURE = 1 << 0,
};
/* Increment this value if any changes that break userspace ABI
* compatibility are made.
*/
#define MLX5_IB_UVERBS_ABI_VERSION 1
/* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
* In particular do not use pointer types -- pass pointers in __u64
* instead.
*/
struct mlx5_ib_alloc_ucontext_req {
__u32 total_num_uuars;
__u32 num_low_latency_uuars;
};
struct mlx5_ib_alloc_ucontext_req_v2 {
__u32 total_num_uuars;
__u32 num_low_latency_uuars;
__u32 flags;
__u32 reserved;
};
struct mlx5_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 bf_reg_size;
__u32 tot_uuars;
__u32 cache_line_size;
__u16 max_sq_desc_sz;
__u16 max_rq_desc_sz;
__u32 max_send_wqebb;
__u32 max_recv_wr;
__u32 max_srq_recv_wr;
__u16 num_ports;
__u16 reserved;
__u32 max_desc_sz_sq_dc;
__u32 atomic_arg_sizes_dc;
__u32 reserved1;
__u32 flags;
__u32 reserved2[5];
};
enum mlx5_exp_ib_alloc_ucontext_data_resp_mask {
MLX5_EXP_ALLOC_CTX_RESP_MASK_CQE_COMP_MAX_NUM = 1 << 0,
MLX5_EXP_ALLOC_CTX_RESP_MASK_CQE_VERSION = 1 << 1,
MLX5_EXP_ALLOC_CTX_RESP_MASK_RROCE_UDP_SPORT_MIN = 1 << 2,
MLX5_EXP_ALLOC_CTX_RESP_MASK_RROCE_UDP_SPORT_MAX = 1 << 3,
MLX5_EXP_ALLOC_CTX_RESP_MASK_HCA_CORE_CLOCK_OFFSET = 1 << 4,
};
struct mlx5_exp_ib_alloc_ucontext_data_resp {
__u32 comp_mask; /* use mlx5_exp_ib_alloc_ucontext_data_resp_mask */
__u16 cqe_comp_max_num;
__u8 cqe_version;
__u8 reserved;
__u16 rroce_udp_sport_min;
__u16 rroce_udp_sport_max;
__u32 hca_core_clock_offset;
};
struct mlx5_exp_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 bf_reg_size;
__u32 tot_uuars;
__u32 cache_line_size;
__u16 max_sq_desc_sz;
__u16 max_rq_desc_sz;
__u32 max_send_wqebb;
__u32 max_recv_wr;
__u32 max_srq_recv_wr;
__u16 num_ports;
__u16 reserved;
__u32 max_desc_sz_sq_dc;
__u32 atomic_arg_sizes_dc;
__u32 reserved1;
__u32 flags;
__u32 reserved2[5];
/* Some more reserved fields for
* future growth of mlx5_ib_alloc_ucontext_resp */
__u64 prefix_reserved[8];
struct mlx5_exp_ib_alloc_ucontext_data_resp exp_data;
};
struct mlx5_ib_alloc_pd_resp {
__u32 pdn;
};
struct mlx5_ib_create_cq {
__u64 buf_addr;
__u64 db_addr;
__u32 cqe_size;
__u32 reserved; /* explicit padding (optional on i386) */
};
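The explicit padding called out above is what keeps these structures identical across architectures: two __u64 members followed by a lone __u32 would occupy 20 bytes on i386 (where 64-bit members are only 4-byte aligned) but 24 bytes on amd64, so the reserved word pins both to 24. A hedged sketch that checks such a layout at compile time (the mirror struct below is hypothetical, not part of the ABI):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical mirror of a kernel/user command with explicit padding. */
struct ex_create_cq {
	uint64_t buf_addr;
	uint64_t db_addr;
	uint32_t cqe_size;
	uint32_t reserved;	/* keeps i386 and amd64 layouts identical */
};

/* Without the padding, i386 would produce a 20-byte struct while amd64
 * would still pad to 24, and the uverbs ABI would differ by architecture. */
_Static_assert(sizeof(struct ex_create_cq) == 24, "ABI size changed");
_Static_assert(offsetof(struct ex_create_cq, cqe_size) == 16, "ABI offset changed");

int main(void)
{
	printf("struct size %zu\n", sizeof(struct ex_create_cq));
	return 0;
}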
enum mlx5_exp_ib_create_cq_mask {
MLX5_EXP_CREATE_CQ_MASK_CQE_COMP_EN = 1 << 0,
MLX5_EXP_CREATE_CQ_MASK_CQE_COMP_RECV_TYPE = 1 << 1,
MLX5_EXP_CREATE_CQ_MASK_RESERVED = 1 << 2,
};
enum mlx5_exp_cqe_comp_recv_type {
MLX5_IB_CQE_FORMAT_HASH,
MLX5_IB_CQE_FORMAT_CSUM,
};
struct mlx5_exp_ib_create_cq_data {
__u32 comp_mask; /* use mlx5_exp_ib_create_cq_mask */
__u8 cqe_comp_en;
__u8 cqe_comp_recv_type; /* use mlx5_exp_cqe_comp_recv_type */
__u16 reserved;
};
struct mlx5_exp_ib_create_cq {
__u64 buf_addr;
__u64 db_addr;
__u32 cqe_size;
__u32 reserved; /* explicit padding (optional on i386) */
/* Some more reserved fields for future growth of mlx5_ib_create_cq */
__u64 prefix_reserved[8];
/* sizeof prefix aligned with mlx5_ib_create_cq */
__u64 size_of_prefix;
struct mlx5_exp_ib_create_cq_data exp_data;
};
struct mlx5_ib_create_cq_resp {
__u32 cqn;
__u32 reserved;
};
struct mlx5_ib_resize_cq {
__u64 buf_addr;
__u16 cqe_size;
__u16 reserved0;
__u32 reserved1;
};
struct mlx5_ib_create_srq {
__u64 buf_addr;
__u64 db_addr;
__u32 flags;
__u32 reserved; /* explicit padding (optional on i386) */
__u32 uidx;
__u32 reserved1;
};
struct mlx5_ib_create_srq_resp {
__u32 srqn;
__u32 reserved;
};
struct mlx5_ib_create_qp {
__u64 buf_addr;
__u64 db_addr;
__u32 sq_wqe_count;
__u32 rq_wqe_count;
__u32 rq_wqe_shift;
__u32 flags;
};
enum mlx5_exp_ib_create_qp_mask {
MLX5_EXP_CREATE_QP_MASK_UIDX = 1 << 0,
MLX5_EXP_CREATE_QP_MASK_SQ_BUFF_ADD = 1 << 1,
MLX5_EXP_CREATE_QP_MASK_WC_UAR_IDX = 1 << 2,
MLX5_EXP_CREATE_QP_MASK_FLAGS_IDX = 1 << 3,
MLX5_EXP_CREATE_QP_MASK_RESERVED = 1 << 4,
};
enum mlx5_exp_create_qp_flags {
MLX5_EXP_CREATE_QP_MULTI_PACKET_WQE_REQ_FLAG = 1 << 0,
};
enum mlx5_exp_drv_create_qp_uar_idx {
MLX5_EXP_CREATE_QP_DB_ONLY_UUAR = -1
};
struct mlx5_exp_ib_create_qp_data {
__u32 comp_mask; /* use mlx5_exp_ib_create_qp_mask */
__u32 uidx;
__u64 sq_buf_addr;
__u32 wc_uar_index;
__u32 flags; /* use mlx5_exp_create_qp_flags */
};
struct mlx5_exp_ib_create_qp {
/* To allow casting to mlx5_ib_create_qp the prefix is the same as
* struct mlx5_ib_create_qp prefix
*/
__u64 buf_addr;
__u64 db_addr;
__u32 sq_wqe_count;
__u32 rq_wqe_count;
__u32 rq_wqe_shift;
__u32 flags;
/* Some more reserved fields for future growth of mlx5_ib_create_qp */
__u64 prefix_reserved[8];
/* sizeof prefix aligned with mlx5_ib_create_qp */
__u64 size_of_prefix;
/* Experimental data
* Add new experimental data only inside the exp struct
*/
struct mlx5_exp_ib_create_qp_data exp;
};
enum {
MLX5_EXP_INVALID_UUAR = -1,
};
struct mlx5_ib_create_qp_resp {
__u32 uuar_index;
__u32 rsvd;
};
enum mlx5_exp_ib_create_qp_resp_mask {
MLX5_EXP_CREATE_QP_RESP_MASK_FLAGS_IDX = 1 << 0,
MLX5_EXP_CREATE_QP_RESP_MASK_RESERVED = 1 << 1,
};
enum mlx5_exp_create_qp_resp_flags {
MLX5_EXP_CREATE_QP_RESP_MULTI_PACKET_WQE_FLAG = 1 << 0,
};
struct mlx5_exp_ib_create_qp_resp_data {
__u32 comp_mask; /* use mlx5_exp_ib_create_qp_resp_mask */
__u32 flags; /* use mlx5_exp_create_qp_resp_flags */
};
struct mlx5_exp_ib_create_qp_resp {
__u32 uuar_index;
__u32 rsvd;
/* Some more reserved fields for future growth of mlx5_ib_create_qp_resp */
__u64 prefix_reserved[8];
/* sizeof prefix aligned with mlx5_ib_create_qp_resp */
__u64 size_of_prefix;
/* Experimental data
* Add new experimental data only inside the exp struct
*/
struct mlx5_exp_ib_create_qp_resp_data exp;
};
struct mlx5_ib_create_dct {
__u32 uidx;
__u32 reserved;
};
struct mlx5_ib_arm_dct {
__u64 reserved0;
__u64 reserved1;
};
struct mlx5_ib_arm_dct_resp {
__u64 reserved0;
__u64 reserved1;
};
struct mlx5_ib_create_wq {
__u64 buf_addr;
__u64 db_addr;
__u32 rq_wqe_count;
__u32 rq_wqe_shift;
__u32 user_index;
__u32 flags;
};
#endif /* MLX5_IB_USER_H */

View File

@ -6,18 +6,22 @@ SRCS= \
mlx5_ib_ah.c \
mlx5_ib_cq.c \
mlx5_ib_doorbell.c \
mlx5_ib_gsi.c \
mlx5_ib_mad.c \
mlx5_ib_main.c \
mlx5_ib_mem.c \
mlx5_ib_mr.c \
mlx5_ib_qp.c \
mlx5_ib_roce.c \
mlx5_ib_srq.c \
mlx5_ib_virt.c \
device_if.h bus_if.h vnode_if.h pci_if.h \
opt_inet.h opt_inet6.h
CFLAGS+= -I${SRCTOP}/sys/ofed/include
CFLAGS+= -I${SRCTOP}/sys/ofed/include/uapi
CFLAGS+= -I${SRCTOP}/sys/compat/linuxkpi/common/include
CFLAGS+= -DCONFIG_INFINIBAND_USER_MEM
CFLAGS+= -DINET -DINET6
.include <bsd.kmod.mk>