mirror of https://git.FreeBSD.org/src.git
ibcore: Declare ib_post_send() and ib_post_recv() arguments const
Since neither ib_post_send() nor ib_post_recv() modifies the data structure
its second argument points at, declare that argument const. This change makes
it necessary to declare the 'bad_wr' argument const too, and also to modify
all ULPs that call ib_post_send(), ib_post_recv() or ib_post_srq_recv(). This
patch does not change any functionality, but it makes it possible for the
compiler to verify that the ib_post_(send|recv|srq_recv) functions really do
not modify the posted work request.

Linux commit:
f696bf6d64b195b83ca1bdb7cd33c999c9dcf514
7bb1fafc2f163ad03a2007295bb2f57cfdbfb630
d34ac5cd3a73aacd11009c4fc3ba15d7ea62c411

MFC after:	1 week
Reviewed by:	kib
Sponsored by:	Mellanox Technologies // NVIDIA Networking
commit c3987b8ea7
parent 4fb0a74e08
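For ULP authors the practical effect is small but visible at every call site:
the posted WR chain is passed as const, and the 'bad_wr' cursor becomes a
pointer to const. A minimal sketch of a caller under the new prototypes
follows; the helper name and setup are illustrative, not part of this commit:

#include <rdma/ib_verbs.h>

/* Hypothetical helper: post one signaled SEND on a QP. */
static int example_post_one_send(struct ib_qp *qp, struct ib_sge *sge)
{
	struct ib_send_wr wr = {};
	const struct ib_send_wr *bad_wr;   /* was: struct ib_send_wr *bad_wr */

	wr.opcode     = IB_WR_SEND;
	wr.sg_list    = sge;
	wr.num_sge    = 1;
	wr.send_flags = IB_SEND_SIGNALED;

	/*
	 * On failure ib_post_send() may point bad_wr into the caller's
	 * chain, but it can no longer modify the work requests themselves.
	 */
	return ib_post_send(qp, &wr, &bad_wr);
}

The reconstructed diff follows (file headers were not preserved by the
mirror's diff view; hunks are grouped per file, separated by blank lines).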
@@ -358,7 +358,7 @@ static void krping_cq_event_handler(struct ib_cq *cq, void *ctx)
 {
 	struct krping_cb *cb = ctx;
 	struct ib_wc wc;
-	struct ib_recv_wr *bad_wr;
+	const struct ib_recv_wr *bad_wr;
 	int ret;

 	BUG_ON(cb->cq != cq);
@@ -705,7 +705,7 @@ static int krping_setup_qp(struct krping_cb *cb, struct rdma_cm_id *cm_id)
 static u32 krping_rdma_rkey(struct krping_cb *cb, u64 buf, int post_inv)
 {
 	u32 rkey;
-	struct ib_send_wr *bad_wr;
+	const struct ib_send_wr *bad_wr;
 	int ret;
 	struct scatterlist sg = {0};

@@ -772,7 +772,8 @@ static void krping_format_send(struct krping_cb *cb, u64 buf)

 static void krping_test_server(struct krping_cb *cb)
 {
-	struct ib_send_wr *bad_wr, inv;
+	const struct ib_send_wr *bad_wr;
+	struct ib_send_wr inv;
 	int ret;

 	while (1) {
@@ -913,7 +914,7 @@ static void rlat_test(struct krping_cb *cb)
 	struct timeval start_tv, stop_tv;
 	int ret;
 	struct ib_wc wc;
-	struct ib_send_wr *bad_wr;
+	const struct ib_send_wr *bad_wr;
 	int ne;

 	scnt = 0;
@@ -1054,7 +1055,7 @@ static void wlat_test(struct krping_cb *cb)
 	}

 	if (scnt < iters) {
-		struct ib_send_wr *bad_wr;
+		const struct ib_send_wr *bad_wr;

 		*buf = (char)scnt+1;
 		if (scnt < cycle_iters)
@@ -1187,7 +1188,7 @@ static void bw_test(struct krping_cb *cb)
 	while (scnt < iters || ccnt < iters) {

 		while (scnt < iters && scnt - ccnt < cb->txdepth) {
-			struct ib_send_wr *bad_wr;
+			const struct ib_send_wr *bad_wr;

 			if (scnt < cycle_iters)
 				post_cycles_start[scnt] = get_cycles();
@@ -1263,7 +1264,7 @@ static void bw_test(struct krping_cb *cb)

 static void krping_rlat_test_server(struct krping_cb *cb)
 {
-	struct ib_send_wr *bad_wr;
+	const struct ib_send_wr *bad_wr;
 	struct ib_wc wc;
 	int ret;

@@ -1296,7 +1297,7 @@ static void krping_rlat_test_server(struct krping_cb *cb)

 static void krping_wlat_test_server(struct krping_cb *cb)
 {
-	struct ib_send_wr *bad_wr;
+	const struct ib_send_wr *bad_wr;
 	struct ib_wc wc;
 	int ret;

@@ -1330,7 +1331,7 @@ static void krping_wlat_test_server(struct krping_cb *cb)

 static void krping_bw_test_server(struct krping_cb *cb)
 {
-	struct ib_send_wr *bad_wr;
+	const struct ib_send_wr *bad_wr;
 	struct ib_wc wc;
 	int ret;

@@ -1434,7 +1435,7 @@ static int krping_bind_server(struct krping_cb *cb)

 static void krping_run_server(struct krping_cb *cb)
 {
-	struct ib_recv_wr *bad_wr;
+	const struct ib_recv_wr *bad_wr;
 	int ret;

 	ret = krping_bind_server(cb);
@@ -1485,7 +1486,7 @@ static void krping_run_server(struct krping_cb *cb)
 static void krping_test_client(struct krping_cb *cb)
 {
 	int ping, start, cc, i, ret;
-	struct ib_send_wr *bad_wr;
+	const struct ib_send_wr *bad_wr;
 	unsigned char c;

 	start = 65;
@@ -1558,7 +1559,7 @@ static void krping_test_client(struct krping_cb *cb)

 static void krping_rlat_test_client(struct krping_cb *cb)
 {
-	struct ib_send_wr *bad_wr;
+	const struct ib_send_wr *bad_wr;
 	struct ib_wc wc;
 	int ret;

@@ -1600,7 +1601,7 @@ static void krping_rlat_test_client(struct krping_cb *cb)
 	suseconds_t usec;
 	unsigned long long elapsed;
 	struct ib_wc wc;
-	struct ib_send_wr *bad_wr;
+	const struct ib_send_wr *bad_wr;
 	int ne;

 	cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_WRITE;
@@ -1648,7 +1649,7 @@ static void krping_rlat_test_client(struct krping_cb *cb)

 static void krping_wlat_test_client(struct krping_cb *cb)
 {
-	struct ib_send_wr *bad_wr;
+	const struct ib_send_wr *bad_wr;
 	struct ib_wc wc;
 	int ret;

@@ -1687,7 +1688,7 @@ static void krping_wlat_test_client(struct krping_cb *cb)

 static void krping_bw_test_client(struct krping_cb *cb)
 {
-	struct ib_send_wr *bad_wr;
+	const struct ib_send_wr *bad_wr;
 	struct ib_wc wc;
 	int ret;

@@ -1729,8 +1730,10 @@ static void krping_bw_test_client(struct krping_cb *cb)
  */
 static void flush_qp(struct krping_cb *cb)
 {
-	struct ib_send_wr wr = { 0 }, *bad;
-	struct ib_recv_wr recv_wr = { 0 }, *recv_bad;
+	struct ib_send_wr wr = { 0 };
+	const struct ib_send_wr *bad;
+	struct ib_recv_wr recv_wr = { 0 };
+	const struct ib_recv_wr *recv_bad;
 	struct ib_wc wc;
 	int ret;
 	int flushed = 0;
@@ -1773,7 +1776,8 @@ static void flush_qp(struct krping_cb *cb)

 static void krping_fr_test(struct krping_cb *cb)
 {
-	struct ib_send_wr inv, *bad;
+	struct ib_send_wr inv;
+	const struct ib_send_wr *bad;
 	struct ib_reg_wr fr;
 	struct ib_wc wc;
 	u8 key = 0;
@@ -1922,7 +1926,7 @@ static int krping_bind_client(struct krping_cb *cb)

 static void krping_run_client(struct krping_cb *cb)
 {
-	struct ib_recv_wr *bad_wr;
+	const struct ib_recv_wr *bad_wr;
 	int ret;

 	/* set type of service, if any */

@@ -926,10 +926,10 @@ void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
 void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
 			    struct c4iw_dev_ucontext *uctx);
 int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
-int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-		   struct ib_send_wr **bad_wr);
-int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
-		      struct ib_recv_wr **bad_wr);
+int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+		   const struct ib_send_wr **bad_wr);
+int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+		      const struct ib_recv_wr **bad_wr);
 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
 int c4iw_destroy_listen(struct iw_cm_id *cm_id);

@@ -335,7 +335,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 }

 static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
-		      struct ib_send_wr *wr, int max, u32 *plenp)
+		      const struct ib_send_wr *wr, int max, u32 *plenp)
 {
 	u8 *dstp, *srcp;
 	u32 plen = 0;
@@ -405,7 +405,7 @@ static int build_isgl(__be64 *queue_start, __be64 *queue_end,
 }

 static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
-			   struct ib_send_wr *wr, u8 *len16)
+			   const struct ib_send_wr *wr, u8 *len16)
 {
 	u32 plen;
 	int size;
@@ -472,7 +472,7 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
 }

 static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
-			    struct ib_send_wr *wr, u8 *len16)
+			    const struct ib_send_wr *wr, u8 *len16)
 {
 	u32 plen;
 	int size;
@@ -514,7 +514,7 @@ static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
 	return 0;
 }

-static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+static int build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr, u8 *len16)
 {
 	if (wr->num_sge > 1)
 		return -EINVAL;
@@ -545,7 +545,7 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
 }

 static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
-			   struct ib_recv_wr *wr, u8 *len16)
+			   const struct ib_recv_wr *wr, u8 *len16)
 {
 	int ret;

@@ -559,7 +559,7 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
 	return 0;
 }

-static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
+static int build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr,
 			  u8 *len16)
 {
 	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
@@ -609,7 +609,7 @@ void c4iw_qp_rem_ref(struct ib_qp *qp)
 	kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
 }

-static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+static void complete_sq_drain_wr(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
 {
 	struct t4_cqe cqe = {};
 	struct c4iw_cq *schp;
@@ -639,7 +639,7 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
 	spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
 }

-static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+static void complete_rq_drain_wr(struct c4iw_qp *qhp, const struct ib_recv_wr *wr)
 {
 	struct t4_cqe cqe = {};
 	struct c4iw_cq *rchp;
@@ -670,7 +670,7 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
 }

 static int build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
-		struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16)
+		const struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16)
 {
 	__be64 *p = (__be64 *)fr->pbl;

@@ -705,7 +705,7 @@ static int build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
 }

 static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
-	struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16,
+	const struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16,
 	bool dsgl_supported)
 {
 	struct fw_ri_immd *imdp;
@@ -772,8 +772,8 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
 	return 0;
 }

-int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-		   struct ib_send_wr **bad_wr)
+int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+		   const struct ib_send_wr **bad_wr)
 {
 	int err = 0;
 	u8 len16 = 0;
@@ -912,8 +912,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	return err;
 }

-int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
-		      struct ib_recv_wr **bad_wr)
+int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+		      const struct ib_recv_wr **bad_wr)
 {
 	int err = 0;
 	struct c4iw_qp *qhp;

@@ -159,7 +159,8 @@ iser_fast_reg_mr(struct icl_iser_pdu *iser_pdu,
 	struct ib_mr *mr = rsc->mr;
 	struct ib_reg_wr fastreg_wr;
 	struct ib_send_wr inv_wr;
-	struct ib_send_wr *bad_wr, *wr = NULL;
+	const struct ib_send_wr *bad_wr;
+	struct ib_send_wr *wr = NULL;
 	int ret, n;

 	/* if there a single dma entry, dma mr suffices */

@@ -614,8 +614,8 @@ int
 iser_conn_terminate(struct iser_conn *iser_conn)
 {
 	struct ib_conn *ib_conn = &iser_conn->ib_conn;
-	struct ib_send_wr *bad_send_wr;
-	struct ib_recv_wr *bad_recv_wr;
+	const struct ib_send_wr *bad_send_wr;
+	const struct ib_recv_wr *bad_recv_wr;
 	int err = 0;

 	/* terminate the iser conn only if the conn state is UP */
@@ -860,7 +860,8 @@ iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 int
 iser_post_recvl(struct iser_conn *iser_conn)
 {
-	struct ib_recv_wr rx_wr, *rx_wr_failed;
+	const struct ib_recv_wr *rx_wr_failed;
+	struct ib_recv_wr rx_wr;
 	struct ib_conn *ib_conn = &iser_conn->ib_conn;
 	struct ib_sge sge;
 	int ib_ret;
@@ -887,7 +888,8 @@ iser_post_recvl(struct iser_conn *iser_conn)
 int
 iser_post_recvm(struct iser_conn *iser_conn, int count)
 {
-	struct ib_recv_wr *rx_wr, *rx_wr_failed;
+	const struct ib_recv_wr *rx_wr_failed;
+	struct ib_recv_wr *rx_wr;
 	int i, ib_ret;
 	struct ib_conn *ib_conn = &iser_conn->ib_conn;
 	unsigned int my_rx_head = iser_conn->rx_desc_head;
@@ -925,7 +927,8 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
 	       bool signal)
 {
 	int ib_ret;
-	struct ib_send_wr send_wr, *send_wr_failed;
+	const struct ib_send_wr *send_wr_failed;
+	struct ib_send_wr send_wr;

 	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
 				      tx_desc->dma_addr, ISER_HEADERS_LEN,

@@ -760,8 +760,8 @@ int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
 int mlx4_ib_destroy_srq(struct ib_srq *srq);
 void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
-int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
-			  struct ib_recv_wr **bad_wr);
+int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+			  const struct ib_recv_wr **bad_wr);

 struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 				struct ib_qp_init_attr *init_attr,
@@ -771,10 +771,10 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		      int attr_mask, struct ib_udata *udata);
 int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
 		     struct ib_qp_init_attr *qp_init_attr);
-int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-		      struct ib_send_wr **bad_wr);
-int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
-		      struct ib_recv_wr **bad_wr);
+int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+		      const struct ib_send_wr **bad_wr);
+int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+		      const struct ib_recv_wr **bad_wr);

 int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
 		 int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,

@@ -485,7 +485,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 {
 	struct ib_sge list;
 	struct ib_ud_wr wr;
-	struct ib_send_wr *bad_wr;
+	const struct ib_send_wr *bad_wr;
 	struct mlx4_ib_demux_pv_ctx *tun_ctx;
 	struct mlx4_ib_demux_pv_qp *tun_qp;
 	struct mlx4_rcv_tunnel_mad *tun_mad;
@@ -1275,7 +1275,8 @@ static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
 			     int index)
 {
 	struct ib_sge sg_list;
-	struct ib_recv_wr recv_wr, *bad_recv_wr;
+	struct ib_recv_wr recv_wr;
+	const struct ib_recv_wr *bad_recv_wr;
 	int size;

 	size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
@@ -1326,7 +1327,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 {
 	struct ib_sge list;
 	struct ib_ud_wr wr;
-	struct ib_send_wr *bad_wr;
+	const struct ib_send_wr *bad_wr;
 	struct mlx4_ib_demux_pv_ctx *sqp_ctx;
 	struct mlx4_ib_demux_pv_qp *sqp;
 	struct mlx4_mad_snd_buf *sqp_mad;

|
||||
}
|
||||
|
||||
static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
|
||||
struct ib_ud_wr *wr,
|
||||
const struct ib_ud_wr *wr,
|
||||
void *wqe, unsigned *mlx_seg_len)
|
||||
{
|
||||
struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
|
||||
@ -2416,7 +2416,7 @@ static u8 sl_to_vl(struct mlx4_ib_dev *dev, u8 sl, int port_num)
|
||||
}
|
||||
|
||||
#define MLX4_ROCEV2_QP1_SPORT 0xC000
|
||||
static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
|
||||
static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
|
||||
void *wqe, unsigned *mlx_seg_len)
|
||||
{
|
||||
struct ib_device *ib_dev = sqp->qp.ibqp.device;
|
||||
@ -2707,7 +2707,7 @@ static __be32 convert_access(int acc)
|
||||
}
|
||||
|
||||
static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg,
|
||||
struct ib_reg_wr *wr)
|
||||
const struct ib_reg_wr *wr)
|
||||
{
|
||||
struct mlx4_ib_mr *mr = to_mmr(wr->mr);
|
||||
|
||||
@ -2737,7 +2737,7 @@ static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
|
||||
}
|
||||
|
||||
static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
|
||||
struct ib_atomic_wr *wr)
|
||||
const struct ib_atomic_wr *wr)
|
||||
{
|
||||
if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
|
||||
aseg->swap_add = cpu_to_be64(wr->swap);
|
||||
@ -2753,7 +2753,7 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
|
||||
}
|
||||
|
||||
static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
|
||||
struct ib_atomic_wr *wr)
|
||||
const struct ib_atomic_wr *wr)
|
||||
{
|
||||
aseg->swap_add = cpu_to_be64(wr->swap);
|
||||
aseg->swap_add_mask = cpu_to_be64(wr->swap_mask);
|
||||
@ -2762,7 +2762,7 @@ static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
|
||||
}
|
||||
|
||||
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
|
||||
struct ib_ud_wr *wr)
|
||||
const struct ib_ud_wr *wr)
|
||||
{
|
||||
memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av));
|
||||
dseg->dqpn = cpu_to_be32(wr->remote_qpn);
|
||||
@ -2773,7 +2773,7 @@ static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
|
||||
|
||||
static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
|
||||
struct mlx4_wqe_datagram_seg *dseg,
|
||||
struct ib_ud_wr *wr,
|
||||
const struct ib_ud_wr *wr,
|
||||
enum mlx4_ib_qp_type qpt)
|
||||
{
|
||||
union mlx4_ext_av *av = &to_mah(wr->ah)->av;
|
||||
@ -2795,7 +2795,7 @@ static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
|
||||
dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
|
||||
}
|
||||
|
||||
static void build_tunnel_header(struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len)
|
||||
static void build_tunnel_header(const struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len)
|
||||
{
|
||||
struct mlx4_wqe_inline_seg *inl = wqe;
|
||||
struct mlx4_ib_tunnel_header hdr;
|
||||
@ -2878,7 +2878,7 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
|
||||
dseg->addr = cpu_to_be64(sg->addr);
|
||||
}
|
||||
|
||||
static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr,
|
||||
static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, const struct ib_ud_wr *wr,
|
||||
struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
|
||||
__be32 *lso_hdr_sz, __be32 *blh)
|
||||
{
|
||||
@ -2898,7 +2898,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static __be32 send_ieth(struct ib_send_wr *wr)
|
||||
static __be32 send_ieth(const struct ib_send_wr *wr)
|
||||
{
|
||||
switch (wr->opcode) {
|
||||
case IB_WR_SEND_WITH_IMM:
|
||||
@ -2920,8 +2920,8 @@ static void add_zero_len_inline(void *wqe)
|
||||
inl->byte_count = cpu_to_be32(1U << 31);
|
||||
}
|
||||
|
||||
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
||||
struct ib_send_wr **bad_wr)
|
||||
int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
|
||||
const struct ib_send_wr **bad_wr)
|
||||
{
|
||||
struct mlx4_ib_qp *qp = to_mqp(ibqp);
|
||||
void *wqe;
|
||||
@ -3255,8 +3255,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
||||
return err;
|
||||
}
|
||||
|
||||
int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
|
||||
struct ib_recv_wr **bad_wr)
|
||||
int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
|
||||
const struct ib_recv_wr **bad_wr)
|
||||
{
|
||||
struct mlx4_ib_qp *qp = to_mqp(ibqp);
|
||||
struct mlx4_wqe_data_seg *scat;
|
||||
|
@ -311,8 +311,8 @@ void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
|
||||
spin_unlock(&srq->lock);
|
||||
}
|
||||
|
||||
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
|
||||
struct ib_recv_wr **bad_wr)
|
||||
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
|
||||
const struct ib_recv_wr **bad_wr)
|
||||
{
|
||||
struct mlx4_ib_srq *srq = to_msrq(ibsrq);
|
||||
struct mlx4_wqe_srq_next_seg *next;
|
||||
|
@ -449,7 +449,7 @@ struct mlx5_umr_wr {
|
||||
u32 mkey;
|
||||
};
|
||||
|
||||
static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
|
||||
static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
|
||||
{
|
||||
return container_of(wr, struct mlx5_umr_wr, wr);
|
||||
}
|
||||
@ -837,8 +837,8 @@ int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
|
||||
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
|
||||
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
|
||||
int mlx5_ib_destroy_srq(struct ib_srq *srq);
|
||||
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
|
||||
struct ib_recv_wr **bad_wr);
|
||||
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
|
||||
const struct ib_recv_wr **bad_wr);
|
||||
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
|
||||
struct ib_qp_init_attr *init_attr,
|
||||
struct ib_udata *udata);
|
||||
@ -847,10 +847,10 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
||||
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
|
||||
struct ib_qp_init_attr *qp_init_attr);
|
||||
int mlx5_ib_destroy_qp(struct ib_qp *qp);
|
||||
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
||||
struct ib_send_wr **bad_wr);
|
||||
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
|
||||
struct ib_recv_wr **bad_wr);
|
||||
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
|
||||
const struct ib_send_wr **bad_wr);
|
||||
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
|
||||
const struct ib_recv_wr **bad_wr);
|
||||
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
|
||||
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
|
||||
void *buffer, u32 length,
|
||||
@ -994,10 +994,10 @@ int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
|
||||
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
|
||||
int qp_attr_mask,
|
||||
struct ib_qp_init_attr *qp_init_attr);
|
||||
int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
|
||||
struct ib_send_wr **bad_wr);
|
||||
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
|
||||
struct ib_recv_wr **bad_wr);
|
||||
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
|
||||
const struct ib_send_wr **bad_wr);
|
||||
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
|
||||
const struct ib_recv_wr **bad_wr);
|
||||
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
|
||||
|
||||
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
|
||||
|
@ -472,8 +472,8 @@ static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr)
|
||||
return gsi->tx_qps[qp_index];
|
||||
}
|
||||
|
||||
int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
|
||||
struct ib_send_wr **bad_wr)
|
||||
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
|
||||
const struct ib_send_wr **bad_wr)
|
||||
{
|
||||
struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
|
||||
struct ib_qp *tx_qp;
|
||||
@ -517,8 +517,8 @@ int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
|
||||
return ret;
|
||||
}
|
||||
|
||||
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
|
||||
struct ib_recv_wr **bad_wr)
|
||||
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
|
||||
const struct ib_recv_wr **bad_wr)
|
||||
{
|
||||
struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
|
||||
|
||||
|
@@ -573,41 +573,38 @@ static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 	return 0;
 }

-static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr,
+static void prep_umr_wqe_common(struct ib_pd *pd, struct mlx5_umr_wr *umrwr,
 				struct ib_sge *sg, u64 dma, int n, u32 key,
 				int page_shift)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
-	struct mlx5_umr_wr *umrwr = umr_wr(wr);

 	sg->addr = dma;
 	sg->length = ALIGN(sizeof(u64) * n, 64);
 	sg->lkey = dev->umrc.pd->local_dma_lkey;

-	wr->next = NULL;
-	wr->sg_list = sg;
+	umrwr->wr.next = NULL;
+	umrwr->wr.sg_list = sg;
 	if (n)
-		wr->num_sge = 1;
+		umrwr->wr.num_sge = 1;
 	else
-		wr->num_sge = 0;
+		umrwr->wr.num_sge = 0;

-	wr->opcode = MLX5_IB_WR_UMR;
+	umrwr->wr.opcode = MLX5_IB_WR_UMR;

 	umrwr->npages = n;
 	umrwr->page_shift = page_shift;
 	umrwr->mkey = key;
 }

-static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
+static void prep_umr_reg_wqe(struct ib_pd *pd, struct mlx5_umr_wr *umrwr,
 			     struct ib_sge *sg, u64 dma, int n, u32 key,
 			     int page_shift, u64 virt_addr, u64 len,
 			     int access_flags)
 {
-	struct mlx5_umr_wr *umrwr = umr_wr(wr);
+	prep_umr_wqe_common(pd, umrwr, sg, dma, n, key, page_shift);

-	prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift);
-
-	wr->send_flags = 0;
+	umrwr->wr.send_flags = 0;

 	umrwr->target.virt_addr = virt_addr;
 	umrwr->length = len;
@@ -616,12 +613,10 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
 }

 static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
-			       struct ib_send_wr *wr, u32 key)
+			       struct mlx5_umr_wr *umrwr, u32 key)
 {
-	struct mlx5_umr_wr *umrwr = umr_wr(wr);
-
-	wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
-	wr->opcode = MLX5_IB_WR_UMR;
+	umrwr->wr.send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+	umrwr->wr.opcode = MLX5_IB_WR_UMR;
 	umrwr->mkey = key;
 }

@@ -675,7 +670,7 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	struct umr_common *umrc = &dev->umrc;
 	struct mlx5_ib_umr_context umr_context;
 	struct mlx5_umr_wr umrwr = {};
-	struct ib_send_wr *bad;
+	const struct ib_send_wr *bad;
 	struct mlx5_ib_mr *mr;
 	struct ib_sge sg;
 	int size;
@@ -707,7 +702,7 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	mlx5_ib_init_umr_context(&umr_context);

 	umrwr.wr.wr_cqe = &umr_context.cqe;
-	prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
+	prep_umr_reg_wqe(pd, &umrwr, &sg, dma, npages, mr->mmkey.key,
 			 page_shift, virt_addr, len, access_flags);

 	down(&umrc->sem);
@@ -756,7 +751,7 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
 	int size;
 	__be64 *pas;
 	dma_addr_t dma;
-	struct ib_send_wr *bad;
+	const struct ib_send_wr *bad;
 	struct mlx5_umr_wr wr;
 	struct ib_sge sg;
 	int err = 0;
@@ -1026,7 +1021,7 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 	struct umr_common *umrc = &dev->umrc;
 	struct mlx5_ib_umr_context umr_context;
 	struct mlx5_umr_wr umrwr = {};
-	struct ib_send_wr *bad;
+	const struct ib_send_wr *bad;
 	int err;

 	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
@@ -1035,7 +1030,7 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 	mlx5_ib_init_umr_context(&umr_context);

 	umrwr.wr.wr_cqe = &umr_context.cqe;
-	prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key);
+	prep_umr_unreg_wqe(dev, &umrwr, mr->mmkey.key);

 	down(&umrc->sem);
 	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
@@ -1065,7 +1060,7 @@ static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct device *ddev = dev->ib_dev.dma_device;
 	struct mlx5_ib_umr_context umr_context;
-	struct ib_send_wr *bad;
+	const struct ib_send_wr *bad;
 	struct mlx5_umr_wr umrwr = {};
 	struct ib_sge sg;
 	struct umr_common *umrc = &dev->umrc;
@@ -1090,7 +1085,7 @@ static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
 		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
 	}

-	prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
+	prep_umr_wqe_common(pd, &umrwr, &sg, dma, npages, mr->mmkey.key,
 			    page_shift);

 	if (flags & IB_MR_REREG_PD) {

@@ -3028,7 +3028,7 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
 }

 static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
-			 struct ib_send_wr *wr, void *qend,
+			 const struct ib_send_wr *wr, void *qend,
 			 struct mlx5_ib_qp *qp, int *size)
 {
 	void *seg = eseg;
@@ -3081,7 +3081,7 @@ static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
 }

 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
-			     struct ib_send_wr *wr)
+			     const struct ib_send_wr *wr)
 {
 	memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
 	dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
@@ -3240,9 +3240,9 @@ static __be64 get_umr_update_pd_mask(void)
 }

 static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
-				struct ib_send_wr *wr)
+				const struct ib_send_wr *wr)
 {
-	struct mlx5_umr_wr *umrwr = umr_wr(wr);
+	const struct mlx5_umr_wr *umrwr = umr_wr(wr);

 	memset(umr, 0, sizeof(*umr));

@@ -3311,9 +3311,9 @@ static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
 	seg->status = MLX5_MKEY_STATUS_FREE;
 }

-static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
+static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, const struct ib_send_wr *wr)
 {
-	struct mlx5_umr_wr *umrwr = umr_wr(wr);
+	const struct mlx5_umr_wr *umrwr = umr_wr(wr);

 	memset(seg, 0, sizeof(*seg));
 	if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
@@ -3344,7 +3344,7 @@ static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
 	dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
 }

-static __be32 send_ieth(struct ib_send_wr *wr)
+static __be32 send_ieth(const struct ib_send_wr *wr)
 {
 	switch (wr->opcode) {
 	case IB_WR_SEND_WITH_IMM:
@@ -3376,7 +3376,7 @@ static u8 wq_sig(void *wqe)
 	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
 }

-static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
+static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
 			    void *wqe, int *sz)
 {
 	struct mlx5_wqe_inline_seg *seg;
@@ -3522,7 +3522,7 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr,
 	return 0;
 }

-static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
+static int set_sig_data_segment(const struct ib_sig_handover_wr *wr,
 				struct mlx5_ib_qp *qp, void **seg, int *size)
 {
 	struct ib_sig_attrs *sig_attrs = wr->sig_attrs;
@@ -3624,7 +3624,7 @@ static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
 }

 static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
-				 struct ib_sig_handover_wr *wr, u32 nelements,
+				 const struct ib_sig_handover_wr *wr, u32 nelements,
 				 u32 length, u32 pdn)
 {
 	struct ib_mr *sig_mr = wr->sig_mr;
@@ -3655,10 +3655,10 @@ static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
 }


-static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
+static int set_sig_umr_wr(const struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
 			  void **seg, int *size)
 {
-	struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
+	const struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
 	struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
 	u32 pdn = get_pd(qp)->pdn;
 	u32 klm_oct_size;
@@ -3732,7 +3732,7 @@ static int set_psv_wr(struct ib_sig_domain *domain,
 }

 static int set_reg_wr(struct mlx5_ib_qp *qp,
-		      struct ib_reg_wr *wr,
+		      const struct ib_reg_wr *wr,
 		      void **seg, int *size)
 {
 	struct mlx5_ib_mr *mr = to_mmr(wr->mr);
@@ -3797,7 +3797,7 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
 	}
 }

-static u8 get_fence(u8 fence, struct ib_send_wr *wr)
+static u8 get_fence(u8 fence, const struct ib_send_wr *wr)
 {
 	if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
 		     wr->send_flags & IB_SEND_FENCE))
@@ -3815,10 +3815,10 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr)
 	return 0;
 }

-static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
-		     struct mlx5_wqe_ctrl_seg **ctrl,
-		     struct ib_send_wr *wr, unsigned *idx,
-		     int *size, int nreq)
+static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+		       struct mlx5_wqe_ctrl_seg **ctrl,
+		       const struct ib_send_wr *wr, unsigned *idx,
+		       int *size, int nreq, bool send_signaled, bool solicited)
 {
 	if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
 		return -ENOMEM;
@@ -3829,10 +3829,8 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
 	*(uint32_t *)(*seg + 8) = 0;
 	(*ctrl)->imm = send_ieth(wr);
 	(*ctrl)->fm_ce_se = qp->sq_signal_bits |
-		(wr->send_flags & IB_SEND_SIGNALED ?
-		 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
-		(wr->send_flags & IB_SEND_SOLICITED ?
-		 MLX5_WQE_CTRL_SOLICITED : 0);
+		(send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) |
+		(solicited ? MLX5_WQE_CTRL_SOLICITED : 0);

 	*seg += sizeof(**ctrl);
 	*size = sizeof(**ctrl) / 16;
@@ -3840,6 +3838,16 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
 	return 0;
 }

+static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+		     struct mlx5_wqe_ctrl_seg **ctrl,
+		     const struct ib_send_wr *wr, unsigned *idx,
+		     int *size, int nreq)
+{
+	return __begin_wqe(qp, seg, ctrl, wr, idx, size, nreq,
+			   wr->send_flags & IB_SEND_SIGNALED,
+			   wr->send_flags & IB_SEND_SOLICITED);
+}
+
 static void finish_wqe(struct mlx5_ib_qp *qp,
 		       struct mlx5_wqe_ctrl_seg *ctrl,
 		       u8 size, unsigned idx, u64 wr_id,
@@ -3864,8 +3872,8 @@ static void finish_wqe(struct mlx5_ib_qp *qp,
 }


-int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-		      struct ib_send_wr **bad_wr)
+int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+		      const struct ib_send_wr **bad_wr)
 {
 	struct mlx5_wqe_ctrl_seg *ctrl = NULL;	/* compiler warning */
 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
@@ -3993,10 +4001,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			 * SET_PSV WQEs are not signaled and solicited
 			 * on error
 			 */
-			wr->send_flags &= ~IB_SEND_SIGNALED;
-			wr->send_flags |= IB_SEND_SOLICITED;
-			err = begin_wqe(qp, &seg, &ctrl, wr,
-					&idx, &size, nreq);
+			err = __begin_wqe(qp, &seg, &ctrl, wr,
+					  &idx, &size, nreq, false, true);
 			if (err) {
 				mlx5_ib_warn(dev, "\n");
 				err = -ENOMEM;
@@ -4016,8 +4022,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			finish_wqe(qp, ctrl, size, idx, wr->wr_id,
 				   nreq, get_fence(fence, wr),
 				   next_fence, MLX5_OPCODE_SET_PSV);
-			err = begin_wqe(qp, &seg, &ctrl, wr,
-					&idx, &size, nreq);
+			err = __begin_wqe(qp, &seg, &ctrl, wr,
+					  &idx, &size, nreq, false, true);
 			if (err) {
 				mlx5_ib_warn(dev, "\n");
 				err = -ENOMEM;
@@ -4183,8 +4189,8 @@ static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
 	sig->signature = calc_sig(sig, size);
 }

-int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
-		      struct ib_recv_wr **bad_wr)
+int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+		      const struct ib_recv_wr **bad_wr)
 {
 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
 	struct mlx5_wqe_data_seg *scat;

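The mlx5 hunks above are the one place this change is more than annotation:
mlx5_ib_post_send() used to flip IB_SEND_SIGNALED/IB_SEND_SOLICITED directly
on the caller's work request before posting the SET_PSV WQEs, exactly the
kind of write the const prototype now forbids. The commit routes those two
bits through a new __begin_wqe() helper instead. Schematically (simplified
from the hunks above, not the driver code verbatim):

	/* Before: the driver mutated the caller's WR to tune CQE generation. */
	wr->send_flags &= ~IB_SEND_SIGNALED;
	wr->send_flags |= IB_SEND_SOLICITED;
	err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);

	/* After: the WR stays untouched; the bits travel as arguments. */
	err = __begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq,
			  false /* send_signaled */, true /* solicited */);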
@@ -427,8 +427,8 @@ void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
 	spin_unlock(&srq->lock);
 }

-int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
-			  struct ib_recv_wr **bad_wr)
+int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+			  const struct ib_recv_wr **bad_wr)
 {
 	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
 	struct mlx5_wqe_srq_next_seg *next;

@@ -521,10 +521,10 @@ int mthca_max_srq_sge(struct mthca_dev *dev);
 void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
 		     enum ib_event_type event_type);
 void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
-int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
-			      struct ib_recv_wr **bad_wr);
-int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
-			      struct ib_recv_wr **bad_wr);
+int mthca_tavor_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *wr,
+			      const struct ib_recv_wr **bad_wr);
+int mthca_arbel_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *wr,
+			      const struct ib_recv_wr **bad_wr);

 void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
 		    enum ib_event_type event_type);
@@ -532,14 +532,14 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
 		   struct ib_qp_init_attr *qp_init_attr);
 int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 		    struct ib_udata *udata);
-int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-			  struct ib_send_wr **bad_wr);
-int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
-			     struct ib_recv_wr **bad_wr);
-int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-			  struct ib_send_wr **bad_wr);
-int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
-			     struct ib_recv_wr **bad_wr);
+int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+			  const struct ib_send_wr **bad_wr);
+int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+			     const struct ib_recv_wr **bad_wr);
+int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+			  const struct ib_send_wr **bad_wr);
+int mthca_arbel_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+			     const struct ib_recv_wr **bad_wr);
 void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
 			int index, int *dbd, __be32 *new_wqe);
 int mthca_alloc_qp(struct mthca_dev *dev,

@@ -1473,7 +1473,7 @@ void mthca_free_qp(struct mthca_dev *dev,

 /* Create UD header for an MLX send and build a data segment for it */
 static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
-			    int ind, struct ib_ud_wr *wr,
+			    int ind, const struct ib_ud_wr *wr,
 			    struct mthca_mlx_seg *mlx,
 			    struct mthca_data_seg *data)
 {
@@ -1566,7 +1566,7 @@ static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
 }

 static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
-					   struct ib_atomic_wr *wr)
+					   const struct ib_atomic_wr *wr)
 {
 	if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
 		aseg->swap_add = cpu_to_be64(wr->swap);
@@ -1579,7 +1579,7 @@ static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
 }

 static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
-			     struct ib_ud_wr *wr)
+			     const struct ib_ud_wr *wr)
 {
 	useg->lkey = cpu_to_be32(to_mah(wr->ah)->key);
 	useg->av_addr = cpu_to_be64(to_mah(wr->ah)->avdma);
@@ -1589,15 +1589,15 @@ static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
 }

 static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
-			     struct ib_ud_wr *wr)
+			     const struct ib_ud_wr *wr)
 {
 	memcpy(useg->av, to_mah(wr->ah)->av, MTHCA_AV_SIZE);
 	useg->dqpn = cpu_to_be32(wr->remote_qpn);
 	useg->qkey = cpu_to_be32(wr->remote_qkey);
 }

-int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-			  struct ib_send_wr **bad_wr)
+int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+			  const struct ib_send_wr **bad_wr)
 {
 	struct mthca_dev *dev = to_mdev(ibqp->device);
 	struct mthca_qp *qp = to_mqp(ibqp);
@@ -1799,8 +1799,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	return err;
 }

-int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
-			     struct ib_recv_wr **bad_wr)
+int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+			     const struct ib_recv_wr **bad_wr)
 {
 	struct mthca_dev *dev = to_mdev(ibqp->device);
 	struct mthca_qp *qp = to_mqp(ibqp);
@@ -1910,8 +1910,8 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 	return err;
 }

-int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-			  struct ib_send_wr **bad_wr)
+int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+			  const struct ib_send_wr **bad_wr)
 {
 	struct mthca_dev *dev = to_mdev(ibqp->device);
 	struct mthca_qp *qp = to_mqp(ibqp);
@@ -2150,8 +2150,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	return err;
 }

-int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
-			     struct ib_recv_wr **bad_wr)
+int mthca_arbel_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+			     const struct ib_recv_wr **bad_wr)
 {
 	struct mthca_dev *dev = to_mdev(ibqp->device);
 	struct mthca_qp *qp = to_mqp(ibqp);

@@ -466,8 +466,8 @@ void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
 	spin_unlock(&srq->lock);
 }

-int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
-			      struct ib_recv_wr **bad_wr)
+int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+			      const struct ib_recv_wr **bad_wr)
 {
 	struct mthca_dev *dev = to_mdev(ibsrq->device);
 	struct mthca_srq *srq = to_msrq(ibsrq);
@@ -566,8 +566,8 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 	return err;
 }

-int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
-			      struct ib_recv_wr **bad_wr)
+int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+			      const struct ib_recv_wr **bad_wr)
 {
 	struct mthca_dev *dev = to_mdev(ibsrq->device);
 	struct mthca_srq *srq = to_msrq(ibsrq);

@@ -485,7 +485,7 @@ qlnxr_get_vlan_id_gsi(struct ib_ah_attr *ah_attr, u16 *vlan_id)
 static inline int
 qlnxr_gsi_build_header(struct qlnxr_dev *dev,
 		struct qlnxr_qp *qp,
-		struct ib_send_wr *swr,
+		const struct ib_send_wr *swr,
 		struct ib_ud_header *udh,
 		int *roce_mode)
 {
@@ -625,7 +625,7 @@ qlnxr_gsi_build_header(struct qlnxr_dev *dev,

 static inline int
 qlnxr_gsi_build_packet(struct qlnxr_dev *dev,
-	struct qlnxr_qp *qp, struct ib_send_wr *swr,
+	struct qlnxr_qp *qp, const struct ib_send_wr *swr,
 	struct ecore_roce_ll2_packet **p_packet)
 {
 	u8 ud_header_buffer[QLNXR_MAX_UD_HEADER_SIZE];
@@ -690,8 +690,8 @@ qlnxr_gsi_build_packet(struct qlnxr_dev *dev,

 int
 qlnxr_gsi_post_send(struct ib_qp *ibqp,
-	struct ib_send_wr *wr,
-	struct ib_send_wr **bad_wr)
+	const struct ib_send_wr *wr,
+	const struct ib_send_wr **bad_wr)
 {
 	struct ecore_roce_ll2_packet *pkt = NULL;
 	struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
@@ -764,8 +764,8 @@ qlnxr_gsi_post_send(struct ib_qp *ibqp,
 #define QLNXR_LL2_RX_BUFFER_SIZE (4 * 1024)
 int
 qlnxr_gsi_post_recv(struct ib_qp *ibqp,
-	struct ib_recv_wr *wr,
-	struct ib_recv_wr **bad_wr)
+	const struct ib_recv_wr *wr,
+	const struct ib_recv_wr **bad_wr)
 {
 	struct qlnxr_dev *dev = get_qlnxr_dev((ibqp->device));
 	struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);

@@ -87,12 +87,12 @@ extern int qlnxr_gsi_poll_cq(struct ib_cq *ibcq,
 	struct ib_wc *wc);

 extern int qlnxr_gsi_post_recv(struct ib_qp *ibqp,
-		struct ib_recv_wr *wr,
-		struct ib_recv_wr **bad_wr);
+		const struct ib_recv_wr *wr,
+		const struct ib_recv_wr **bad_wr);

 extern int qlnxr_gsi_post_send(struct ib_qp *ibqp,
-		struct ib_send_wr *wr,
-		struct ib_send_wr **bad_wr);
+		const struct ib_send_wr *wr,
+		const struct ib_send_wr **bad_wr);

 extern struct ib_qp* qlnxr_create_gsi_qp(struct qlnxr_dev *dev,
 		struct ib_qp_init_attr *attrs,

@@ -393,8 +393,8 @@ qlnxr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
 #endif

 int
-qlnxr_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
-	struct ib_recv_wr **bad_wr)
+qlnxr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+	const struct ib_recv_wr **bad_wr)
 {
 	struct qlnxr_dev *dev;
 	struct qlnxr_srq *srq;
@@ -4100,8 +4100,8 @@ static u32
 qlnxr_prepare_sq_inline_data(struct qlnxr_dev *dev,
 	struct qlnxr_qp *qp,
 	u8 *wqe_size,
-	struct ib_send_wr *wr,
-	struct ib_send_wr **bad_wr,
+	const struct ib_send_wr *wr,
+	const struct ib_send_wr **bad_wr,
 	u8 *bits,
 	u8 bit)
 {
@@ -4175,7 +4175,7 @@ qlnxr_prepare_sq_inline_data(struct qlnxr_dev *dev,

 static u32
 qlnxr_prepare_sq_sges(struct qlnxr_dev *dev, struct qlnxr_qp *qp,
-	u8 *wqe_size, struct ib_send_wr *wr)
+	u8 *wqe_size, const struct ib_send_wr *wr)
 {
 	int i;
 	u32 data_size = 0;
@@ -4206,8 +4206,8 @@ qlnxr_prepare_sq_rdma_data(struct qlnxr_dev *dev,
 	struct qlnxr_qp *qp,
 	struct rdma_sq_rdma_wqe_1st *rwqe,
 	struct rdma_sq_rdma_wqe_2nd *rwqe2,
-	struct ib_send_wr *wr,
-	struct ib_send_wr **bad_wr)
+	const struct ib_send_wr *wr,
+	const struct ib_send_wr **bad_wr)
 {
 	qlnx_host_t *ha;
 	u32 ret = 0;
@@ -4238,8 +4238,8 @@ qlnxr_prepare_sq_send_data(struct qlnxr_dev *dev,
 	struct qlnxr_qp *qp,
 	struct rdma_sq_send_wqe *swqe,
 	struct rdma_sq_send_wqe *swqe2,
-	struct ib_send_wr *wr,
-	struct ib_send_wr **bad_wr)
+	const struct ib_send_wr *wr,
+	const struct ib_send_wr **bad_wr)
 {
 	qlnx_host_t *ha;
 	u32 ret = 0;
@@ -4299,7 +4299,7 @@ qlnx_handle_completed_mrs(struct qlnxr_dev *dev, struct mr_info *info)

 static int qlnxr_prepare_reg(struct qlnxr_qp *qp,
 		struct rdma_sq_fmr_wqe_1st *fwqe1,
-		struct ib_reg_wr *wr)
+		const struct ib_reg_wr *wr)
 {
 	struct qlnxr_mr *mr = get_qlnxr_mr(wr->mr);
 	struct rdma_sq_fmr_wqe_2nd *fwqe2;
@@ -4338,7 +4338,7 @@ static int qlnxr_prepare_reg(struct qlnxr_qp *qp,
 #else

 static void
-build_frmr_pbes(struct qlnxr_dev *dev, struct ib_send_wr *wr,
+build_frmr_pbes(struct qlnxr_dev *dev, const struct ib_send_wr *wr,
 	struct mr_info *info)
 {
 	int i;
@@ -4423,7 +4423,7 @@ qlnxr_prepare_safe_pbl(struct qlnxr_dev *dev, struct mr_info *info)
 static inline int
 qlnxr_prepare_fmr(struct qlnxr_qp *qp,
 	struct rdma_sq_fmr_wqe_1st *fwqe1,
-	struct ib_send_wr *wr)
+	const struct ib_send_wr *wr)
 {
 	struct qlnxr_dev *dev = qp->dev;
 	u64 fbo;
@@ -4525,7 +4525,7 @@ qlnxr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
 	}
 }
 static inline bool
-qlnxr_can_post_send(struct qlnxr_qp *qp, struct ib_send_wr *wr)
+qlnxr_can_post_send(struct qlnxr_qp *qp, const struct ib_send_wr *wr)
 {
 	int wq_is_full, err_wr, pbl_is_full;
 	struct qlnxr_dev *dev = qp->dev;
@@ -4581,8 +4581,8 @@ qlnxr_can_post_send(struct qlnxr_qp *qp, struct ib_send_wr *wr)

 int
 qlnxr_post_send(struct ib_qp *ibqp,
-	struct ib_send_wr *wr,
-	struct ib_send_wr **bad_wr)
+	const struct ib_send_wr *wr,
+	const struct ib_send_wr **bad_wr)
 {
 	struct qlnxr_dev *dev = get_qlnxr_dev(ibqp->device);
 	struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
@@ -4972,8 +4972,8 @@ qlnxr_srq_elem_left(struct qlnxr_srq_hwq_info *hw_srq)

 int
 qlnxr_post_recv(struct ib_qp *ibqp,
-	struct ib_recv_wr *wr,
-	struct ib_recv_wr **bad_wr)
+	const struct ib_recv_wr *wr,
+	const struct ib_recv_wr **bad_wr)
 {
 	struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
 	struct qlnxr_dev *dev = qp->dev;

@@ -55,8 +55,8 @@ extern int qlnxr_query_srq(struct ib_srq *,
 	struct ib_srq_attr *);

 extern int qlnxr_post_srq_recv(struct ib_srq *,
-	struct ib_recv_wr *,
-	struct ib_recv_wr **bad_recv_wr);
+	const struct ib_recv_wr *,
+	const struct ib_recv_wr **bad_recv_wr);

 #if __FreeBSD_version < 1102000
 extern int qlnxr_query_device(struct ib_device *, struct ib_device_attr *);
@@ -174,12 +174,12 @@ extern int qlnxr_process_mad(struct ib_device *ibdev,
 #endif /* #if __FreeBSD_version >= 1102000 */

 extern int qlnxr_post_send(struct ib_qp *,
-	struct ib_send_wr *,
-	struct ib_send_wr **bad_wr);
+	const struct ib_send_wr *,
+	const struct ib_send_wr **bad_wr);

 extern int qlnxr_post_recv(struct ib_qp *,
-	struct ib_recv_wr *,
-	struct ib_recv_wr **bad_wr);
+	const struct ib_recv_wr *,
+	const struct ib_recv_wr **bad_wr);

 extern int qlnxr_arm_cq(struct ib_cq *,
 	enum ib_cq_notify_flags flags);

@@ -1149,7 +1149,7 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 {
 	struct ib_mad_qp_info *qp_info;
 	struct list_head *list;
-	struct ib_send_wr *bad_send_wr;
+	const struct ib_send_wr *bad_send_wr;
 	struct ib_mad_agent *mad_agent;
 	struct ib_sge *sge;
 	unsigned long flags;
@@ -2427,7 +2427,7 @@ static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
 	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
 	struct ib_mad_qp_info *qp_info;
 	struct ib_mad_queue *send_queue;
-	struct ib_send_wr *bad_send_wr;
+	const struct ib_send_wr *bad_send_wr;
 	struct ib_mad_send_wc mad_send_wc;
 	unsigned long flags;
 	int ret;
@@ -2522,7 +2522,7 @@ static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
 	if (wc->status == IB_WC_WR_FLUSH_ERR) {
 		if (mad_send_wr->retry) {
 			/* Repost send */
-			struct ib_send_wr *bad_send_wr;
+			const struct ib_send_wr *bad_send_wr;

 			mad_send_wr->retry = 0;
 			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
@@ -2842,7 +2842,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 	int post, ret;
 	struct ib_mad_private *mad_priv;
 	struct ib_sge sg_list;
-	struct ib_recv_wr recv_wr, *bad_recv_wr;
+	struct ib_recv_wr recv_wr;
+	const struct ib_recv_wr *bad_recv_wr;
 	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

 	/* Initialize common scatter list fields */

@@ -2532,7 +2532,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 	struct ib_uverbs_post_send cmd;
 	struct ib_uverbs_post_send_resp resp;
 	struct ib_uverbs_send_wr *user_wr;
-	struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
+	struct ib_send_wr *wr = NULL, *last, *next;
+	const struct ib_send_wr *bad_wr;
 	struct ib_qp *qp;
 	int i, sg_ind;
 	int is_ud;
@@ -2804,7 +2805,8 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
 {
 	struct ib_uverbs_post_recv cmd;
 	struct ib_uverbs_post_recv_resp resp;
-	struct ib_recv_wr *wr, *next, *bad_wr;
+	struct ib_recv_wr *wr, *next;
+	const struct ib_recv_wr *bad_wr;
 	struct ib_qp *qp;
 	ssize_t ret = -EINVAL;

@@ -2854,7 +2856,8 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
 {
 	struct ib_uverbs_post_srq_recv cmd;
 	struct ib_uverbs_post_srq_recv_resp resp;
-	struct ib_recv_wr *wr, *next, *bad_wr;
+	struct ib_recv_wr *wr, *next;
+	const struct ib_recv_wr *bad_wr;
 	struct ib_srq *srq;
 	ssize_t ret = -EINVAL;


@@ -1981,7 +1981,7 @@ static void __ib_drain_sq(struct ib_qp *qp)
 {
 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 	struct ib_drain_cqe sdrain;
-	struct ib_send_wr *bad_swr;
+	const struct ib_send_wr *bad_swr;
 	struct ib_rdma_wr swr = {
 		.wr = {
 			.opcode = IB_WR_RDMA_WRITE,
@@ -2021,7 +2021,8 @@ static void __ib_drain_rq(struct ib_qp *qp)
 {
 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 	struct ib_drain_cqe rdrain;
-	struct ib_recv_wr rwr = {}, *bad_rwr;
+	struct ib_recv_wr rwr = {};
+	const struct ib_recv_wr *bad_rwr;
 	int ret;

 	if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {

@@ -149,7 +149,7 @@ ipoib_alloc_map_mb(struct ipoib_dev_priv *priv, struct ipoib_rx_buf *rx_req,
 static int ipoib_ib_post_receive(struct ipoib_dev_priv *priv, int id)
 {
 	struct ipoib_rx_buf *rx_req;
-	struct ib_recv_wr *bad_wr;
+	const struct ib_recv_wr *bad_wr;
 	struct mbuf *m;
 	int ret;
 	int i;
@@ -452,7 +452,7 @@ post_send(struct ipoib_dev_priv *priv, unsigned int wr_id,
 	struct ib_ah *address, u32 qpn, struct ipoib_tx_buf *tx_req, void *head,
 	int hlen)
 {
-	struct ib_send_wr *bad_wr;
+	const struct ib_send_wr *bad_wr;
 	struct mbuf *mb = tx_req->mb;
 	u64 *mapping = tx_req->mapping;
 	struct mbuf *m;

@@ -1263,7 +1263,7 @@ struct ib_rdma_wr {
 	u32 rkey;
 };

-static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
+static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
 {
 	return container_of(wr, struct ib_rdma_wr, wr);
 }
@@ -1278,7 +1278,7 @@ struct ib_atomic_wr {
 	u32 rkey;
 };

-static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
+static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
 {
 	return container_of(wr, struct ib_atomic_wr, wr);
 }
@@ -1295,7 +1295,7 @@ struct ib_ud_wr {
 	u8 port_num; /* valid for DR SMPs on switch only */
 };

-static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
+static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
 {
 	return container_of(wr, struct ib_ud_wr, wr);
 }
@@ -1307,7 +1307,7 @@ struct ib_reg_wr {
 	int access;
 };

-static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
+static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
 {
 	return container_of(wr, struct ib_reg_wr, wr);
 }
@@ -1320,7 +1320,7 @@ struct ib_sig_handover_wr {
 	struct ib_sge *prot;
 };

-static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
+static inline const struct ib_sig_handover_wr *sig_handover_wr(const struct ib_send_wr *wr)
 {
 	return container_of(wr, struct ib_sig_handover_wr, wr);
 }
@@ -1979,8 +1979,8 @@ struct ib_device {
 						 struct ib_srq_attr *srq_attr);
 	int                        (*destroy_srq)(struct ib_srq *srq);
 	int                        (*post_srq_recv)(struct ib_srq *srq,
-						    struct ib_recv_wr *recv_wr,
-						    struct ib_recv_wr **bad_recv_wr);
+						    const struct ib_recv_wr *recv_wr,
+						    const struct ib_recv_wr **bad_recv_wr);
 	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
 						struct ib_qp_init_attr *qp_init_attr,
 						struct ib_udata *udata);
@@ -1994,11 +1994,11 @@ struct ib_device {
 						struct ib_qp_init_attr *qp_init_attr);
 	int                        (*destroy_qp)(struct ib_qp *qp);
 	int                        (*post_send)(struct ib_qp *qp,
-						struct ib_send_wr *send_wr,
-						struct ib_send_wr **bad_send_wr);
+						const struct ib_send_wr *send_wr,
+						const struct ib_send_wr **bad_send_wr);
 	int                        (*post_recv)(struct ib_qp *qp,
-						struct ib_recv_wr *recv_wr,
-						struct ib_recv_wr **bad_recv_wr);
+						const struct ib_recv_wr *recv_wr,
+						const struct ib_recv_wr **bad_recv_wr);
 	struct ib_cq *             (*create_cq)(struct ib_device *device,
 						const struct ib_cq_init_attr *attr,
 						struct ib_ucontext *context,
@@ -2723,8 +2723,8 @@ int ib_destroy_srq(struct ib_srq *srq);
 * the work request that failed to be posted on the QP.
 */
 static inline int ib_post_srq_recv(struct ib_srq *srq,
-				   struct ib_recv_wr *recv_wr,
-				   struct ib_recv_wr **bad_recv_wr)
+				   const struct ib_recv_wr *recv_wr,
+				   const struct ib_recv_wr **bad_recv_wr)
 {
 	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
 }
@@ -2808,8 +2808,8 @@ int ib_close_qp(struct ib_qp *qp);
 * earlier work requests in the list.
 */
 static inline int ib_post_send(struct ib_qp *qp,
-			       struct ib_send_wr *send_wr,
-			       struct ib_send_wr **bad_send_wr)
+			       const struct ib_send_wr *send_wr,
+			       const struct ib_send_wr **bad_send_wr)
 {
	return qp->device->post_send(qp, send_wr, bad_send_wr);
 }
@@ -2823,8 +2823,8 @@ static inline int ib_post_send(struct ib_qp *qp,
 * the work request that failed to be posted on the QP.
 */
 static inline int ib_post_recv(struct ib_qp *qp,
-			       struct ib_recv_wr *recv_wr,
-			       struct ib_recv_wr **bad_recv_wr)
+			       const struct ib_recv_wr *recv_wr,
+			       const struct ib_recv_wr **bad_recv_wr)
 {
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
 }