
Adapt the existing SDP ULP code to the new ibcore APIs.

Requested by:	Sobczak, Bartosz <bartosz.sobczak@intel.com>
Sponsored by:	Mellanox Technologies
Hans Petter Selasky 2017-09-16 16:16:00 +00:00
parent b754c27916
commit c69c74b892
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/bsd_rdma_4_9/; revision=323643
6 changed files with 21 additions and 26 deletions
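
The six hunks below are a mechanical adaptation from the older OFED verbs interfaces to the Linux 4.9-era ibcore imported on the bsd_rdma_4_9 branch: rdma_create_id() gains a network-namespace argument, ib_query_device() is replaced by the cached ib_device.attrs, ib_modify_cq() takes the moderation parameters directly, ib_alloc_pd() takes a flags argument, the per-device DMA MR is replaced by the PD's local_dma_lkey, ib_create_cq() takes a struct ib_cq_init_attr, and an ib_client's remove() callback now receives its client data. As a quick orientation, here is a minimal sketch of the first of these changes, assuming the 4.9 prototypes; the helper name sdp_cm_id_create is illustrative and not part of the tree:

#include <rdma/rdma_cm.h>

/*
 * Sketch only: the new rdma_create_id() takes the owning network
 * namespace as its first argument; SDP simply passes &init_net.
 */
static struct rdma_cm_id *
sdp_cm_id_create(rdma_cm_event_handler handler, void *context)
{
	/* Old call: rdma_create_id(handler, context, RDMA_PS_SDP, IB_QPT_RC) */
	return (rdma_create_id(&init_net, handler, context, RDMA_PS_SDP,
	    IB_QPT_RC));
}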

View File

@@ -52,7 +52,6 @@
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>
#include <rdma/sdp_socket.h>
#include <rdma/ib_fmr_pool.h>
#ifdef SDP_DEBUG
@@ -338,7 +337,6 @@ struct sdp_rx_ring {
 struct sdp_device {
 	struct ib_pd *pd;
-	struct ib_mr *mr;
 	struct ib_fmr_pool *fmr_pool;
 };
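
The member dropped here is the cached per-device DMA MR: with the new ibcore every PD exposes a kernel local DMA lkey, so SDP no longer needs ib_get_dma_mr(). A sketch of the assumed post-change layout and of how the RX/TX paths below pick up the key; the sdp_dma_lkey() helper is illustrative only:

struct sdp_device {
	struct ib_pd *pd;
	struct ib_fmr_pool *fmr_pool;
};

/* Illustrative helper: the lkey now comes straight from the PD. */
static inline u32
sdp_dma_lkey(struct sdp_device *sdp_dev)
{
	return (sdp_dev->pd->local_dma_lkey);
}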

View File

@@ -51,15 +51,14 @@ sdp_qp_event_handler(struct ib_event *event, void *data)
 static int
 sdp_get_max_dev_sge(struct ib_device *dev)
 {
-	struct ib_device_attr attr;
+	struct ib_device_attr *device_attr;
 	static int max_sges = -1;
 	if (max_sges > 0)
 		goto out;
-	ib_query_device(dev, &attr);
-	max_sges = attr.max_sge;
+	device_attr = &dev->attrs;
+	max_sges = device_attr->max_sge;
 out:
 	return max_sges;
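
ib_query_device() no longer exists in the new ibcore; the driver fills in the attribute structure at registration time and it is cached in ib_device.attrs, so consumers simply read it. A minimal sketch of the resulting pattern, assuming the 4.9 layout; the helper name is illustrative:

#include <rdma/ib_verbs.h>

/* Device limits are now read from the cached ib_device.attrs. */
static int
sdp_max_sge_of(struct ib_device *dev)
{
	return (dev->attrs.max_sge);
}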

View File

@@ -133,7 +133,7 @@ sdp_pcbbind(struct sdp_sock *ssk, struct sockaddr *nam, struct ucred *cred)
 	/* rdma_bind_addr handles bind races. */
 	SDP_WUNLOCK(ssk);
 	if (ssk->id == NULL)
-		ssk->id = rdma_create_id(sdp_cma_handler, ssk, RDMA_PS_SDP, IB_QPT_RC);
+		ssk->id = rdma_create_id(&init_net, sdp_cma_handler, ssk, RDMA_PS_SDP, IB_QPT_RC);
 	if (ssk->id == NULL) {
 		SDP_WLOCK(ssk);
 		return (ENOMEM);
@@ -1709,14 +1709,9 @@ int sdp_mod_usec = 0;
 void
 sdp_set_default_moderation(struct sdp_sock *ssk)
 {
-	struct ib_cq_attr attr;
 	if (sdp_mod_count <= 0 || sdp_mod_usec <= 0)
 		return;
-	memset(&attr, 0, sizeof(attr));
-	attr.moderation.cq_count = sdp_mod_count;
-	attr.moderation.cq_period = sdp_mod_usec;
-	ib_modify_cq(ssk->rx_ring.cq, &attr, IB_CQ_MODERATION);
+	ib_modify_cq(ssk->rx_ring.cq, sdp_mod_count, sdp_mod_usec);
 }
 static void
@@ -1726,12 +1721,9 @@ sdp_dev_add(struct ib_device *device)
 	struct sdp_device *sdp_dev;
 	sdp_dev = malloc(sizeof(*sdp_dev), M_SDP, M_WAITOK | M_ZERO);
-	sdp_dev->pd = ib_alloc_pd(device);
+	sdp_dev->pd = ib_alloc_pd(device, 0);
 	if (IS_ERR(sdp_dev->pd))
 		goto out_pd;
-	sdp_dev->mr = ib_get_dma_mr(sdp_dev->pd, IB_ACCESS_LOCAL_WRITE);
-	if (IS_ERR(sdp_dev->mr))
-		goto out_mr;
 	memset(&param, 0, sizeof param);
 	param.max_pages_per_fmr = SDP_FMR_SIZE;
 	param.page_shift = PAGE_SHIFT;
@@ -1746,15 +1738,13 @@ sdp_dev_add(struct ib_device *device)
 	return;
 out_fmr:
-	ib_dereg_mr(sdp_dev->mr);
-out_mr:
 	ib_dealloc_pd(sdp_dev->pd);
 out_pd:
 	free(sdp_dev, M_SDP);
 }
 static void
-sdp_dev_rem(struct ib_device *device)
+sdp_dev_rem(struct ib_device *device, void *client_data)
 {
 	struct sdp_device *sdp_dev;
 	struct sdp_sock *ssk;
@@ -1778,7 +1768,6 @@ sdp_dev_rem(struct ib_device *device)
 		return;
 	ib_flush_fmr_pool(sdp_dev->fmr_pool);
 	ib_destroy_fmr_pool(sdp_dev->fmr_pool);
-	ib_dereg_mr(sdp_dev->mr);
 	ib_dealloc_pd(sdp_dev->pd);
 	free(sdp_dev, M_SDP);
 }
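
Three interface changes meet in this file: ib_alloc_pd() now takes a flags argument (zero here), ib_modify_cq() takes the moderation count and period directly instead of an ib_cq_attr, and an ib_client's remove() callback now receives the per-device pointer previously stored with ib_set_client_data(). A hedged sketch of a client registration that matches the new sdp_dev_rem() prototype; the client name and the registration comment mirror common ibcore usage and are not lines from this diff:

#include <rdma/ib_verbs.h>

static void	sdp_dev_add(struct ib_device *device);
static void	sdp_dev_rem(struct ib_device *device, void *client_data);

static struct ib_client sdp_client = {
	.name	= "sdp",
	.add	= sdp_dev_add,
	/* remove() now gets the value stored via ib_set_client_data(). */
	.remove	= sdp_dev_rem,
};

/* Registered once at module load: ib_register_client(&sdp_client); */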

View File

@@ -31,7 +31,6 @@
*/
#include <linux/proc_fs.h>
#include <rdma/sdp_socket.h>
#include "sdp.h"
#ifdef CONFIG_PROC_FS

View File

@@ -132,7 +132,7 @@ sdp_post_recv(struct sdp_sock *ssk)
 		rx_req->mapping[i] = addr;
 		sge->addr = addr;
 		sge->length = mb->m_len;
-		sge->lkey = ssk->sdp_dev->mr->lkey;
+		sge->lkey = ssk->sdp_dev->pd->local_dma_lkey;
 	}
rx_wr.next = NULL;
@@ -698,6 +698,11 @@ sdp_rx_cq_event_handler(struct ib_event *event, void *data)
 int
 sdp_rx_ring_create(struct sdp_sock *ssk, struct ib_device *device)
 {
+	struct ib_cq_init_attr rx_cq_attr = {
+		.cqe = SDP_RX_SIZE,
+		.comp_vector = 0,
+		.flags = 0,
+	};
 	struct ib_cq *rx_cq;
 	int rc = 0;
@@ -710,7 +715,7 @@ sdp_rx_ring_create(struct sdp_sock *ssk, struct ib_device *device)
 	    M_SDP, M_WAITOK);
 	rx_cq = ib_create_cq(device, sdp_rx_irq, sdp_rx_cq_event_handler,
-	    ssk, SDP_RX_SIZE, 0);
+	    ssk, &rx_cq_attr);
 	if (IS_ERR(rx_cq)) {
 		rc = PTR_ERR(rx_cq);
 		sdp_warn(ssk->socket, "Unable to allocate RX CQ: %d.\n", rc);
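
ib_create_cq() in the new ibcore takes a struct ib_cq_init_attr rather than separate cqe/comp_vector parameters, which is why both ring-create routines now build a small attribute structure first. A minimal sketch of the new calling convention, reusing the handler names and SDP_RX_SIZE from the hunk above; the wrapper itself is illustrative:

#include <rdma/ib_verbs.h>

static struct ib_cq *
sdp_rx_cq_alloc(struct ib_device *device, struct sdp_sock *ssk)
{
	struct ib_cq_init_attr attr = {
		.cqe = SDP_RX_SIZE,	/* requested number of CQ entries */
		.comp_vector = 0,	/* first completion vector */
		.flags = 0,
	};

	return (ib_create_cq(device, sdp_rx_irq, sdp_rx_cq_event_handler,
	    ssk, &attr));
}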

View File

@@ -131,7 +131,7 @@ sdp_post_send(struct sdp_sock *ssk, struct mbuf *mb)
 		tx_req->mapping[i] = addr;
 		sge->addr = addr;
 		sge->length = mb->m_len;
-		sge->lkey = ssk->sdp_dev->mr->lkey;
+		sge->lkey = ssk->sdp_dev->pd->local_dma_lkey;
 	}
 	tx_wr.next = NULL;
 	tx_wr.wr_id = mseq | SDP_OP_SEND;
@@ -418,6 +418,11 @@ sdp_tx_cq_event_handler(struct ib_event *event, void *data)
 int
 sdp_tx_ring_create(struct sdp_sock *ssk, struct ib_device *device)
 {
+	struct ib_cq_init_attr tx_cq_attr = {
+		.cqe = SDP_TX_SIZE,
+		.comp_vector = 0,
+		.flags = 0,
+	};
 	struct ib_cq *tx_cq;
 	int rc = 0;
@@ -431,7 +436,7 @@ sdp_tx_ring_create(struct sdp_sock *ssk, struct ib_device *device)
 	    M_SDP, M_WAITOK);
 	tx_cq = ib_create_cq(device, sdp_tx_irq, sdp_tx_cq_event_handler,
-	    ssk, SDP_TX_SIZE, 0);
+	    ssk, &tx_cq_attr);
 	if (IS_ERR(tx_cq)) {
 		rc = PTR_ERR(tx_cq);
 		sdp_warn(ssk->socket, "Unable to allocate TX CQ: %d.\n", rc);