
cxgbe/cxgbei: Retire all DDP related code from cxgbei and switch to
routines available in t4_tom to manage the iSCSI DDP page pod region.

This adds the ability to use multiple DDP page sizes to the iSCSI
driver, among other improvements.

Sponsored by:	Chelsio Communications
Navdeep Parhar 2016-09-01 20:43:01 +00:00
parent 2d77e0ca06
commit 7cba15b16e
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=305240
9 changed files with 257 additions and 1191 deletions
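
In outline, per-transfer DDP setup now goes through t4_tom's ppod_region /
ppod_reservation routines instead of cxgbei's private gather lists and
pagepod bookkeeping. Below is a minimal sketch of that flow, distilled from
the new icl_cxgbei_conn_task_setup() in this commit; ddp_setup_sketch is a
hypothetical name, and the no-DDP fallback and some error handling are
trimmed.

/*
 * Sketch: reserve page pods for a buffer, program them into adapter
 * memory, and hand the reservation's tag back for use as the ITT/TTT.
 */
static int
ddp_setup_sketch(struct adapter *sc, struct toepcb *toep,
    struct ppod_region *pr, vm_offset_t buf, int len, uint32_t *tagp)
{
	struct ppod_reservation *prsv;
	int rc;

	prsv = uma_zalloc(prsv_zone, M_NOWAIT);
	if (prsv == NULL)
		return (ENOMEM);

	/* Reserve a range of page pods in the iSCSI region. */
	rc = t4_alloc_page_pods_for_buf(pr, buf, len, prsv);
	if (rc != 0) {
		uma_zfree(prsv_zone, prsv);
		return (rc);
	}

	/* Write the page pods for this buffer to adapter memory. */
	rc = t4_write_page_pods_for_buf(sc, toep->ofld_txq, toep->tid,
	    prsv, buf, len);
	if (rc != 0) {
		t4_free_page_pods(prsv);
		uma_zfree(prsv_zone, prsv);
		return (rc);
	}

	/* The tag encodes the page pod location; it becomes the ITT/TTT. */
	*tagp = htobe32(prsv->prsv_tag);
	return (0);
}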


@@ -90,7 +90,6 @@ __FBSDID("$FreeBSD$");
#include "common/t4_regs.h" /* for PCIE_MEM_ACCESS */
#include "tom/t4_tom.h"
#include "cxgbei.h"
#include "cxgbei_ulp2_ddp.h"
static int worker_thread_count;
static struct cxgbei_worker_thread_softc *cwt_softc;
@@ -101,376 +100,6 @@ struct icl_pdu *icl_cxgbei_new_pdu(int);
void icl_cxgbei_new_pdu_set_conn(struct icl_pdu *, struct icl_conn *);
void icl_cxgbei_conn_pdu_free(struct icl_conn *, struct icl_pdu *);
/*
* Direct Data Placement -
* Directly place the iSCSI Data-In or Data-Out PDU's payload into pre-posted
* final destination host-memory buffers based on the Initiator Task Tag (ITT)
* in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
* The host memory address is programmed into h/w in the format of pagepod
* entries.
* The location of the pagepod entry is encoded into the ddp tag, which
* is used as the base for the ITT/TTT.
*/
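/*
 * Illustrative note (not part of the original file): the ddp tag was
 * composed from the s/w tag, the pagepod index, and a color, as in
 * cxgbei_ulp2_ddp_tag_base() from cxgbei_ulp2_ddp.h (removed later in
 * this commit):
 *
 *	tag = sw_tag << (rsvd_bits + rsvd_shift) |
 *	    idx << IPPOD_IDX_SHIFT | colors[idx];
 */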
/*
* functions to program the pagepod in h/w
*/
static void inline
ppod_set(struct pagepod *ppod,
struct cxgbei_ulp2_pagepod_hdr *hdr,
struct cxgbei_ulp2_gather_list *gl,
unsigned int pidx)
{
int i;
memcpy(ppod, hdr, sizeof(*hdr));
for (i = 0; i < (PPOD_PAGES + 1); i++, pidx++) {
ppod->addr[i] = pidx < gl->nelem ?
cpu_to_be64(gl->dma_sg[pidx].phys_addr) : 0ULL;
}
}
static void inline
ppod_clear(struct pagepod *ppod)
{
memset(ppod, 0, sizeof(*ppod));
}
static inline void
ulp_mem_io_set_hdr(struct adapter *sc, int tid, struct ulp_mem_io *req,
unsigned int wr_len, unsigned int dlen,
unsigned int pm_addr)
{
struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);
INIT_ULPTX_WR(req, wr_len, 0, 0);
req->cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
V_ULP_MEMIO_ORDER(is_t4(sc)) |
V_T5_ULP_MEMIO_IMM(is_t5(sc)));
req->dlen = htonl(V_ULP_MEMIO_DATA_LEN(dlen >> 5));
req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16)
| V_FW_WR_FLOWID(tid));
req->lock_addr = htonl(V_ULP_MEMIO_ADDR(pm_addr >> 5));
idata->cmd_more = htonl(V_ULPTX_CMD(ULP_TX_SC_IMM));
idata->len = htonl(dlen);
}
#define ULPMEM_IDATA_MAX_NPPODS 1 /* 256/PPOD_SIZE */
#define PCIE_MEMWIN_MAX_NPPODS 16 /* 1024/PPOD_SIZE */
static int
ppod_write_idata(struct cxgbei_data *ci,
struct cxgbei_ulp2_pagepod_hdr *hdr,
unsigned int idx, unsigned int npods,
struct cxgbei_ulp2_gather_list *gl,
unsigned int gl_pidx, struct toepcb *toep)
{
u_int dlen = PPOD_SIZE * npods;
u_int pm_addr = idx * PPOD_SIZE + ci->llimit;
u_int wr_len = roundup(sizeof(struct ulp_mem_io) +
sizeof(struct ulptx_idata) + dlen, 16);
struct ulp_mem_io *req;
struct ulptx_idata *idata;
struct pagepod *ppod;
u_int i;
struct wrqe *wr;
struct adapter *sc = toep->vi->pi->adapter;
wr = alloc_wrqe(wr_len, toep->ctrlq);
if (wr == NULL) {
CXGBE_UNIMPLEMENTED("ppod_write_idata: alloc_wrqe failure");
return (ENOMEM);
}
req = wrtod(wr);
memset(req, 0, wr_len);
ulp_mem_io_set_hdr(sc, toep->tid, req, wr_len, dlen, pm_addr);
idata = (struct ulptx_idata *)(req + 1);
ppod = (struct pagepod *)(idata + 1);
for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES) {
if (!hdr) /* clear the pagepod */
ppod_clear(ppod);
else /* set the pagepod */
ppod_set(ppod, hdr, gl, gl_pidx);
}
t4_wrq_tx(sc, wr);
return 0;
}
int
t4_ddp_set_map(struct cxgbei_data *ci, void *iccp,
struct cxgbei_ulp2_pagepod_hdr *hdr, u_int idx, u_int npods,
struct cxgbei_ulp2_gather_list *gl, int reply)
{
struct icl_cxgbei_conn *icc = (struct icl_cxgbei_conn *)iccp;
struct toepcb *toep = icc->toep;
int err;
unsigned int pidx = 0, w_npods = 0, cnt;
/*
* on T4, if we use a mix of IMMD and DSGL with ULP_MEM_WRITE,
* the order would not be guaranteed, so we will stick with IMMD
*/
gl->tid = toep->tid;
gl->port_id = toep->vi->pi->port_id;
gl->egress_dev = (void *)toep->vi->ifp;
/* send via immediate data */
for (; w_npods < npods; idx += cnt, w_npods += cnt,
pidx += PPOD_PAGES) {
cnt = npods - w_npods;
if (cnt > ULPMEM_IDATA_MAX_NPPODS)
cnt = ULPMEM_IDATA_MAX_NPPODS;
err = ppod_write_idata(ci, hdr, idx, cnt, gl, pidx, toep);
if (err) {
printf("%s: ppod_write_idata failed\n", __func__);
break;
}
}
return err;
}
void
t4_ddp_clear_map(struct cxgbei_data *ci, struct cxgbei_ulp2_gather_list *gl,
u_int tag, u_int idx, u_int npods, struct icl_cxgbei_conn *icc)
{
struct toepcb *toep = icc->toep;
int err = -1;
u_int pidx = 0;
u_int w_npods = 0;
u_int cnt;
for (; w_npods < npods; idx += cnt, w_npods += cnt,
pidx += PPOD_PAGES) {
cnt = npods - w_npods;
if (cnt > ULPMEM_IDATA_MAX_NPPODS)
cnt = ULPMEM_IDATA_MAX_NPPODS;
err = ppod_write_idata(ci, NULL, idx, cnt, gl, 0, toep);
if (err)
break;
}
}
static int
cxgbei_map_sg(struct cxgbei_sgl *sgl, struct ccb_scsiio *csio)
{
unsigned int data_len = csio->dxfer_len;
unsigned int sgoffset = (uint64_t)csio->data_ptr & PAGE_MASK;
unsigned int nsge;
unsigned char *sgaddr = csio->data_ptr;
unsigned int len = 0;
nsge = (csio->dxfer_len + sgoffset + PAGE_SIZE - 1) >> PAGE_SHIFT;
sgl->sg_addr = sgaddr;
sgl->sg_offset = sgoffset;
if (data_len < (PAGE_SIZE - sgoffset))
len = data_len;
else
len = PAGE_SIZE - sgoffset;
sgl->sg_length = len;
data_len -= len;
sgaddr += len;
sgl = sgl+1;
while (data_len > 0) {
sgl->sg_addr = sgaddr;
len = (data_len < PAGE_SIZE)? data_len: PAGE_SIZE;
sgl->sg_length = len;
sgaddr += len;
data_len -= len;
sgl = sgl + 1;
}
return nsge;
}
static int
cxgbei_map_sg_tgt(struct cxgbei_sgl *sgl, union ctl_io *io)
{
unsigned int data_len, sgoffset, nsge;
unsigned char *sgaddr;
unsigned int len = 0, index = 0, ctl_sg_count, i;
struct ctl_sg_entry ctl_sg_entry, *ctl_sglist;
if (io->scsiio.kern_sg_entries > 0) {
ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
ctl_sg_count = io->scsiio.kern_sg_entries;
} else {
ctl_sglist = &ctl_sg_entry;
ctl_sglist->addr = io->scsiio.kern_data_ptr;
ctl_sglist->len = io->scsiio.kern_data_len;
ctl_sg_count = 1;
}
sgaddr = sgl->sg_addr = ctl_sglist[index].addr;
sgoffset = sgl->sg_offset = (uint64_t)sgl->sg_addr & PAGE_MASK;
data_len = ctl_sglist[index].len;
if (data_len < (PAGE_SIZE - sgoffset))
len = data_len;
else
len = PAGE_SIZE - sgoffset;
sgl->sg_length = len;
data_len -= len;
sgaddr += len;
sgl = sgl+1;
len = 0;
for (i = 0; i< ctl_sg_count; i++)
len += ctl_sglist[i].len;
nsge = (len + sgoffset + PAGE_SIZE -1) >> PAGE_SHIFT;
while (data_len > 0) {
sgl->sg_addr = sgaddr;
len = (data_len < PAGE_SIZE)? data_len: PAGE_SIZE;
sgl->sg_length = len;
sgaddr += len;
data_len -= len;
sgl = sgl + 1;
if (data_len == 0) {
if (index == ctl_sg_count - 1)
break;
index++;
sgaddr = ctl_sglist[index].addr;
data_len = ctl_sglist[index].len;
}
}
return nsge;
}
static int
t4_sk_ddp_tag_reserve(struct cxgbei_data *ci, struct icl_cxgbei_conn *icc,
u_int xferlen, struct cxgbei_sgl *sgl, u_int sgcnt, u_int *ddp_tag)
{
struct cxgbei_ulp2_gather_list *gl;
int err = -EINVAL;
struct toepcb *toep = icc->toep;
gl = cxgbei_ulp2_ddp_make_gl_from_iscsi_sgvec(xferlen, sgl, sgcnt, ci, 0);
if (gl) {
err = cxgbei_ulp2_ddp_tag_reserve(ci, icc, toep->tid,
&ci->tag_format, ddp_tag, gl, 0, 0);
if (err) {
cxgbei_ulp2_ddp_release_gl(ci, gl);
}
}
return err;
}
static unsigned int
cxgbei_task_reserve_itt(struct icl_conn *ic, void **prv,
struct ccb_scsiio *scmd, unsigned int *itt)
{
struct icl_cxgbei_conn *icc = ic_to_icc(ic);
int xferlen = scmd->dxfer_len;
struct cxgbei_task_data *tdata = NULL;
struct cxgbei_sgl *sge = NULL;
struct toepcb *toep = icc->toep;
struct adapter *sc = td_adapter(toep->td);
struct cxgbei_data *ci = sc->iscsi_ulp_softc;
int err = -1;
MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
tdata = (struct cxgbei_task_data *)*prv;
if (xferlen == 0 || tdata == NULL)
goto out;
if (xferlen < DDP_THRESHOLD)
goto out;
if ((scmd->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
tdata->nsge = cxgbei_map_sg(tdata->sgl, scmd);
if (tdata->nsge == 0) {
CTR1(KTR_CXGBE, "%s: map_sg failed", __func__);
return 0;
}
sge = tdata->sgl;
tdata->sc_ddp_tag = *itt;
CTR3(KTR_CXGBE, "%s: *itt:0x%x sc_ddp_tag:0x%x",
__func__, *itt, tdata->sc_ddp_tag);
if (cxgbei_ulp2_sw_tag_usable(&ci->tag_format,
tdata->sc_ddp_tag)) {
err = t4_sk_ddp_tag_reserve(ci, icc, scmd->dxfer_len,
sge, tdata->nsge, &tdata->sc_ddp_tag);
} else {
CTR3(KTR_CXGBE,
"%s: itt:0x%x sc_ddp_tag:0x%x not usable",
__func__, *itt, tdata->sc_ddp_tag);
}
}
out:
if (err < 0)
tdata->sc_ddp_tag =
cxgbei_ulp2_set_non_ddp_tag(&ci->tag_format, *itt);
return tdata->sc_ddp_tag;
}
static unsigned int
cxgbei_task_reserve_ttt(struct icl_conn *ic, void **prv, union ctl_io *io,
unsigned int *ttt)
{
struct icl_cxgbei_conn *icc = ic_to_icc(ic);
struct toepcb *toep = icc->toep;
struct adapter *sc = td_adapter(toep->td);
struct cxgbei_data *ci = sc->iscsi_ulp_softc;
struct cxgbei_task_data *tdata = NULL;
int xferlen, err = -1;
struct cxgbei_sgl *sge = NULL;
MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
xferlen = (io->scsiio.kern_data_len - io->scsiio.ext_data_filled);
tdata = (struct cxgbei_task_data *)*prv;
if ((xferlen == 0) || (tdata == NULL))
goto out;
if (xferlen < DDP_THRESHOLD)
goto out;
tdata->nsge = cxgbei_map_sg_tgt(tdata->sgl, io);
if (tdata->nsge == 0) {
CTR1(KTR_CXGBE, "%s: map_sg failed", __func__);
return 0;
}
sge = tdata->sgl;
tdata->sc_ddp_tag = *ttt;
if (cxgbei_ulp2_sw_tag_usable(&ci->tag_format, tdata->sc_ddp_tag)) {
err = t4_sk_ddp_tag_reserve(ci, icc, xferlen, sge,
tdata->nsge, &tdata->sc_ddp_tag);
} else {
CTR2(KTR_CXGBE, "%s: sc_ddp_tag:0x%x not usable",
__func__, tdata->sc_ddp_tag);
}
out:
if (err < 0)
tdata->sc_ddp_tag =
cxgbei_ulp2_set_non_ddp_tag(&ci->tag_format, *ttt);
return tdata->sc_ddp_tag;
}
static int
t4_sk_ddp_tag_release(struct icl_cxgbei_conn *icc, unsigned int ddp_tag)
{
struct toepcb *toep = icc->toep;
struct adapter *sc = td_adapter(toep->td);
struct cxgbei_data *ci = sc->iscsi_ulp_softc;
cxgbei_ulp2_ddp_tag_release(ci, ddp_tag, icc);
return (0);
}
static void
read_pdu_limits(struct adapter *sc, uint32_t *max_tx_pdu_len,
uint32_t *max_rx_pdu_len)
@@ -504,58 +133,43 @@ read_pdu_limits(struct adapter *sc, uint32_t *max_tx_pdu_len,
static int
cxgbei_init(struct adapter *sc, struct cxgbei_data *ci)
{
int nppods, bits, rc;
static const u_int pgsz_order[] = {0, 1, 2, 3};
struct ppod_region *pr;
uint32_t r;
int rc;
MPASS(sc->vres.iscsi.size > 0);
MPASS(ci != NULL);
ci->llimit = sc->vres.iscsi.start;
ci->ulimit = sc->vres.iscsi.start + sc->vres.iscsi.size - 1;
read_pdu_limits(sc, &ci->max_tx_pdu_len, &ci->max_rx_pdu_len);
nppods = sc->vres.iscsi.size >> IPPOD_SIZE_SHIFT;
if (nppods <= 1024)
return (ENXIO);
ci->ddp_threshold = 2048;
pr = &ci->pr;
bits = fls(nppods);
if (bits > IPPOD_IDX_MAX_SIZE)
bits = IPPOD_IDX_MAX_SIZE;
nppods = (1 << (bits - 1)) - 1;
rc = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR, NULL, NULL, UINT32_MAX , 8, BUS_SPACE_MAXSIZE,
BUS_DMA_ALLOCNOW, NULL, NULL, &ci->ulp_ddp_tag);
r = t4_read_reg(sc, A_ULP_RX_ISCSI_PSZ);
rc = t4_init_ppod_region(pr, &sc->vres.iscsi, r, "iSCSI page pods");
if (rc != 0) {
device_printf(sc->dev, "%s: failed to create DMA tag: %u.\n",
device_printf(sc->dev,
"%s: failed to initialize the iSCSI page pod region: %u.\n",
__func__, rc);
return (rc);
}
ci->colors = malloc(nppods * sizeof(char), M_CXGBE, M_NOWAIT | M_ZERO);
ci->gl_map = malloc(nppods * sizeof(struct cxgbei_ulp2_gather_list *),
M_CXGBE, M_NOWAIT | M_ZERO);
if (ci->colors == NULL || ci->gl_map == NULL) {
bus_dma_tag_destroy(ci->ulp_ddp_tag);
free(ci->colors, M_CXGBE);
free(ci->gl_map, M_CXGBE);
return (ENOMEM);
r = t4_read_reg(sc, A_ULP_RX_ISCSI_TAGMASK);
r &= V_ISCSITAGMASK(M_ISCSITAGMASK);
if (r != pr->pr_tag_mask) {
/*
* Recent firmwares are supposed to set up the iSCSI tagmask, but
* we'll do it ourselves if the computed value doesn't match
* what's in the register.
*/
device_printf(sc->dev,
"tagmask 0x%08x does not match computed mask 0x%08x.\n", r,
pr->pr_tag_mask);
t4_set_reg_field(sc, A_ULP_RX_ISCSI_TAGMASK,
V_ISCSITAGMASK(M_ISCSITAGMASK), pr->pr_tag_mask);
}
mtx_init(&ci->map_lock, "ddp lock", NULL, MTX_DEF | MTX_DUPOK);
ci->nppods = nppods;
ci->idx_last = nppods;
ci->idx_bits = bits;
ci->idx_mask = (1 << bits) - 1;
ci->rsvd_tag_mask = (1 << (bits + IPPOD_IDX_SHIFT)) - 1;
ci->tag_format.sw_bits = bits;
ci->tag_format.rsvd_bits = bits;
ci->tag_format.rsvd_shift = IPPOD_IDX_SHIFT;
ci->tag_format.rsvd_mask = ci->idx_mask;
t4_iscsi_init(sc, ci->idx_mask << IPPOD_IDX_SHIFT, pgsz_order);
return (rc);
return (0);
}
static int
@@ -772,47 +386,6 @@ do_rx_iscsi_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
return (0);
}
/* initiator */
void
cxgbei_conn_task_reserve_itt(void *conn, void **prv,
void *scmd, unsigned int *itt)
{
unsigned int tag;
tag = cxgbei_task_reserve_itt(conn, prv, scmd, itt);
if (tag)
*itt = htonl(tag);
return;
}
/* target */
void
cxgbei_conn_transfer_reserve_ttt(void *conn, void **prv,
void *scmd, unsigned int *ttt)
{
unsigned int tag;
tag = cxgbei_task_reserve_ttt(conn, prv, scmd, ttt);
if (tag)
*ttt = htonl(tag);
return;
}
void
cxgbei_cleanup_task(void *conn, void *ofld_priv)
{
struct icl_conn *ic = (struct icl_conn *)conn;
struct icl_cxgbei_conn *icc = ic_to_icc(ic);
struct cxgbei_task_data *tdata = ofld_priv;
struct adapter *sc = icc->sc;
struct cxgbei_data *ci = sc->iscsi_ulp_softc;
MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
MPASS(tdata != NULL);
if (cxgbei_ulp2_is_ddp_tag(&ci->tag_format, tdata->sc_ddp_tag))
t4_sk_ddp_tag_release(icc, tdata->sc_ddp_tag);
memset(tdata, 0, sizeof(*tdata));
}
static int
cxgbei_activate(struct adapter *sc)
{
@@ -834,7 +407,7 @@ cxgbei_activate(struct adapter *sc)
}
/* per-adapter softc for iSCSI */
ci = malloc(sizeof(*ci), M_CXGBE, M_ZERO | M_NOWAIT);
ci = malloc(sizeof(*ci), M_CXGBE, M_ZERO | M_WAITOK);
if (ci == NULL)
return (ENOMEM);
@@ -852,12 +425,13 @@ cxgbei_activate(struct adapter *sc)
static int
cxgbei_deactivate(struct adapter *sc)
{
struct cxgbei_data *ci = sc->iscsi_ulp_softc;
ASSERT_SYNCHRONIZED_OP(sc);
if (sc->iscsi_ulp_softc != NULL) {
cxgbei_ddp_cleanup(sc->iscsi_ulp_softc);
free(sc->iscsi_ulp_softc, M_CXGBE);
if (ci != NULL) {
t4_free_ppod_region(&ci->pr);
free(ci, M_CXGBE);
sc->iscsi_ulp_softc = NULL;
}


@@ -105,69 +105,18 @@ ip_to_icp(struct icl_pdu *ip)
return (__containerof(ip, struct icl_cxgbei_pdu, ip));
}
struct cxgbei_sgl {
int sg_flag;
void *sg_addr;
void *sg_dma_addr;
size_t sg_offset;
size_t sg_length;
};
#define cxgbei_scsi_for_each_sg(_sgl, _sgel, _n, _i) \
for (_i = 0, _sgel = (cxgbei_sgl*) (_sgl); _i < _n; _i++, \
_sgel++)
#define sg_dma_addr(_sgel) _sgel->sg_dma_addr
#define sg_virt(_sgel) _sgel->sg_addr
#define sg_len(_sgel) _sgel->sg_length
#define sg_off(_sgel) _sgel->sg_offset
#define sg_next(_sgel) _sgel + 1
/* private data for each scsi task */
struct cxgbei_task_data {
struct cxgbei_sgl sgl[256];
u_int nsge;
u_int sc_ddp_tag;
};
struct cxgbei_ulp2_tag_format {
u_char sw_bits;
u_char rsvd_bits;
u_char rsvd_shift;
u_char filler[1];
uint32_t rsvd_mask;
};
struct cxgbei_data {
u_int llimit;
u_int ulimit;
u_int nppods;
u_int idx_last;
u_char idx_bits;
uint32_t idx_mask;
uint32_t rsvd_tag_mask;
u_int max_tx_pdu_len;
u_int max_rx_pdu_len;
struct mtx map_lock;
bus_dma_tag_t ulp_ddp_tag;
unsigned char *colors;
struct cxgbei_ulp2_gather_list **gl_map;
u_int ddp_threshold;
struct ppod_region pr;
struct cxgbei_ulp2_tag_format tag_format;
};
void cxgbei_conn_task_reserve_itt(void *, void **, void *, unsigned int *);
void cxgbei_conn_transfer_reserve_ttt(void *, void **, void *, unsigned int *);
void cxgbei_cleanup_task(void *, void *);
/* cxgbei.c */
u_int cxgbei_select_worker_thread(struct icl_cxgbei_conn *);
struct cxgbei_ulp2_pagepod_hdr;
int t4_ddp_set_map(struct cxgbei_data *, void *,
struct cxgbei_ulp2_pagepod_hdr *, u_int, u_int,
struct cxgbei_ulp2_gather_list *, int);
void t4_ddp_clear_map(struct cxgbei_data *, struct cxgbei_ulp2_gather_list *,
u_int, u_int, u_int, struct icl_cxgbei_conn *);
/* icl_cxgbei.c */
int icl_cxgbei_mod_load(void);
int icl_cxgbei_mod_unload(void);


@@ -1,417 +0,0 @@
/*-
* Copyright (c) 2012 Chelsio Communications, Inc.
* All rights reserved.
*
* Chelsio T5xx iSCSI driver
* cxgbei_ulp2_ddp.c: Chelsio iSCSI DDP Manager.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include "opt_inet6.h"
#ifdef TCP_OFFLOAD
#include <sys/types.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/toecore.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <dev/iscsi/icl.h>
#include <dev/iscsi/iscsi_proto.h>
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h" /* for PCIE_MEM_ACCESS */
#include "tom/t4_tom.h"
#include "cxgbei.h"
#include "cxgbei_ulp2_ddp.h"
/*
* Map a single buffer address.
*/
static void
ulp2_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
bus_addr_t *ba = arg;
if (error)
return;
KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
*ba = segs->ds_addr;
}
/*
* iSCSI Direct Data Placement
*
* T4/5 ulp2 h/w can directly place the iSCSI Data-In or Data-Out PDU's
* payload into pre-posted final destination host-memory buffers based on the
* Initiator Task Tag (ITT) in Data-In or Target Task Tag (TTT) in Data-Out
* PDUs.
*
* The host memory address is programmed into h/w in the format of pagepod
* entries.
* The location of the pagepod entry is encoded into the ddp tag, which
* is used as the base for the ITT/TTT.
*/
static inline int
ddp_find_unused_entries(struct cxgbei_data *ci, u_int start, u_int max,
u_int count, u_int *idx, struct cxgbei_ulp2_gather_list *gl)
{
unsigned int i, j, k;
/* not enough entries */
if (max - start < count)
return (EBUSY);
max -= count;
mtx_lock(&ci->map_lock);
for (i = start; i < max;) {
for (j = 0, k = i; j < count; j++, k++) {
if (ci->gl_map[k])
break;
}
if (j == count) {
for (j = 0, k = i; j < count; j++, k++)
ci->gl_map[k] = gl;
mtx_unlock(&ci->map_lock);
*idx = i;
return (0);
}
i += j + 1;
}
mtx_unlock(&ci->map_lock);
return (EBUSY);
}
static inline void
ddp_unmark_entries(struct cxgbei_data *ci, u_int start, u_int count)
{
mtx_lock(&ci->map_lock);
memset(&ci->gl_map[start], 0,
count * sizeof(struct cxgbei_ulp2_gather_list *));
mtx_unlock(&ci->map_lock);
}
static inline void
ddp_gl_unmap(struct cxgbei_data *ci, struct cxgbei_ulp2_gather_list *gl)
{
int i;
if (!gl->pages[0])
return;
for (i = 0; i < gl->nelem; i++) {
bus_dmamap_unload(ci->ulp_ddp_tag, gl->dma_sg[i].bus_map);
bus_dmamap_destroy(ci->ulp_ddp_tag, gl->dma_sg[i].bus_map);
}
}
static inline int
ddp_gl_map(struct cxgbei_data *ci, struct cxgbei_ulp2_gather_list *gl)
{
int i, rc;
bus_addr_t pa;
MPASS(ci != NULL);
mtx_lock(&ci->map_lock);
for (i = 0; i < gl->nelem; i++) {
rc = bus_dmamap_create(ci->ulp_ddp_tag, 0,
&gl->dma_sg[i].bus_map);
if (rc != 0)
goto unmap;
rc = bus_dmamap_load(ci->ulp_ddp_tag, gl->dma_sg[i].bus_map,
gl->pages[i], PAGE_SIZE, ulp2_dma_map_addr,
&pa, BUS_DMA_NOWAIT);
if (rc != 0)
goto unmap;
gl->dma_sg[i].phys_addr = pa;
}
mtx_unlock(&ci->map_lock);
return (0);
unmap:
if (i) {
u_int nelem = gl->nelem;
gl->nelem = i;
ddp_gl_unmap(ci, gl);
gl->nelem = nelem;
}
return (ENOMEM);
}
/**
* cxgbei_ulp2_ddp_make_gl_from_iscsi_sgvec - build ddp page buffer list
* @xferlen: total buffer length
* @sgl: page buffer scatter-gather list (struct cxgbei_sgl)
* @sgcnt: # of page buffers
* @gfp: allocation mode
*
* construct a ddp page buffer list from the scsi scattergather list.
* coalesce buffers as much as possible, and obtain dma addresses for
* each page.
*
* Return the cxgbei_ulp2_gather_list constructed from the page buffers if the
* memory can be used for ddp. Return NULL otherwise.
*/
struct cxgbei_ulp2_gather_list *
cxgbei_ulp2_ddp_make_gl_from_iscsi_sgvec(u_int xferlen, struct cxgbei_sgl *sgl,
u_int sgcnt, struct cxgbei_data *ci, int gfp)
{
struct cxgbei_ulp2_gather_list *gl;
struct cxgbei_sgl *sg = sgl;
void *sgpage = (void *)((u64)sg->sg_addr & (~PAGE_MASK));
unsigned int sglen = sg->sg_length;
unsigned int sgoffset = (u64)sg->sg_addr & PAGE_MASK;
unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
PAGE_SHIFT;
int i = 1, j = 0;
if (xferlen <= DDP_THRESHOLD) {
CTR2(KTR_CXGBE, "xfer %u < threshold %u, no ddp.",
xferlen, DDP_THRESHOLD);
return NULL;
}
gl = malloc(sizeof(struct cxgbei_ulp2_gather_list) +
npages * (sizeof(struct dma_segments) + sizeof(void *)),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (gl == NULL)
return (NULL);
gl->pages = (void **)&gl->dma_sg[npages];
gl->length = xferlen;
gl->offset = sgoffset;
gl->pages[0] = sgpage;
CTR6(KTR_CXGBE,
"%s: xferlen:0x%x len:0x%x off:0x%x sg_addr:%p npages:%d",
__func__, xferlen, gl->length, gl->offset, sg->sg_addr, npages);
for (i = 1, sg = sg_next(sg); i < sgcnt; i++, sg = sg_next(sg)) {
void *page = sg->sg_addr;
if (sgpage == page && sg->sg_offset == sgoffset + sglen)
sglen += sg->sg_length;
else {
/* make sure the sgl is fit for ddp:
* each has the same page size, and
* all of the middle pages are used completely
*/
if ((j && sgoffset) ||
((i != sgcnt - 1) &&
((sglen + sgoffset) & ~CXGBEI_PAGE_MASK))){
goto error_out;
}
j++;
if (j == gl->nelem || sg->sg_offset) {
goto error_out;
}
gl->pages[j] = page;
sglen = sg->sg_length;
sgoffset = sg->sg_offset;
sgpage = page;
}
}
gl->nelem = ++j;
if (ddp_gl_map(ci, gl) < 0)
goto error_out;
return gl;
error_out:
free(gl, M_DEVBUF);
return NULL;
}
/**
* cxgbei_ulp2_ddp_release_gl - release a page buffer list
* @gl: a ddp page buffer list
* @pdev: pci_dev used for pci_unmap
* free a ddp page buffer list resulting from cxgbei_ulp2_ddp_make_gl().
*/
void
cxgbei_ulp2_ddp_release_gl(struct cxgbei_data *ci,
struct cxgbei_ulp2_gather_list *gl)
{
ddp_gl_unmap(ci, gl);
free(gl, M_DEVBUF);
}
/**
* cxgbei_ulp2_ddp_tag_reserve - set up ddp for a data transfer
* @ci: adapter's ddp info
* @tid: connection id
* @tformat: tag format
* @tagp: contains s/w tag initially, will be updated with ddp/hw tag
* @gl: the page memory list
* @gfp: allocation mode
*
* ddp setup for a given page buffer list and construct the ddp tag.
* return 0 if success, < 0 otherwise.
*/
int
cxgbei_ulp2_ddp_tag_reserve(struct cxgbei_data *ci, void *icc, u_int tid,
struct cxgbei_ulp2_tag_format *tformat, u32 *tagp,
struct cxgbei_ulp2_gather_list *gl, int gfp, int reply)
{
struct cxgbei_ulp2_pagepod_hdr hdr;
u_int npods, idx;
int rc;
u32 sw_tag = *tagp;
u32 tag;
MPASS(ci != NULL);
if (!gl || !gl->nelem || gl->length < DDP_THRESHOLD)
return (EINVAL);
npods = (gl->nelem + IPPOD_PAGES_MAX - 1) >> IPPOD_PAGES_SHIFT;
if (ci->idx_last == ci->nppods)
rc = ddp_find_unused_entries(ci, 0, ci->nppods, npods, &idx,
gl);
else {
rc = ddp_find_unused_entries(ci, ci->idx_last + 1,
ci->nppods, npods, &idx, gl);
if (rc && ci->idx_last >= npods) {
rc = ddp_find_unused_entries(ci, 0,
min(ci->idx_last + npods, ci->nppods),
npods, &idx, gl);
}
}
if (rc) {
CTR3(KTR_CXGBE, "xferlen %u, gl %u, npods %u NO DDP.",
gl->length, gl->nelem, npods);
return (rc);
}
tag = cxgbei_ulp2_ddp_tag_base(idx, ci->colors, tformat, sw_tag);
CTR4(KTR_CXGBE, "%s: sw_tag:0x%x idx:0x%x tag:0x%x",
__func__, sw_tag, idx, tag);
hdr.rsvd = 0;
hdr.vld_tid = htonl(F_IPPOD_VALID | V_IPPOD_TID(tid));
hdr.pgsz_tag_clr = htonl(tag & ci->rsvd_tag_mask);
hdr.maxoffset = htonl(gl->length);
hdr.pgoffset = htonl(gl->offset);
rc = t4_ddp_set_map(ci, icc, &hdr, idx, npods, gl, reply);
if (rc < 0)
goto unmark_entries;
ci->idx_last = idx;
*tagp = tag;
return (0);
unmark_entries:
ddp_unmark_entries(ci, idx, npods);
return (rc);
}
/**
* cxgbei_ulp2_ddp_tag_release - release a ddp tag
* @ci: adapter's ddp info
* @tag: ddp tag
* ddp cleanup for a given ddp tag and release all the resources held
*/
void
cxgbei_ulp2_ddp_tag_release(struct cxgbei_data *ci, uint32_t tag,
struct icl_cxgbei_conn *icc)
{
uint32_t idx;
MPASS(ci != NULL);
MPASS(icc != NULL);
idx = (tag >> IPPOD_IDX_SHIFT) & ci->idx_mask;
CTR3(KTR_CXGBE, "tag:0x%x idx:0x%x nppods:0x%x",
tag, idx, ci->nppods);
if (idx < ci->nppods) {
struct cxgbei_ulp2_gather_list *gl = ci->gl_map[idx];
unsigned int npods;
if (!gl || !gl->nelem) {
CTR4(KTR_CXGBE,
"release 0x%x, idx 0x%x, gl 0x%p, %u.",
tag, idx, gl, gl ? gl->nelem : 0);
return;
}
npods = (gl->nelem + IPPOD_PAGES_MAX - 1) >> IPPOD_PAGES_SHIFT;
CTR3(KTR_CXGBE, "ddp tag 0x%x, release idx 0x%x, npods %u.",
tag, idx, npods);
t4_ddp_clear_map(ci, gl, tag, idx, npods, icc);
ddp_unmark_entries(ci, idx, npods);
cxgbei_ulp2_ddp_release_gl(ci, gl);
} else
CTR3(KTR_CXGBE, "ddp tag 0x%x, idx 0x%x > max 0x%x.",
tag, idx, ci->nppods);
}
/**
* cxgbei_ddp_cleanup - release the adapter's ddp resources
*/
void
cxgbei_ddp_cleanup(struct cxgbei_data *ci)
{
int i = 0;
while (i < ci->nppods) {
struct cxgbei_ulp2_gather_list *gl = ci->gl_map[i];
if (gl) {
int npods = (gl->nelem + IPPOD_PAGES_MAX - 1)
>> IPPOD_PAGES_SHIFT;
free(gl, M_DEVBUF);
i += npods;
} else
i++;
}
free(ci->colors, M_CXGBE);
free(ci->gl_map, M_CXGBE);
}
#endif


@@ -1,217 +0,0 @@
/*-
* Copyright (c) 2012 Chelsio Communications, Inc.
* All rights reserved.
*
* Chelsio T5xx iSCSI driver
* cxgbei_ulp2_ddp.c: Chelsio iSCSI DDP Manager.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __CXGBEI_ULP2_DDP_H__
#define __CXGBEI_ULP2_DDP_H__
#define CXGBEI_PAGE_MASK (~(PAGE_SIZE-1))
#define DDP_THRESHOLD 2048
/*
* cxgbei ddp tags are 32 bits; each consists of reserved bits used by h/w and
* non-reserved bits that can be used by the iscsi s/w.
* The reserved bits are identified by the rsvd_bits and rsvd_shift fields
* in struct cxgbei_ulp2_tag_format.
*
* The uppermost reserved bit can be used to check whether a tag is a ddp tag:
* if the bit is 0, the tag is a valid ddp tag
*/
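/*
 * Worked example (illustrative numbers): cxgbei_init() set rsvd_shift
 * to IPPOD_IDX_SHIFT (6) and rsvd_bits to the number of index bits.
 * With 10 index bits, the reserved field occupies bits 0-15 (6 color
 * bits + 10 index bits) and bit 15 is the uppermost reserved bit: a
 * tag with bit 15 clear is a valid ddp tag, and
 * cxgbei_ulp2_set_non_ddp_tag() below marks a plain s/w tag by
 * setting that bit.
 */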
/*
* cxgbei_ulp2_is_ddp_tag - check if a given tag is a hw/ddp tag
* @tformat: tag format information
* @tag: tag to be checked
*
* return true if the tag is a ddp tag, false otherwise.
*/
static inline int
cxgbei_ulp2_is_ddp_tag(struct cxgbei_ulp2_tag_format *tformat, uint32_t tag)
{
return (!(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1))));
}
/*
* cxgbei_ulp2_sw_tag_usable - check if s/w tag has enough bits left for hw bits
* @tformat: tag format information
* @sw_tag: s/w tag to be checked
*
* return true if the tag can be used for hw ddp tag, false otherwise.
*/
static inline int
cxgbei_ulp2_sw_tag_usable(struct cxgbei_ulp2_tag_format *tformat,
uint32_t sw_tag)
{
return (1); /* XXXNP: huh? */
sw_tag >>= (32 - tformat->rsvd_bits + tformat->rsvd_shift);
return !sw_tag;
}
/*
* cxgbei_ulp2_set_non_ddp_tag - mark a given s/w tag as an invalid ddp tag
* @tformat: tag format information
* @sw_tag: s/w tag to be checked
*
* insert 1 at the uppermost reserved bit to mark it as an invalid ddp tag.
*/
static inline uint32_t
cxgbei_ulp2_set_non_ddp_tag(struct cxgbei_ulp2_tag_format *tformat,
uint32_t sw_tag)
{
uint32_t rsvd_bits = tformat->rsvd_bits + tformat->rsvd_shift;
if (sw_tag) {
u32 v1 = sw_tag & ((1 << (rsvd_bits - 1)) - 1);
u32 v2 = (sw_tag >> (rsvd_bits - 1)) << rsvd_bits;
return v2 | (1 << (rsvd_bits - 1)) | v1;
}
return sw_tag | (1 << (rsvd_bits - 1)) ;
}
struct dma_segments {
bus_dmamap_t bus_map;
bus_addr_t phys_addr;
};
/*
* struct cxgbei_ulp2_gather_list - cxgbei direct data placement memory
*
* @tag: ddp tag
* @length: total data buffer length
* @offset: initial offset to the 1st page
* @nelem: # of pages
* @pages: page pointers
* @phys_addr: physical address
*/
struct cxgbei_ulp2_gather_list {
uint32_t tag;
uint32_t tid;
uint32_t port_id;
void *egress_dev;
unsigned int length;
unsigned int offset;
unsigned int nelem;
bus_size_t mapsize;
bus_dmamap_t bus_map;
bus_dma_segment_t *segments;
void **pages;
struct dma_segments dma_sg[0];
};
#define IPPOD_SIZE sizeof(struct cxgbei_ulp2_pagepod) /* 64 */
#define IPPOD_SIZE_SHIFT 6
#define IPPOD_COLOR_SHIFT 0
#define IPPOD_COLOR_SIZE 6
#define IPPOD_COLOR_MASK ((1 << IPPOD_COLOR_SIZE) - 1)
#define IPPOD_IDX_SHIFT IPPOD_COLOR_SIZE
#define IPPOD_IDX_MAX_SIZE 24
#define S_IPPOD_TID 0
#define M_IPPOD_TID 0xFFFFFF
#define V_IPPOD_TID(x) ((x) << S_IPPOD_TID)
#define S_IPPOD_VALID 24
#define V_IPPOD_VALID(x) ((x) << S_IPPOD_VALID)
#define F_IPPOD_VALID V_IPPOD_VALID(1U)
#define S_IPPOD_COLOR 0
#define M_IPPOD_COLOR 0x3F
#define V_IPPOD_COLOR(x) ((x) << S_IPPOD_COLOR)
#define S_IPPOD_TAG 6
#define M_IPPOD_TAG 0xFFFFFF
#define V_IPPOD_TAG(x) ((x) << S_IPPOD_TAG)
#define S_IPPOD_PGSZ 30
#define M_IPPOD_PGSZ 0x3
#define V_IPPOD_PGSZ(x) ((x) << S_IPPOD_PGSZ)
static inline uint32_t
cxgbei_ulp2_ddp_tag_base(u_int idx, u_char *colors,
struct cxgbei_ulp2_tag_format *tformat, uint32_t sw_tag)
{
if (__predict_false(++colors[idx] == 1 << IPPOD_IDX_SHIFT))
colors[idx] = 0;
sw_tag <<= tformat->rsvd_bits + tformat->rsvd_shift;
return (sw_tag | idx << IPPOD_IDX_SHIFT | colors[idx]);
}
#define ISCSI_PDU_NONPAYLOAD_LEN 312 /* bhs(48) + ahs(256) + digest(8) */
/*
* align pdu size to multiple of 512 for better performance
*/
#define cxgbei_align_pdu_size(n) do { n = (n) & (~511); } while (0)
#define ULP2_MAX_PKT_SIZE 16224
#define ULP2_MAX_PDU_PAYLOAD (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN)
#define IPPOD_PAGES_MAX 4
#define IPPOD_PAGES_SHIFT 2 /* 4 pages per pod */
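/*
 * Worked example (illustrative): a 9-page gather list needs
 * (9 + IPPOD_PAGES_MAX - 1) >> IPPOD_PAGES_SHIFT = 3 pagepods.  Each
 * pagepod stores IPPOD_PAGES_MAX + 1 = 5 page addresses, so adjacent
 * pods overlap by one page and can cover a buffer that straddles a
 * pod boundary (see struct cxgbei_ulp2_pagepod below).
 */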
/*
* struct pagepod_hdr, pagepod - pagepod format
*/
struct cxgbei_ulp2_pagepod_hdr {
uint32_t vld_tid;
uint32_t pgsz_tag_clr;
uint32_t maxoffset;
uint32_t pgoffset;
uint64_t rsvd;
};
struct cxgbei_ulp2_pagepod {
struct cxgbei_ulp2_pagepod_hdr hdr;
uint64_t addr[IPPOD_PAGES_MAX + 1];
};
int cxgbei_ulp2_ddp_tag_reserve(struct cxgbei_data *, void *, unsigned int,
struct cxgbei_ulp2_tag_format *, uint32_t *,
struct cxgbei_ulp2_gather_list *, int , int );
void cxgbei_ulp2_ddp_tag_release(struct cxgbei_data *, uint32_t,
struct icl_cxgbei_conn *);
struct cxgbei_ulp2_gather_list *cxgbei_ulp2_ddp_make_gl_from_iscsi_sgvec(u_int,
struct cxgbei_sgl *, u_int, struct cxgbei_data *, int);
void cxgbei_ulp2_ddp_release_gl(struct cxgbei_data *,
struct cxgbei_ulp2_gather_list *);
int cxgbei_ulp2_ddp_find_page_index(u_long);
int cxgbei_ulp2_adapter_ddp_info(struct cxgbei_data *,
struct cxgbei_ulp2_tag_format *);
void cxgbei_ddp_cleanup(struct cxgbei_data *);
#endif


@@ -60,6 +60,8 @@ __FBSDID("$FreeBSD$");
#include <sys/uio.h>
#include <machine/bus.h>
#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp.h>
@@ -70,6 +72,28 @@ __FBSDID("$FreeBSD$");
#include <dev/iscsi/iscsi_proto.h>
#include <icl_conn_if.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_error.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_compat.h>
#include <cam/scsi/scsi_message.h>
#include "common/common.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom.h"
@@ -90,8 +114,7 @@ static int recvspace = 1048576;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, recvspace, CTLFLAG_RWTUN,
&recvspace, 0, "Default receive socket buffer size");
static uma_zone_t icl_transfer_zone;
static uma_zone_t prsv_zone;
static volatile u_int icl_cxgbei_ncons;
#define ICL_CONN_LOCK(X) mtx_lock(X->ic_lock)
@@ -242,12 +265,6 @@ icl_cxgbei_conn_pdu_data_segment_length(struct icl_conn *ic,
return (icl_pdu_data_segment_length(request));
}
static uint32_t
icl_conn_build_tasktag(struct icl_conn *ic, uint32_t tag)
{
return tag;
}
static struct mbuf *
finalize_pdu(struct icl_cxgbei_conn *icc, struct icl_cxgbei_pdu *icp)
{
@@ -776,55 +793,215 @@ icl_cxgbei_conn_close(struct icl_conn *ic)
int
icl_cxgbei_conn_task_setup(struct icl_conn *ic, struct icl_pdu *ip,
struct ccb_scsiio *csio, uint32_t *task_tagp, void **prvp)
struct ccb_scsiio *csio, uint32_t *ittp, void **arg)
{
void *prv;
struct icl_cxgbei_conn *icc = ic_to_icc(ic);
struct toepcb *toep = icc->toep;
struct adapter *sc = icc->sc;
struct cxgbei_data *ci = sc->iscsi_ulp_softc;
struct ppod_region *pr = &ci->pr;
struct ppod_reservation *prsv;
uint32_t itt;
int rc = 0;
*task_tagp = icl_conn_build_tasktag(ic, *task_tagp);
/* This is for the offload driver's state. Must not be set already. */
MPASS(arg != NULL);
MPASS(*arg == NULL);
prv = uma_zalloc(icl_transfer_zone, M_NOWAIT | M_ZERO);
if (prv == NULL)
return (ENOMEM);
if ((csio->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_IN ||
csio->dxfer_len < ci->ddp_threshold) {
no_ddp:
/*
* No DDP for this I/O. Allocate an ITT (based on the one
* passed in) that cannot be a valid hardware DDP tag in the
* iSCSI region.
*/
itt = *ittp & M_PPOD_TAG;
itt = V_PPOD_TAG(itt) | pr->pr_invalid_bit;
*ittp = htobe32(itt);
MPASS(*arg == NULL); /* State is maintained for DDP only. */
return (0);
}
*prvp = prv;
/*
* Reserve resources for DDP, update the itt that should be used in the
* PDU, and save DDP specific state for this I/O in *arg.
*/
cxgbei_conn_task_reserve_itt(ic, prvp, csio, task_tagp);
prsv = uma_zalloc(prsv_zone, M_NOWAIT);
if (prsv == NULL) {
rc = ENOMEM;
goto no_ddp;
}
/* XXX add support for all CAM_DATA_ types */
MPASS((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR);
rc = t4_alloc_page_pods_for_buf(pr, (vm_offset_t)csio->data_ptr,
csio->dxfer_len, prsv);
if (rc != 0) {
uma_zfree(prsv_zone, prsv);
goto no_ddp;
}
rc = t4_write_page_pods_for_buf(sc, toep->ofld_txq, toep->tid, prsv,
(vm_offset_t)csio->data_ptr, csio->dxfer_len);
if (rc != 0) {
t4_free_page_pods(prsv);
uma_zfree(prsv_zone, prsv);
goto no_ddp;
}
*ittp = htobe32(prsv->prsv_tag);
*arg = prsv;
return (0);
}
void
icl_cxgbei_conn_task_done(struct icl_conn *ic, void *prv)
icl_cxgbei_conn_task_done(struct icl_conn *ic, void *arg)
{
cxgbei_cleanup_task(ic, prv);
uma_zfree(icl_transfer_zone, prv);
if (arg != NULL) {
struct ppod_reservation *prsv = arg;
t4_free_page_pods(prsv);
uma_zfree(prsv_zone, prsv);
}
}
/* XXXNP: PDU should be passed in as parameter, like on the initiator. */
#define io_to_request_pdu(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr)
#define io_to_ppod_reservation(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr)
int
icl_cxgbei_conn_transfer_setup(struct icl_conn *ic, union ctl_io *io,
uint32_t *transfer_tag, void **prvp)
uint32_t *tttp, void **arg)
{
void *prv;
struct icl_cxgbei_conn *icc = ic_to_icc(ic);
struct toepcb *toep = icc->toep;
struct ctl_scsiio *ctsio = &io->scsiio;
struct adapter *sc = icc->sc;
struct cxgbei_data *ci = sc->iscsi_ulp_softc;
struct ppod_region *pr = &ci->pr;
struct ppod_reservation *prsv;
uint32_t ttt;
int xferlen, rc = 0, alias;
*transfer_tag = icl_conn_build_tasktag(ic, *transfer_tag);
/* This is for the offload driver's state. Must not be set already. */
MPASS(arg != NULL);
MPASS(*arg == NULL);
prv = uma_zalloc(icl_transfer_zone, M_NOWAIT | M_ZERO);
if (prv == NULL)
return (ENOMEM);
if (ctsio->ext_data_filled == 0) {
int first_burst;
struct icl_pdu *ip = io_to_request_pdu(io);
vm_offset_t buf;
#ifdef INVARIANTS
struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
*prvp = prv;
MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
MPASS(ic == ip->ip_conn);
MPASS(ip->ip_bhs_mbuf != NULL);
#endif
first_burst = icl_pdu_data_segment_length(ip);
cxgbei_conn_transfer_reserve_ttt(ic, prvp, io, transfer_tag);
/*
* Note that ICL calls conn_transfer_setup even if the first
* burst had everything and there's nothing left to transfer.
*/
MPASS(ctsio->kern_data_len >= first_burst);
xferlen = ctsio->kern_data_len;
if (xferlen - first_burst < ci->ddp_threshold) {
no_ddp:
/*
* No DDP for this transfer. Allocate a TTT (based on
* the one passed in) that cannot be a valid hardware
* DDP tag in the iSCSI region.
*/
ttt = *tttp & M_PPOD_TAG;
ttt = V_PPOD_TAG(ttt) | pr->pr_invalid_bit;
*tttp = htobe32(ttt);
MPASS(io_to_ppod_reservation(io) == NULL);
return (0);
}
if (ctsio->kern_sg_entries == 0)
buf = (vm_offset_t)ctsio->kern_data_ptr;
else if (ctsio->kern_sg_entries == 1) {
struct ctl_sg_entry *sgl = (void *)ctsio->kern_data_ptr;
MPASS(sgl->len == xferlen);
buf = (vm_offset_t)sgl->addr;
} else {
rc = EAGAIN; /* XXX implement */
goto no_ddp;
}
/*
* Reserve resources for DDP, update the ttt that should be used
* in the PDU, and save DDP specific state for this I/O.
*/
MPASS(io_to_ppod_reservation(io) == NULL);
prsv = uma_zalloc(prsv_zone, M_NOWAIT);
if (prsv == NULL) {
rc = ENOMEM;
goto no_ddp;
}
rc = t4_alloc_page_pods_for_buf(pr, buf, xferlen, prsv);
if (rc != 0) {
uma_zfree(prsv_zone, prsv);
goto no_ddp;
}
rc = t4_write_page_pods_for_buf(sc, toep->ofld_txq, toep->tid,
prsv, buf, xferlen);
if (rc != 0) {
t4_free_page_pods(prsv);
uma_zfree(prsv_zone, prsv);
goto no_ddp;
}
*tttp = htobe32(prsv->prsv_tag);
io_to_ppod_reservation(io) = prsv;
*arg = ctsio;
return (0);
}
/*
* In the middle of an I/O. A non-NULL page pod reservation indicates
* that a DDP buffer is being used for the I/O.
*/
prsv = io_to_ppod_reservation(ctsio);
if (prsv == NULL)
goto no_ddp;
alias = (prsv->prsv_tag & pr->pr_alias_mask) >> pr->pr_alias_shift;
alias++;
prsv->prsv_tag &= ~pr->pr_alias_mask;
prsv->prsv_tag |= alias << pr->pr_alias_shift & pr->pr_alias_mask;
*tttp = htobe32(prsv->prsv_tag);
*arg = ctsio;
return (0);
}
void
icl_cxgbei_conn_transfer_done(struct icl_conn *ic, void *prv)
icl_cxgbei_conn_transfer_done(struct icl_conn *ic, void *arg)
{
cxgbei_cleanup_task(ic, prv);
uma_zfree(icl_transfer_zone, prv);
struct ctl_scsiio *ctsio = arg;
if (ctsio != NULL && ctsio->kern_data_len == ctsio->ext_data_filled) {
struct ppod_reservation *prsv;
prsv = io_to_ppod_reservation(ctsio);
MPASS(prsv != NULL);
t4_free_page_pods(prsv);
uma_zfree(prsv_zone, prsv);
}
}
static void
@@ -882,9 +1059,12 @@ icl_cxgbei_mod_load(void)
{
int rc;
icl_transfer_zone = uma_zcreate("icl_transfer",
16 * 1024, NULL, NULL, NULL, NULL,
UMA_ALIGN_PTR, 0);
/*
* Space to track pagepod reservations.
*/
prsv_zone = uma_zcreate("Pagepod reservations",
sizeof(struct ppod_reservation), NULL, NULL, NULL, NULL,
CACHE_LINE_SIZE, 0);
refcount_init(&icl_cxgbei_ncons, 0);
@@ -903,7 +1083,7 @@ icl_cxgbei_mod_unload(void)
icl_unregister("cxgbei", false);
uma_zdestroy(icl_transfer_zone);
uma_zdestroy(prsv_zone);
return (0);
}


@@ -155,7 +155,6 @@ int t4_register_uld(struct uld_info *);
int t4_unregister_uld(struct uld_info *);
int t4_activate_uld(struct adapter *, int);
int t4_deactivate_uld(struct adapter *, int);
void t4_iscsi_init(struct adapter *, u_int, const u_int *);
int uld_active(struct adapter *, int);
#endif
#endif


@@ -8929,16 +8929,6 @@ t4_db_dropped(struct adapter *sc)
}
#ifdef TCP_OFFLOAD
void
t4_iscsi_init(struct adapter *sc, u_int tag_mask, const u_int *pgsz_order)
{
t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask);
t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) |
V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) |
V_HPZ3(pgsz_order[3]));
}
static int
toe_capability(struct vi_info *vi, int enable)
{


@@ -575,11 +575,20 @@ t4_tweak_chip_settings(struct adapter *sc)
V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5]));
t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v);
/* 4K, 16K, 64K, 256K DDP "page sizes" */
/* 4K, 16K, 64K, 256K DDP "page sizes" for TDDP */
v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v);
m = v = F_TDDPTAGTCB;
/*
* 4K, 8K, 16K, 64K DDP "page sizes" for iSCSI DDP. These have been
* chosen with MAXPHYS = 128K in mind. The largest DDP buffer that we
* may have to deal with is MAXPHYS + 1 page.
*/
v = V_HPZ0(0) | V_HPZ1(1) | V_HPZ2(2) | V_HPZ3(4);
t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, v);
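/*
 * Illustrative arithmetic (assuming the HPZ encoding implied by the
 * comments here, i.e. page size = 4KB << HPZ): values 0/1/2/4 give
 * the 4K/8K/16K/64K sizes named above.  A MAXPHYS (128K) buffer that
 * is not page aligned covers MAXPHYS + 1 page of address space, e.g.
 * 33 4K pages or three 64K pages.
 */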
/* We use multiple DDP page sizes both in plain-TOE and ISCSI modes. */
m = v = F_TDDPTAGTCB | F_ISCSITAGTCB;
t4_set_reg_field(sc, A_ULP_RX_CTL, m, v);
m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |


@@ -6,7 +6,6 @@ CXGBE = ${.CURDIR}/../../../dev/cxgbe
KMOD= cxgbei
SRCS= cxgbei.c
SRCS+= cxgbei_ulp2_ddp.c
SRCS+= icl_cxgbei.c
SRCS+= bus_if.h
SRCS+= device_if.h