Add nvme_ctrlr_submit_[admin|io]_request functions, which consolidate
the code for allocating nvme_tracker objects and calling bus_dmamap_load
for commands that have payloads.

Sponsored by:	Intel
parent ad697276ce
commit d281e8fbbd

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=241660
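
In short, paraphrasing the diff below: the per-command helpers (nvme_ctrlr_cmd_* and nvme_ns_cmd_*) no longer allocate a tracker or call bus_dmamap_load themselves. They only build an nvme_request and hand it to one of the new helpers, which selects the queue pair (admin queue, or a per-CPU/shared I/O queue), allocates the tracker, and maps the payload when there is one. A minimal before/after sketch of the caller-side pattern, assembled from the diff rather than quoted verbatim:

	/* Before: every command helper managed its own tracker and DMA mapping. */
	tr = nvme_allocate_tracker(ctrlr, TRUE, req);
	err = bus_dmamap_load(tr->qpair->dma_tag, tr->payload_dma_map,
	    payload, payload_size, nvme_payload_map, tr, 0);
	KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));

	/* After: build the request and let the controller layer do the rest. */
	req = nvme_allocate_request(payload, payload_size, cb_fn, cb_arg);
	nvme_ctrlr_submit_admin_request(ctrlr, req);
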
@@ -262,33 +262,6 @@ nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
 	nvme_qpair_submit_cmd(qpair, tr);
 }
 
-struct nvme_tracker *
-nvme_allocate_tracker(struct nvme_controller *ctrlr, boolean_t is_admin,
-    struct nvme_request *req)
-{
-	struct nvme_tracker *tr;
-	struct nvme_qpair *qpair;
-
-	if (is_admin) {
-		qpair = &ctrlr->adminq;
-	} else {
-		if (ctrlr->per_cpu_io_queues)
-			qpair = &ctrlr->ioq[curcpu];
-		else
-			qpair = &ctrlr->ioq[0];
-	}
-
-	tr = nvme_qpair_allocate_tracker(qpair);
-
-	if (tr == NULL)
-		return (NULL);
-
-	tr->qpair = qpair;
-	tr->req = req;
-
-	return (tr);
-}
-
 static int
 nvme_attach(device_t dev)
 {
@@ -791,3 +791,54 @@ nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
 
 	return (0);
 }
+
+void
+nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
+    struct nvme_request *req)
+{
+	struct nvme_qpair *qpair;
+	struct nvme_tracker *tr;
+	int err;
+
+	qpair = &ctrlr->adminq;
+
+	tr = nvme_qpair_allocate_tracker(qpair);
+
+	tr->req = req;
+
+	if (req->payload_size > 0) {
+		err = bus_dmamap_load(tr->qpair->dma_tag, tr->payload_dma_map,
+		    req->payload, req->payload_size,
+		    nvme_payload_map, tr, 0);
+		if (err != 0)
+			panic("bus_dmamap_load returned non-zero!\n");
+	} else
+		nvme_qpair_submit_cmd(tr->qpair, tr);
+}
+
+void
+nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
+    struct nvme_request *req)
+{
+	struct nvme_qpair *qpair;
+	struct nvme_tracker *tr;
+	int err;
+
+	if (ctrlr->per_cpu_io_queues)
+		qpair = &ctrlr->ioq[curcpu];
+	else
+		qpair = &ctrlr->ioq[0];
+
+	tr = nvme_qpair_allocate_tracker(qpair);
+
+	tr->req = req;
+
+	if (req->payload_size > 0) {
+		err = bus_dmamap_load(tr->qpair->dma_tag, tr->payload_dma_map,
+		    req->payload, req->payload_size,
+		    nvme_payload_map, tr, 0);
+		if (err != 0)
+			panic("bus_dmamap_load returned non-zero!\n");
+	} else
+		nvme_qpair_submit_cmd(tr->qpair, tr);
+}
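
Both helpers added above follow the same two-path pattern: a request that carries a payload is not submitted directly; bus_dmamap_load() is given nvme_payload_map as its callback, and (per the context at the top of the first hunk) that callback ends by calling nvme_qpair_submit_cmd() once the mapping is in place. A request without a payload is submitted immediately. Roughly, as a paraphrase rather than literal driver code:

	/*
	 * nvme_ctrlr_submit_admin_request() / nvme_ctrlr_submit_io_request():
	 *   1. pick the qpair (adminq, or ioq[curcpu] / ioq[0] for I/O)
	 *   2. tr = nvme_qpair_allocate_tracker(qpair); tr->req = req;
	 *   3. payload?  yes -> bus_dmamap_load(..., nvme_payload_map, tr, 0);
	 *                       nvme_payload_map() submits the command later
	 *                no  -> nvme_qpair_submit_cmd(tr->qpair, tr) right away
	 */
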
@@ -34,15 +34,11 @@ nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload,
     nvme_cb_fn_t cb_fn, void *cb_arg)
 {
 	struct nvme_request *req;
-	struct nvme_tracker *tr;
 	struct nvme_command *cmd;
-	int err;
 
 	req = nvme_allocate_request(payload,
 	    sizeof(struct nvme_controller_data), cb_fn, cb_arg);
-
-	tr = nvme_allocate_tracker(ctrlr, TRUE, req);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_IDENTIFY;
 
@@ -52,10 +48,7 @@ nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload,
 	 */
 	cmd->cdw10 = 1;
 
-	err = bus_dmamap_load(tr->qpair->dma_tag, tr->payload_dma_map, payload,
-	    req->payload_size, nvme_payload_map, tr, 0);
-
-	KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
+	nvme_ctrlr_submit_admin_request(ctrlr, req);
 }
 
 void
@@ -63,15 +56,11 @@ nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint16_t nsid,
     void *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
 {
 	struct nvme_request *req;
-	struct nvme_tracker *tr;
 	struct nvme_command *cmd;
-	int err;
 
 	req = nvme_allocate_request(payload,
 	    sizeof(struct nvme_namespace_data), cb_fn, cb_arg);
-
-	tr = nvme_allocate_tracker(ctrlr, TRUE, req);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_IDENTIFY;
 
@@ -80,10 +69,7 @@ nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint16_t nsid,
 	 */
 	cmd->nsid = nsid;
 
-	err = bus_dmamap_load(tr->qpair->dma_tag, tr->payload_dma_map, payload,
-	    req->payload_size, nvme_payload_map, tr, 0);
-
-	KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
+	nvme_ctrlr_submit_admin_request(ctrlr, req);
 }
 
 void
@@ -92,13 +78,10 @@ nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
     void *cb_arg)
 {
 	struct nvme_request *req;
-	struct nvme_tracker *tr;
 	struct nvme_command *cmd;
 
 	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
-
-	tr = nvme_allocate_tracker(ctrlr, TRUE, req);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_CREATE_IO_CQ;
 
@@ -111,7 +94,7 @@ nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
 	cmd->cdw11 = (vector << 16) | 0x3;
 	cmd->prp1 = io_que->cpl_bus_addr;
 
-	nvme_qpair_submit_cmd(tr->qpair, tr);
+	nvme_ctrlr_submit_admin_request(ctrlr, req);
 }
 
 void
@@ -119,13 +102,10 @@ nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
     struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
 {
 	struct nvme_request *req;
-	struct nvme_tracker *tr;
 	struct nvme_command *cmd;
 
 	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
-
-	tr = nvme_allocate_tracker(ctrlr, TRUE, req);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_CREATE_IO_SQ;
 
@@ -138,7 +118,7 @@ nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
 	cmd->cdw11 = (io_que->id << 16) | 0x1;
 	cmd->prp1 = io_que->cmd_bus_addr;
 
-	nvme_qpair_submit_cmd(tr->qpair, tr);
+	nvme_ctrlr_submit_admin_request(ctrlr, req);
 }
 
 void
@@ -146,13 +126,10 @@ nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
     struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
 {
 	struct nvme_request *req;
-	struct nvme_tracker *tr;
 	struct nvme_command *cmd;
 
 	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
-
-	tr = nvme_allocate_tracker(ctrlr, TRUE, req);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_DELETE_IO_CQ;
 
@@ -162,7 +139,7 @@ nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
 	 */
 	cmd->cdw10 = io_que->id;
 
-	nvme_qpair_submit_cmd(tr->qpair, tr);
+	nvme_ctrlr_submit_admin_request(ctrlr, req);
 }
 
 void
@@ -170,13 +147,10 @@ nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
     struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
 {
 	struct nvme_request *req;
-	struct nvme_tracker *tr;
 	struct nvme_command *cmd;
 
 	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
-
-	tr = nvme_allocate_tracker(ctrlr, TRUE, req);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_DELETE_IO_SQ;
 
@@ -186,7 +160,7 @@ nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
 	 */
 	cmd->cdw10 = io_que->id;
 
-	nvme_qpair_submit_cmd(tr->qpair, tr);
+	nvme_ctrlr_submit_admin_request(ctrlr, req);
 }
 
 void
@@ -195,26 +169,16 @@ nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature,
     nvme_cb_fn_t cb_fn, void *cb_arg)
 {
 	struct nvme_request *req;
-	struct nvme_tracker *tr;
 	struct nvme_command *cmd;
-	int err;
 
 	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
-
-	tr = nvme_allocate_tracker(ctrlr, TRUE, req);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_SET_FEATURES;
 	cmd->cdw10 = feature;
 	cmd->cdw11 = cdw11;
 
-	if (payload_size > 0) {
-		err = bus_dmamap_load(tr->qpair->dma_tag, tr->payload_dma_map,
-		    payload, payload_size, nvme_payload_map, tr, 0);
-
-		KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
-	} else
-		nvme_qpair_submit_cmd(tr->qpair, tr);
+	nvme_ctrlr_submit_admin_request(ctrlr, req);
 }
 
 void
@@ -223,26 +187,16 @@ nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature,
     nvme_cb_fn_t cb_fn, void *cb_arg)
 {
 	struct nvme_request *req;
-	struct nvme_tracker *tr;
 	struct nvme_command *cmd;
-	int err;
 
 	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
-
-	tr = nvme_allocate_tracker(ctrlr, TRUE, req);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_GET_FEATURES;
 	cmd->cdw10 = feature;
 	cmd->cdw11 = cdw11;
 
-	if (payload_size > 0) {
-		err = bus_dmamap_load(tr->qpair->dma_tag, tr->payload_dma_map,
-		    payload, payload_size, nvme_payload_map, tr, 0);
-
-		KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
-	} else
-		nvme_qpair_submit_cmd(tr->qpair, tr);
+	nvme_ctrlr_submit_admin_request(ctrlr, req);
 }
 
 void
@@ -299,17 +253,14 @@ nvme_ctrlr_cmd_asynchronous_event_request(struct nvme_controller *ctrlr,
     nvme_cb_fn_t cb_fn, void *cb_arg)
 {
 	struct nvme_request *req;
-	struct nvme_tracker *tr;
 	struct nvme_command *cmd;
 
 	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
-
-	tr = nvme_allocate_tracker(ctrlr, TRUE, req);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_ASYNC_EVENT_REQUEST;
 
-	nvme_qpair_submit_cmd(tr->qpair, tr);
+	nvme_ctrlr_submit_admin_request(ctrlr, req);
 }
 
 void
@@ -318,22 +269,15 @@ nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
     nvme_cb_fn_t cb_fn, void *cb_arg)
 {
 	struct nvme_request *req;
-	struct nvme_tracker *tr;
 	struct nvme_command *cmd;
-	int err;
 
 	req = nvme_allocate_request(payload, sizeof(*payload), cb_fn, cb_arg);
-
-	tr = nvme_allocate_tracker(ctrlr, TRUE, req);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_GET_LOG_PAGE;
 	cmd->nsid = nsid;
 	cmd->cdw10 = ((sizeof(*payload)/sizeof(uint32_t)) - 1) << 16;
 	cmd->cdw10 |= NVME_LOG_HEALTH_INFORMATION;
 
-	err = bus_dmamap_load(tr->qpair->dma_tag, tr->payload_dma_map, payload,
-	    sizeof(*payload), nvme_payload_map, tr, 0);
-
-	KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
+	nvme_ctrlr_submit_admin_request(ctrlr, req);
 }
 
@@ -34,17 +34,12 @@ nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
     uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
 {
 	struct nvme_request *req;
-	struct nvme_tracker *tr;
 	struct nvme_command *cmd;
-	int err;
 
 	req = nvme_allocate_request(payload, lba_count*512, cb_fn, cb_arg);
-
-	tr = nvme_allocate_tracker(ns->ctrlr, FALSE, req);
-
-	if (tr == NULL)
+	if (req == NULL)
 		return (ENOMEM);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_READ;
 	cmd->nsid = ns->id;
@@ -53,10 +48,7 @@ nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
 	*(uint64_t *)&cmd->cdw10 = lba;
 	cmd->cdw12 = lba_count-1;
 
-	err = bus_dmamap_load(tr->qpair->dma_tag, tr->payload_dma_map, payload,
-	    req->payload_size, nvme_payload_map, tr, 0);
-
-	KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
+	nvme_ctrlr_submit_io_request(ns->ctrlr, req);
 
 	return (0);
 }
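
Assembled from the two hunks above (return type inferred, blank-line placement approximate), nvme_ns_cmd_read() now reduces to:

int
nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
    uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	/* Build the request; the controller layer handles tracker and DMA. */
	req = nvme_allocate_request(payload, lba_count*512, cb_fn, cb_arg);
	if (req == NULL)
		return (ENOMEM);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_READ;
	cmd->nsid = ns->id;
	*(uint64_t *)&cmd->cdw10 = lba;
	cmd->cdw12 = lba_count-1;

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}
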
@@ -66,15 +58,11 @@ nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
     uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
 {
 	struct nvme_request *req;
-	struct nvme_tracker *tr;
 	struct nvme_command *cmd;
-	int err;
 
 	req = nvme_allocate_request(payload, lba_count*512, cb_fn, cb_arg);
 
-	tr = nvme_allocate_tracker(ns->ctrlr, FALSE, req);
-
-	if (tr == NULL)
+	if (req == NULL)
 		return (ENOMEM);
 
 	cmd = &req->cmd;
@@ -85,10 +73,7 @@ nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
 	*(uint64_t *)&cmd->cdw10 = lba;
 	cmd->cdw12 = lba_count-1;
 
-	err = bus_dmamap_load(tr->qpair->dma_tag, tr->payload_dma_map, payload,
-	    req->payload_size, nvme_payload_map, tr, 0);
-
-	KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
+	nvme_ctrlr_submit_io_request(ns->ctrlr, req);
 
 	return (0);
 }
@@ -98,16 +83,12 @@ nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
     uint8_t num_ranges, nvme_cb_fn_t cb_fn, void *cb_arg)
 {
 	struct nvme_request *req;
-	struct nvme_tracker *tr;
 	struct nvme_command *cmd;
-	int err;
 
 	req = nvme_allocate_request(payload,
 	    num_ranges * sizeof(struct nvme_dsm_range), cb_fn, cb_arg);
 
-	tr = nvme_allocate_tracker(ns->ctrlr, FALSE, req);
-
-	if (tr == NULL)
+	if (req == NULL)
 		return (ENOMEM);
 
 	cmd = &req->cmd;
@@ -118,10 +99,7 @@ nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
 	cmd->cdw10 = num_ranges;
 	cmd->cdw11 = NVME_DSM_ATTR_DEALLOCATE;
 
-	err = bus_dmamap_load(tr->qpair->dma_tag, tr->payload_dma_map, payload,
-	    req->payload_size, nvme_payload_map, tr, 0);
-
-	KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
+	nvme_ctrlr_submit_io_request(ns->ctrlr, req);
 
 	return (0);
 }
@@ -130,21 +108,18 @@ int
 nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg)
 {
 	struct nvme_request *req;
-	struct nvme_tracker *tr;
 	struct nvme_command *cmd;
 
 	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
 
-	tr = nvme_allocate_tracker(ns->ctrlr, FALSE, req);
-
-	if (tr == NULL)
+	if (req == NULL)
 		return (ENOMEM);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_FLUSH;
 	cmd->nsid = ns->id;
 
-	nvme_qpair_submit_cmd(tr->qpair, tr);
+	nvme_ctrlr_submit_io_request(ns->ctrlr, req);
 
 	return (0);
 }
@@ -331,9 +331,6 @@ void	nvme_ctrlr_cmd_asynchronous_event_request(struct nvme_controller *ctrlr,
 						   nvme_cb_fn_t cb_fn,
 						   void *cb_arg);
 
-struct nvme_tracker *	nvme_allocate_tracker(struct nvme_controller *ctrlr,
-					      boolean_t is_admin,
-					      struct nvme_request *request);
 void	nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg,
 			 int error);
 
@@ -341,6 +338,10 @@ int	nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
 int	nvme_ctrlr_reset(struct nvme_controller *ctrlr);
 /* ctrlr defined as void * to allow use with config_intrhook. */
 void	nvme_ctrlr_start(void *ctrlr_arg);
+void	nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
+					struct nvme_request *req);
+void	nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
+				     struct nvme_request *req);
 
 void	nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
 			     uint16_t vector, uint32_t num_entries,
@@ -109,6 +109,7 @@ nvme_qpair_allocate_tracker(struct nvme_qpair *qpair)
 
 		callout_init_mtx(&tr->timer, &qpair->lock, 0);
 		tr->cid = qpair->num_tr++;
+		tr->qpair = qpair;
 	} else
 		SLIST_REMOVE_HEAD(&qpair->free_tr, slist);
 