From: Klaus Jensen <its@irrelevant.dk>
To: qemu-block@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>, Fam Zheng <fam@euphon.net>,
Javier Gonzalez <javier@javigon.com>,
qemu-devel@nongnu.org, Max Reitz <mreitz@redhat.com>,
Keith Busch <keith.busch@intel.com>,
Paul Durrant <Paul.Durrant@citrix.com>,
Stephen Bates <sbates@raithlin.com>
Subject: [PATCH v2 18/20] nvme: remove redundant NvmeCmd pointer parameter
Date: Tue, 15 Oct 2019 12:38:58 +0200 [thread overview]
Message-ID: <20191015103900.313928-19-its@irrelevant.dk> (raw)
In-Reply-To: <20191015103900.313928-1-its@irrelevant.dk>
The command struct is available in the NvmeRequest that we generally
pass around anyway.
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
---
hw/block/nvme.c | 219 +++++++++++++++++++++++-------------------------
1 file changed, 106 insertions(+), 113 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index bcd801c345b6..67f92bf5a3ac 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -574,14 +574,14 @@ static uint16_t nvme_dma_write_sgl(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
}
static uint16_t nvme_dma_write(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
- NvmeCmd *cmd, NvmeRequest *req)
+ NvmeRequest *req)
{
- if (NVME_CMD_FLAGS_PSDT(cmd->flags)) {
- return nvme_dma_write_sgl(n, ptr, len, cmd->dptr.sgl, req);
+ if (NVME_CMD_FLAGS_PSDT(req->cmd.flags)) {
+ return nvme_dma_write_sgl(n, ptr, len, req->cmd.dptr.sgl, req);
}
- uint64_t prp1 = le64_to_cpu(cmd->dptr.prp.prp1);
- uint64_t prp2 = le64_to_cpu(cmd->dptr.prp.prp2);
+ uint64_t prp1 = le64_to_cpu(req->cmd.dptr.prp.prp1);
+ uint64_t prp2 = le64_to_cpu(req->cmd.dptr.prp.prp2);
return nvme_dma_write_prp(n, ptr, len, prp1, prp2, req);
}
@@ -624,7 +624,7 @@ out:
}
static uint16_t nvme_dma_read_sgl(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
- NvmeSglDescriptor sgl, NvmeCmd *cmd, NvmeRequest *req)
+ NvmeSglDescriptor sgl, NvmeRequest *req)
{
QEMUSGList qsg;
uint16_t err = NVME_SUCCESS;
@@ -662,29 +662,29 @@ out:
}
static uint16_t nvme_dma_read(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
- NvmeCmd *cmd, NvmeRequest *req)
+ NvmeRequest *req)
{
- if (NVME_CMD_FLAGS_PSDT(cmd->flags)) {
- return nvme_dma_read_sgl(n, ptr, len, cmd->dptr.sgl, cmd, req);
+ if (NVME_CMD_FLAGS_PSDT(req->cmd.flags)) {
+ return nvme_dma_read_sgl(n, ptr, len, req->cmd.dptr.sgl, req);
}
- uint64_t prp1 = le64_to_cpu(cmd->dptr.prp.prp1);
- uint64_t prp2 = le64_to_cpu(cmd->dptr.prp.prp2);
+ uint64_t prp1 = le64_to_cpu(req->cmd.dptr.prp.prp1);
+ uint64_t prp2 = le64_to_cpu(req->cmd.dptr.prp.prp2);
return nvme_dma_read_prp(n, ptr, len, prp1, prp2, req);
}
-static uint16_t nvme_map(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
+static uint16_t nvme_map(NvmeCtrl *n, NvmeRequest *req)
{
uint32_t len = req->nlb << nvme_ns_lbads(req->ns);
uint64_t prp1, prp2;
- if (NVME_CMD_FLAGS_PSDT(cmd->flags)) {
- return nvme_map_sgl(n, &req->qsg, cmd->dptr.sgl, len, req);
+ if (NVME_CMD_FLAGS_PSDT(req->cmd.flags)) {
+ return nvme_map_sgl(n, &req->qsg, req->cmd.dptr.sgl, len, req);
}
- prp1 = le64_to_cpu(cmd->dptr.prp.prp1);
- prp2 = le64_to_cpu(cmd->dptr.prp.prp2);
+ prp1 = le64_to_cpu(req->cmd.dptr.prp.prp1);
+ prp2 = le64_to_cpu(req->cmd.dptr.prp.prp2);
return nvme_map_prp(n, &req->qsg, prp1, prp2, len, req);
}
@@ -1045,7 +1045,7 @@ static uint16_t nvme_check_rw(NvmeCtrl *n, NvmeRequest *req)
return NVME_SUCCESS;
}
-static uint16_t nvme_flush(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
+static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
{
NvmeNamespace *ns = req->ns;
@@ -1057,12 +1057,12 @@ static uint16_t nvme_flush(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
return NVME_NO_COMPLETE;
}
-static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
+static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeRequest *req)
{
NvmeAIO *aio;
NvmeNamespace *ns = req->ns;
- NvmeRwCmd *rw = (NvmeRwCmd *) cmd;
+ NvmeRwCmd *rw = (NvmeRwCmd *) &req->cmd;
int64_t offset;
size_t count;
@@ -1092,9 +1092,9 @@ static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
return NVME_NO_COMPLETE;
}
-static uint16_t nvme_rw(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
+static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
{
- NvmeRwCmd *rw = (NvmeRwCmd *) cmd;
+ NvmeRwCmd *rw = (NvmeRwCmd *) &req->cmd;
NvmeNamespace *ns = req->ns;
int status;
@@ -1114,7 +1114,7 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
return status;
}
- status = nvme_map(n, cmd, req);
+ status = nvme_map(n, req);
if (status) {
block_acct_invalid(blk_get_stats(ns->conf.blk), acct);
return status;
@@ -1126,11 +1126,12 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
return NVME_NO_COMPLETE;
}
-static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
+static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
{
- uint32_t nsid = le32_to_cpu(cmd->nsid);
+ uint32_t nsid = le32_to_cpu(req->cmd.nsid);
- trace_nvme_io_cmd(req->cid, nsid, le16_to_cpu(req->sq->sqid), cmd->opcode);
+ trace_nvme_io_cmd(req->cid, nsid, le16_to_cpu(req->sq->sqid),
+ req->cmd.opcode);
req->ns = nvme_ns(n, nsid);
@@ -1139,16 +1140,16 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
return NVME_INVALID_NSID | NVME_DNR;
}
- switch (cmd->opcode) {
+ switch (req->cmd.opcode) {
case NVME_CMD_FLUSH:
- return nvme_flush(n, cmd, req);
+ return nvme_flush(n, req);
case NVME_CMD_WRITE_ZEROS:
- return nvme_write_zeros(n, cmd, req);
+ return nvme_write_zeros(n, req);
case NVME_CMD_WRITE:
case NVME_CMD_READ:
- return nvme_rw(n, cmd, req);
+ return nvme_rw(n, req);
default:
- trace_nvme_err_invalid_opc(cmd->opcode);
+ trace_nvme_err_invalid_opc(req->cmd.opcode);
return NVME_INVALID_OPCODE | NVME_DNR;
}
}
@@ -1165,10 +1166,10 @@ static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
n->qs_created--;
}
-static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
+static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req)
{
- NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
- NvmeRequest *req, *next;
+ NvmeDeleteQ *c = (NvmeDeleteQ *) &req->cmd;
+ NvmeRequest *next;
NvmeSQueue *sq;
NvmeCQueue *cq;
NvmeAIO *aio;
@@ -1237,10 +1238,10 @@ static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
n->qs_created++;
}
-static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
+static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req)
{
NvmeSQueue *sq;
- NvmeCreateSq *c = (NvmeCreateSq *)cmd;
+ NvmeCreateSq *c = (NvmeCreateSq *) &req->cmd;
uint16_t cqid = le16_to_cpu(c->cqid);
uint16_t sqid = le16_to_cpu(c->sqid);
@@ -1275,8 +1276,8 @@ static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
return NVME_SUCCESS;
}
-static uint16_t nvme_error_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
- uint32_t buf_len, uint64_t off, NvmeRequest *req)
+static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
+ uint64_t off, NvmeRequest *req)
{
uint32_t trans_len;
@@ -1290,13 +1291,13 @@ static uint16_t nvme_error_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
nvme_clear_events(n, NVME_AER_TYPE_ERROR);
}
- return nvme_dma_read(n, (uint8_t *) n->elpes + off, trans_len, cmd, req);
+ return nvme_dma_read(n, (uint8_t *) n->elpes + off, trans_len, req);
}
-static uint16_t nvme_smart_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
- uint32_t buf_len, uint64_t off, NvmeRequest *req)
+static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
+ uint64_t off, NvmeRequest *req)
{
- uint32_t nsid = le32_to_cpu(cmd->nsid);
+ uint32_t nsid = le32_to_cpu(req->cmd.nsid);
uint32_t trans_len;
time_t current_ms;
@@ -1352,11 +1353,11 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
nvme_clear_events(n, NVME_AER_TYPE_SMART);
}
- return nvme_dma_read(n, (uint8_t *) &smart + off, trans_len, cmd, req);
+ return nvme_dma_read(n, (uint8_t *) &smart + off, trans_len, req);
}
-static uint16_t nvme_fw_log_info(NvmeCtrl *n, NvmeCmd *cmd, uint32_t buf_len,
- uint64_t off, NvmeRequest *req)
+static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
+ NvmeRequest *req)
{
uint32_t trans_len;
NvmeFwSlotInfoLog fw_log;
@@ -1369,15 +1370,15 @@ static uint16_t nvme_fw_log_info(NvmeCtrl *n, NvmeCmd *cmd, uint32_t buf_len,
trans_len = MIN(sizeof(fw_log) - off, buf_len);
- return nvme_dma_read(n, (uint8_t *) &fw_log + off, trans_len, cmd, req);
+ return nvme_dma_read(n, (uint8_t *) &fw_log + off, trans_len, req);
}
-static uint16_t nvme_get_log(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
+static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
{
- uint32_t dw10 = le32_to_cpu(cmd->cdw10);
- uint32_t dw11 = le32_to_cpu(cmd->cdw11);
- uint32_t dw12 = le32_to_cpu(cmd->cdw12);
- uint32_t dw13 = le32_to_cpu(cmd->cdw13);
+ uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
+ uint32_t dw11 = le32_to_cpu(req->cmd.cdw11);
+ uint32_t dw12 = le32_to_cpu(req->cmd.cdw12);
+ uint32_t dw13 = le32_to_cpu(req->cmd.cdw13);
uint8_t lid = dw10 & 0xff;
uint8_t lsp = (dw10 >> 8) & 0xf;
uint8_t rae = (dw10 >> 15) & 0x1;
@@ -1407,11 +1408,11 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
switch (lid) {
case NVME_LOG_ERROR_INFO:
- return nvme_error_info(n, cmd, rae, len, off, req);
+ return nvme_error_info(n, rae, len, off, req);
case NVME_LOG_SMART_INFO:
- return nvme_smart_info(n, cmd, rae, len, off, req);
+ return nvme_smart_info(n, rae, len, off, req);
case NVME_LOG_FW_SLOT_INFO:
- return nvme_fw_log_info(n, cmd, len, off, req);
+ return nvme_fw_log_info(n, len, off, req);
default:
trace_nvme_err_invalid_log_page(req->cid, lid);
return NVME_INVALID_LOG_ID | NVME_DNR;
@@ -1430,9 +1431,9 @@ static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
n->qs_created--;
}
-static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
+static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeRequest *req)
{
- NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
+ NvmeDeleteQ *c = (NvmeDeleteQ *) &req->cmd;
NvmeCQueue *cq;
uint16_t qid = le16_to_cpu(c->qid);
@@ -1471,10 +1472,10 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
n->qs_created++;
}
-static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
+static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
{
NvmeCQueue *cq;
- NvmeCreateCq *c = (NvmeCreateCq *)cmd;
+ NvmeCreateCq *c = (NvmeCreateCq *) &req->cmd;
uint16_t cqid = le16_to_cpu(c->cqid);
uint16_t vector = le16_to_cpu(c->irq_vector);
uint16_t qsize = le16_to_cpu(c->qsize);
@@ -1511,17 +1512,16 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
return NVME_SUCCESS;
}
-static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
+static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
{
trace_nvme_identify_ctrl();
- return nvme_dma_read(n, (uint8_t *) &n->id_ctrl, sizeof(n->id_ctrl), cmd,
- req);
+ return nvme_dma_read(n, (uint8_t *) &n->id_ctrl, sizeof(n->id_ctrl), req);
}
-static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
+static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
{
- uint32_t nsid = le32_to_cpu(cmd->nsid);
+ uint32_t nsid = le32_to_cpu(req->cmd.nsid);
NvmeNamespace *ns = nvme_ns(n, nsid);
trace_nvme_identify_ns(nsid);
@@ -1531,15 +1531,13 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
return NVME_INVALID_NSID | NVME_DNR;
}
- return nvme_dma_read(n, (uint8_t *) &ns->id_ns, sizeof(ns->id_ns), cmd,
- req);
+ return nvme_dma_read(n, (uint8_t *) &ns->id_ns, sizeof(ns->id_ns), req);
}
-static uint16_t nvme_identify_ns_list(NvmeCtrl *n, NvmeCmd *cmd,
- NvmeRequest *req)
+static uint16_t nvme_identify_ns_list(NvmeCtrl *n, NvmeRequest *req)
{
static const int data_len = 4 * KiB;
- uint32_t min_nsid = le32_to_cpu(cmd->nsid);
+ uint32_t min_nsid = le32_to_cpu(req->cmd.nsid);
uint32_t *list;
uint16_t ret;
int i, j = 0;
@@ -1556,13 +1554,12 @@ static uint16_t nvme_identify_ns_list(NvmeCtrl *n, NvmeCmd *cmd,
break;
}
}
- ret = nvme_dma_read(n, (uint8_t *) list, data_len, cmd, req);
+ ret = nvme_dma_read(n, (uint8_t *) list, data_len, req);
g_free(list);
return ret;
}
-static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeCmd *cmd,
- NvmeRequest *req)
+static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
{
static const int len = 4096;
@@ -1573,7 +1570,7 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeCmd *cmd,
uint8_t nid[16];
};
- uint32_t nsid = le32_to_cpu(cmd->nsid);
+ uint32_t nsid = le32_to_cpu(req->cmd.nsid);
struct ns_descr *list;
uint16_t ret;
@@ -1590,33 +1587,33 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeCmd *cmd,
list->nidl = 0x10;
*(uint32_t *) &list->nid[12] = cpu_to_be32(nsid);
- ret = nvme_dma_read(n, (uint8_t *) list, len, cmd, req);
+ ret = nvme_dma_read(n, (uint8_t *) list, len, req);
g_free(list);
return ret;
}
-static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
+static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
{
- NvmeIdentify *c = (NvmeIdentify *)cmd;
+ NvmeIdentify *c = (NvmeIdentify *) &req->cmd;
switch (le32_to_cpu(c->cns)) {
case 0x00:
- return nvme_identify_ns(n, cmd, req);
+ return nvme_identify_ns(n, req);
case 0x01:
- return nvme_identify_ctrl(n, cmd, req);
+ return nvme_identify_ctrl(n, req);
case 0x02:
- return nvme_identify_ns_list(n, cmd, req);
+ return nvme_identify_ns_list(n, req);
case 0x03:
- return nvme_identify_ns_descr_list(n, cmd, req);
+ return nvme_identify_ns_descr_list(n, req);
default:
trace_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
return NVME_INVALID_FIELD | NVME_DNR;
}
}
-static uint16_t nvme_abort(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
+static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req)
{
- uint16_t sqid = le32_to_cpu(cmd->cdw10) & 0xffff;
+ uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff;
req->cqe.result = 1;
if (nvme_check_sqid(n, sqid)) {
@@ -1666,21 +1663,19 @@ static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
return cpu_to_le64(ts.all);
}
-static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd,
- NvmeRequest *req)
+static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
uint64_t timestamp = nvme_get_timestamp(n);
- return nvme_dma_read(n, (uint8_t *)&timestamp, sizeof(timestamp), cmd,
- req);
+ return nvme_dma_read(n, (uint8_t *)&timestamp, sizeof(timestamp), req);
}
-static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
+static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req)
{
NvmeNamespace *ns;
- uint32_t dw10 = le32_to_cpu(cmd->cdw10);
- uint32_t dw11 = le32_to_cpu(cmd->cdw11);
+ uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
+ uint32_t dw11 = le32_to_cpu(req->cmd.cdw11);
uint32_t result, nsid;
trace_nvme_getfeat(dw10);
@@ -1716,7 +1711,7 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
trace_nvme_getfeat_numq(result);
break;
case NVME_TIMESTAMP:
- return nvme_get_feature_timestamp(n, cmd, req);
+ return nvme_get_feature_timestamp(n, req);
case NVME_INTERRUPT_COALESCING:
result = cpu_to_le32(n->features.int_coalescing);
break;
@@ -1742,14 +1737,12 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
return NVME_SUCCESS;
}
-static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd,
- NvmeRequest *req)
+static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
uint16_t ret;
uint64_t timestamp;
- ret = nvme_dma_write(n, (uint8_t *)&timestamp, sizeof(timestamp), cmd,
- req);
+ ret = nvme_dma_write(n, (uint8_t *)&timestamp, sizeof(timestamp), req);
if (ret != NVME_SUCCESS) {
return ret;
}
@@ -1759,12 +1752,12 @@ static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd,
return NVME_SUCCESS;
}
-static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
+static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
{
NvmeNamespace *ns;
- uint32_t dw10 = le32_to_cpu(cmd->cdw10);
- uint32_t dw11 = le32_to_cpu(cmd->cdw11);
+ uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
+ uint32_t dw11 = le32_to_cpu(req->cmd.cdw11);
uint32_t nsid;
trace_nvme_setfeat(dw10, dw11);
@@ -1808,7 +1801,7 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
((n->params.num_queues - 2) << 16));
break;
case NVME_TIMESTAMP:
- return nvme_set_feature_timestamp(n, cmd, req);
+ return nvme_set_feature_timestamp(n, req);
case NVME_ASYNCHRONOUS_EVENT_CONF:
n->features.async_config = dw11;
break;
@@ -1827,7 +1820,7 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
return NVME_SUCCESS;
}
-static uint16_t nvme_aer(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
+static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req)
{
trace_nvme_aer(req->cid);
@@ -1843,31 +1836,31 @@ static uint16_t nvme_aer(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
return NVME_NO_COMPLETE;
}
-static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
+static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
{
- switch (cmd->opcode) {
+ switch (req->cmd.opcode) {
case NVME_ADM_CMD_DELETE_SQ:
- return nvme_del_sq(n, cmd);
+ return nvme_del_sq(n, req);
case NVME_ADM_CMD_CREATE_SQ:
- return nvme_create_sq(n, cmd);
+ return nvme_create_sq(n, req);
case NVME_ADM_CMD_GET_LOG_PAGE:
- return nvme_get_log(n, cmd, req);
+ return nvme_get_log(n, req);
case NVME_ADM_CMD_DELETE_CQ:
- return nvme_del_cq(n, cmd);
+ return nvme_del_cq(n, req);
case NVME_ADM_CMD_CREATE_CQ:
- return nvme_create_cq(n, cmd);
+ return nvme_create_cq(n, req);
case NVME_ADM_CMD_IDENTIFY:
- return nvme_identify(n, cmd, req);
+ return nvme_identify(n, req);
case NVME_ADM_CMD_ABORT:
- return nvme_abort(n, cmd, req);
+ return nvme_abort(n, req);
case NVME_ADM_CMD_SET_FEATURES:
- return nvme_set_feature(n, cmd, req);
+ return nvme_set_feature(n, req);
case NVME_ADM_CMD_GET_FEATURES:
- return nvme_get_feature(n, cmd, req);
+ return nvme_get_feature(n, req);
case NVME_ADM_CMD_ASYNC_EV_REQ:
- return nvme_aer(n, cmd, req);
+ return nvme_aer(n, req);
default:
- trace_nvme_err_invalid_admin_opc(cmd->opcode);
+ trace_nvme_err_invalid_admin_opc(req->cmd.opcode);
return NVME_INVALID_OPCODE | NVME_DNR;
}
}
@@ -1944,7 +1937,7 @@ static void nvme_process_sq(void *opaque)
while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
addr = sq->dma_addr + sq->head * n->sqe_size;
- nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd));
+ nvme_addr_read(n, addr, (void *)&cmd, sizeof(NvmeCmd));
nvme_inc_sq_head(sq);
req = QTAILQ_FIRST(&sq->req_list);
@@ -1953,8 +1946,8 @@ static void nvme_process_sq(void *opaque)
nvme_init_req(n, &cmd, req);
- status = sq->sqid ? nvme_io_cmd(n, &cmd, req) :
- nvme_admin_cmd(n, &cmd, req);
+ status = sq->sqid ? nvme_io_cmd(n, req) :
+ nvme_admin_cmd(n, req);
if (status != NVME_NO_COMPLETE) {
req->status = status;
nvme_enqueue_req_completion(cq, req);
--
2.23.0
next prev parent reply other threads:[~2019-10-15 10:53 UTC|newest]
Thread overview: 52+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-10-15 10:38 [PATCH v2 00/20] nvme: support NVMe v1.3d, SGLs and multiple namespaces Klaus Jensen
2019-10-15 10:38 ` [PATCH v2 01/20] nvme: remove superfluous breaks Klaus Jensen
2019-10-15 10:38 ` [PATCH v2 02/20] nvme: move device parameters to separate struct Klaus Jensen
2019-10-15 10:38 ` [PATCH v2 03/20] nvme: add missing fields in the identify controller data structure Klaus Jensen
2019-10-15 10:38 ` [PATCH v2 04/20] nvme: populate the mandatory subnqn and ver fields Klaus Jensen
2019-11-12 15:04 ` Beata Michalska
2019-11-13 6:16 ` Klaus Birkelund
2019-10-15 10:38 ` [PATCH v2 05/20] nvme: allow completion queues in the cmb Klaus Jensen
2019-10-15 10:38 ` [PATCH v2 06/20] nvme: add support for the abort command Klaus Jensen
2019-11-12 15:04 ` Beata Michalska
2019-11-13 6:12 ` Klaus Birkelund
2019-11-15 11:56 ` Beata Michalska
2019-11-18 8:49 ` Klaus Birkelund
2019-10-15 10:38 ` [PATCH v2 07/20] nvme: refactor device realization Klaus Jensen
2019-10-15 10:38 ` [PATCH v2 08/20] nvme: add support for the get log page command Klaus Jensen
2019-11-12 15:04 ` Beata Michalska
2019-11-19 20:01 ` Klaus Birkelund
2019-10-15 10:38 ` [PATCH v2 09/20] nvme: add support for the asynchronous event request command Klaus Jensen
2019-11-12 15:04 ` Beata Michalska
2019-11-19 19:51 ` Klaus Birkelund
2019-11-25 12:44 ` Beata Michalska
2019-10-15 10:38 ` [PATCH v2 10/20] nvme: add logging to error information log page Klaus Jensen
2019-10-15 10:38 ` [PATCH v2 11/20] nvme: add missing mandatory features Klaus Jensen
2019-10-15 10:38 ` [PATCH v2 12/20] nvme: bump supported specification version to 1.3 Klaus Jensen
2019-11-12 15:05 ` Beata Michalska
2019-11-18 9:48 ` Klaus Birkelund
2019-11-25 12:13 ` Beata Michalska
2019-11-26 8:40 ` Klaus Birkelund
2019-10-15 10:38 ` [PATCH v2 13/20] nvme: refactor prp mapping Klaus Jensen
2019-11-12 15:23 ` Beata Michalska
2019-11-20 9:39 ` Klaus Birkelund
2019-11-25 13:15 ` Beata Michalska
2019-10-15 10:38 ` [PATCH v2 14/20] nvme: allow multiple aios per command Klaus Jensen
2019-11-12 15:25 ` Beata Michalska
2019-11-21 11:57 ` Klaus Birkelund
2019-11-25 13:59 ` Beata Michalska
2019-10-15 10:38 ` [PATCH v2 15/20] nvme: add support for scatter gather lists Klaus Jensen
2019-11-12 15:25 ` Beata Michalska
2019-11-25 6:21 ` Klaus Birkelund
2019-11-25 14:10 ` Beata Michalska
2019-11-26 8:34 ` Klaus Birkelund
2019-10-15 10:38 ` [PATCH v2 16/20] nvme: support multiple namespaces Klaus Jensen
2019-10-15 10:38 ` [PATCH v2 17/20] nvme: bump controller pci device id Klaus Jensen
2019-10-15 10:38 ` Klaus Jensen [this message]
2019-10-15 10:38 ` [PATCH v2 19/20] nvme: make lba data size configurable Klaus Jensen
2019-11-12 15:24 ` Beata Michalska
2019-11-13 7:13 ` Klaus Birkelund
2019-10-15 10:39 ` [PATCH v2 20/20] nvme: handle dma errors Klaus Jensen
2019-10-15 17:19 ` [PATCH v2 00/20] nvme: support NVMe v1.3d, SGLs and multiple namespaces no-reply
2019-10-15 17:26 ` no-reply
2019-10-16 6:29 ` Fam Zheng
2019-10-28 6:09 ` Klaus Birkelund
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20191015103900.313928-19-its@irrelevant.dk \
--to=its@irrelevant.dk \
--cc=Paul.Durrant@citrix.com \
--cc=fam@euphon.net \
--cc=javier@javigon.com \
--cc=keith.busch@intel.com \
--cc=kwolf@redhat.com \
--cc=mreitz@redhat.com \
--cc=qemu-block@nongnu.org \
--cc=qemu-devel@nongnu.org \
--cc=sbates@raithlin.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).