From: Klaus Jensen <its@irrelevant.dk>
To: qemu-block@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>, Fam Zheng <fam@euphon.net>,
	Javier Gonzalez <javier@javigon.com>,
	qemu-devel@nongnu.org, Max Reitz <mreitz@redhat.com>,
	Keith Busch <keith.busch@intel.com>,
	Paul Durrant <Paul.Durrant@citrix.com>,
	Stephen Bates <sbates@raithlin.com>
Subject: [PATCH v2 12/20] nvme: bump supported specification version to 1.3
Date: Tue, 15 Oct 2019 12:38:52 +0200
Message-ID: <20191015103900.313928-13-its@irrelevant.dk>
In-Reply-To: <20191015103900.313928-1-its@irrelevant.dk>

Add support for the Namespace Identification Descriptor List (Identify
CNS 03h) and track queue creation so that the controller can return a
Command Sequence Error if Set Features is issued for Number of Queues
after any queues have been created.

Signed-off-by: Klaus Jensen <klaus.jensen@cnexlabs.com>
---
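Note for reviewers (not part of the patch): below is a minimal,
self-contained C sketch of the Namespace Identification Descriptor List
added here. It builds a 4 KiB buffer the same way
nvme_identify_ns_descr_list() does (a single NIDT 0x3 / NIDL 0x10
descriptor carrying the NSID in the last four bytes of NID, with the
rest of the buffer zeroed) and then walks it the way a host might.
htonl()/ntohl() stand in for cpu_to_be32()/be32_to_cpu(), the NSID value
of 1 is arbitrary, and decoding the NSID out of nid[12..15] reflects
this device implementation rather than anything the spec mandates.

  #include <arpa/inet.h>   /* htonl()/ntohl() as stand-ins for the qemu byteswap helpers */
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  /* Mirrors the descriptor layout used in nvme_identify_ns_descr_list(). */
  struct ns_descr {
      uint8_t nidt;      /* Namespace Identifier Type */
      uint8_t nidl;      /* Namespace Identifier Length */
      uint8_t rsvd2[2];
      uint8_t nid[16];   /* identifier payload */
  };

  int main(void)
  {
      uint8_t buf[4096] = { 0 };
      uint32_t nsid = 1;

      /* Device side: fill in one descriptor (type 0x3, 16 bytes), leave the rest zeroed. */
      struct ns_descr *d = (struct ns_descr *) buf;
      d->nidt = 0x3;
      d->nidl = 0x10;
      uint32_t be_nsid = htonl(nsid);
      memcpy(&d->nid[12], &be_nsid, sizeof(be_nsid));

      /* Host side: walk descriptors until a zeroed entry is reached. */
      size_t off = 0;
      while (off + sizeof(struct ns_descr) <= sizeof(buf)) {
          const struct ns_descr *e = (const struct ns_descr *) (buf + off);
          if (e->nidt == 0) {
              break;
          }
          uint32_t id;
          memcpy(&id, &e->nid[12], sizeof(id));
          printf("nidt 0x%x nidl %u nsid %u\n",
                 (unsigned) e->nidt, (unsigned) e->nidl, ntohl(id));
          off += 4 + e->nidl;
      }
      return 0;
  }

Since the remainder of the 4 KiB buffer is zero-filled, the walker stops
at the first descriptor with an NIDT of 0.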
 hw/block/nvme.c       | 82 +++++++++++++++++++++++++++++++++++--------
 hw/block/nvme.h       |  1 +
 hw/block/trace-events |  8 +++--
 include/block/nvme.h  | 30 +++++++++++++---
 4 files changed, 100 insertions(+), 21 deletions(-)

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index e7d46dcc6afe..1e2320b38b14 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -9,20 +9,22 @@
  */
 
 /**
- * Reference Specification: NVM Express 1.2.1
+ * Reference Specification: NVM Express 1.3d
  *
  *   https://nvmexpress.org/resources/specifications/
  */
 
 /**
  * Usage: add options:
- *      -drive file=<file>,if=none,id=<drive_id>
- *      -device nvme,drive=<drive_id>,serial=<serial>,id=<id[optional]>, \
- *              cmb_size_mb=<cmb_size_mb[optional]>, \
- *              num_queues=<N[optional]>
+ *     -drive file=<file>,if=none,id=<drive_id>
+ *     -device nvme,drive=<drive_id>,serial=<serial>,id=<id[optional]>
  *
- * Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
- * offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
+ * Advanced optional options:
+ *
+ *   num_queues=<uint32>      : Maximum number of IO Queues.
+ *                              Default: 64
+ *   cmb_size_mb=<uint32>     : Size of Controller Memory Buffer in MBs.
+ *                              Default: 0 (disabled)
  */
 
 #include "qemu/osdep.h"
@@ -345,6 +347,8 @@ static void nvme_post_cqes(void *opaque)
 static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
 {
     assert(cq->cqid == req->sq->cqid);
+
+    trace_nvme_enqueue_req_completion(req->cid, cq->cqid, req->status);
     QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
     QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
     timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
@@ -530,6 +534,7 @@ static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
     if (sq->sqid) {
         g_free(sq);
     }
+    n->qs_created--;
 }
 
 static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
@@ -596,6 +601,7 @@ static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
     cq = n->cq[cqid];
     QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
     n->sq[sqid] = sq;
+    n->qs_created++;
 }
 
 static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
@@ -742,7 +748,8 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
     uint32_t dw11 = le32_to_cpu(cmd->cdw11);
     uint32_t dw12 = le32_to_cpu(cmd->cdw12);
     uint32_t dw13 = le32_to_cpu(cmd->cdw13);
-    uint16_t lid = dw10 & 0xff;
+    uint8_t  lid = dw10 & 0xff;
+    uint8_t  lsp = (dw10 >> 8) & 0xf;
     uint8_t  rae = (dw10 >> 15) & 0x1;
     uint32_t numdl, numdu;
     uint64_t off, lpol, lpou;
@@ -760,7 +767,7 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
         return NVME_INVALID_FIELD | NVME_DNR;
     }
 
-    trace_nvme_get_log(req->cid, lid, rae, len, off);
+    trace_nvme_get_log(req->cid, lid, lsp, rae, len, off);
 
     switch (lid) {
     case NVME_LOG_ERROR_INFO:
@@ -784,6 +791,7 @@ static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
     if (cq->cqid) {
         g_free(cq);
     }
+    n->qs_created--;
 }
 
 static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
@@ -824,6 +832,7 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
     msix_vector_use(&n->parent_obj, cq->vector);
     n->cq[cqid] = cq;
     cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
+    n->qs_created++;
 }
 
 static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
@@ -897,7 +906,7 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
         prp1, prp2);
 }
 
-static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
+static uint16_t nvme_identify_ns_list(NvmeCtrl *n, NvmeIdentify *c)
 {
     static const int data_len = 4 * KiB;
     uint32_t min_nsid = le32_to_cpu(c->nsid);
@@ -907,7 +916,7 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
     uint16_t ret;
     int i, j = 0;
 
-    trace_nvme_identify_nslist(min_nsid);
+    trace_nvme_identify_ns_list(min_nsid);
 
     list = g_malloc0(data_len);
     for (i = 0; i < n->num_namespaces; i++) {
@@ -924,6 +933,41 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
     return ret;
 }
 
+static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeCmd *c)
+{
+    static const int len = 4096;
+
+    struct ns_descr {
+        uint8_t nidt;
+        uint8_t nidl;
+        uint8_t rsvd2[2];
+        uint8_t nid[16];
+    };
+
+    uint32_t nsid = le32_to_cpu(c->nsid);
+    uint64_t prp1 = le64_to_cpu(c->prp1);
+    uint64_t prp2 = le64_to_cpu(c->prp2);
+
+    struct ns_descr *list;
+    uint16_t ret;
+
+    trace_nvme_identify_ns_descr_list(nsid);
+
+    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
+        trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
+        return NVME_INVALID_NSID | NVME_DNR;
+    }
+
+    list = g_malloc0(len);
+    list->nidt = 0x3;
+    list->nidl = 0x10;
+    *(uint32_t *) &list->nid[12] = cpu_to_be32(nsid);
+
+    ret = nvme_dma_read_prp(n, (uint8_t *) list, len, prp1, prp2);
+    g_free(list);
+    return ret;
+}
+
 static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
 {
     NvmeIdentify *c = (NvmeIdentify *)cmd;
@@ -934,7 +978,9 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
     case 0x01:
         return nvme_identify_ctrl(n, c);
     case 0x02:
-        return nvme_identify_nslist(n, c);
+        return nvme_identify_ns_list(n, c);
+    case 0x03:
+        return nvme_identify_ns_descr_list(n, cmd);
     default:
         trace_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
         return NVME_INVALID_FIELD | NVME_DNR;
@@ -1101,6 +1147,14 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
         blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
         break;
     case NVME_NUMBER_OF_QUEUES:
+        if (n->qs_created > 2) {
+            return NVME_CMD_SEQ_ERROR | NVME_DNR;
+        }
+
+        if ((dw11 & 0xffff) == 0xffff || ((dw11 >> 16) & 0xffff) == 0xffff) {
+            return NVME_INVALID_FIELD | NVME_DNR;
+        }
+
         trace_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
                                 ((dw11 >> 16) & 0xFFFF) + 1,
                                 n->params.num_queues - 1,
@@ -1816,7 +1870,7 @@ static void nvme_init_ctrl(NvmeCtrl *n)
     id->ieee[0] = 0x00;
     id->ieee[1] = 0x02;
     id->ieee[2] = 0xb3;
-    id->ver = cpu_to_le32(0x00010201);
+    id->ver = cpu_to_le32(0x00010300);
     id->oacs = cpu_to_le16(0);
     id->acl = 3;
     id->aerl = n->params.aerl;
@@ -1847,7 +1901,7 @@ static void nvme_init_ctrl(NvmeCtrl *n)
     NVME_CAP_SET_CSS(n->bar.cap, 1);
     NVME_CAP_SET_MPSMAX(n->bar.cap, 4);
 
-    n->bar.vs = 0x00010201;
+    n->bar.vs = 0x00010300;
     n->bar.intmc = n->bar.intms = 0;
 }
 
diff --git a/hw/block/nvme.h b/hw/block/nvme.h
index d74b0e0f9b2c..3f7bd627e824 100644
--- a/hw/block/nvme.h
+++ b/hw/block/nvme.h
@@ -102,6 +102,7 @@ typedef struct NvmeCtrl {
     uint16_t    temperature;
     uint8_t     elp_index;
     uint64_t    error_count;
+    uint32_t    qs_created;
 
     QEMUTimer   *aer_timer;
     uint8_t     aer_mask;
diff --git a/hw/block/trace-events b/hw/block/trace-events
index a20a68d85d5a..f62fa99dc2cd 100644
--- a/hw/block/trace-events
+++ b/hw/block/trace-events
@@ -39,8 +39,9 @@ nvme_create_cq(uint64_t addr, uint16_t cqid, uint16_t vector, uint16_t size, uin
 nvme_del_sq(uint16_t qid) "deleting submission queue sqid=%"PRIu16""
 nvme_del_cq(uint16_t cqid) "deleted completion queue, sqid=%"PRIu16""
 nvme_identify_ctrl(void) "identify controller"
-nvme_identify_ns(uint16_t ns) "identify namespace, nsid=%"PRIu16""
-nvme_identify_nslist(uint16_t ns) "identify namespace list, nsid=%"PRIu16""
+nvme_identify_ns(uint32_t ns) "nsid %"PRIu32""
+nvme_identify_ns_list(uint32_t ns) "nsid %"PRIu32""
+nvme_identify_ns_descr_list(uint32_t ns) "nsid %"PRIu32""
 nvme_getfeat(uint32_t fid) "fid 0x%"PRIx32""
 nvme_setfeat(uint32_t fid, uint32_t val) "fid 0x%"PRIx32" val 0x%"PRIx32""
 nvme_getfeat_vwcache(const char* result) "get feature volatile write cache, result=%s"
@@ -48,12 +49,13 @@ nvme_getfeat_numq(int result) "get feature number of queues, result=%d"
 nvme_setfeat_numq(int reqcq, int reqsq, int gotcq, int gotsq) "requested cq_count=%d sq_count=%d, responding with cq_count=%d sq_count=%d"
 nvme_setfeat_timestamp(uint64_t ts) "set feature timestamp = 0x%"PRIx64""
 nvme_getfeat_timestamp(uint64_t ts) "get feature timestamp = 0x%"PRIx64""
-nvme_get_log(uint16_t cid, uint16_t lid, uint8_t rae, uint32_t len, uint64_t off) "cid %"PRIu16" lid 0x%"PRIx16" rae 0x%"PRIx8" len %"PRIu32" off %"PRIu64""
+nvme_get_log(uint16_t cid, uint16_t lid, uint8_t lsp, uint8_t rae, uint32_t len, uint64_t off) "cid %"PRIu16" lid 0x%"PRIx16" lsp 0x%"PRIx8" rae 0x%"PRIx8" len %"PRIu32" off %"PRIu64""
 nvme_process_aers(void) "processing aers"
 nvme_aer(uint16_t cid) "cid %"PRIu16""
 nvme_aer_aerl_exceeded(void) "aerl exceeded"
 nvme_aer_masked(uint8_t type, uint8_t mask) "type 0x%"PRIx8" mask 0x%"PRIx8""
 nvme_aer_post_cqe(uint8_t typ, uint8_t info, uint8_t log_page) "type 0x%"PRIx8" info 0x%"PRIx8" lid 0x%"PRIx8""
+nvme_enqueue_req_completion(uint16_t cid, uint16_t cqid, uint16_t status) "cid %"PRIu16" cqid %"PRIu16" status 0x%"PRIx16""
 nvme_enqueue_event(uint8_t typ, uint8_t info, uint8_t log_page) "type 0x%"PRIx8" info 0x%"PRIx8" lid 0x%"PRIx8""
 nvme_enqueue_event_masked(uint8_t typ) "type 0x%"PRIx8""
 nvme_no_outstanding_aers(void) "ignoring event; no outstanding AERs"
diff --git a/include/block/nvme.h b/include/block/nvme.h
index 9726839f75bb..f0f5728b5ec4 100644
--- a/include/block/nvme.h
+++ b/include/block/nvme.h
@@ -550,7 +550,9 @@ typedef struct NvmeIdCtrl {
     uint32_t    rtd3e;
     uint32_t    oaes;
     uint32_t    ctratt;
-    uint8_t     rsvd255[156];
+    uint8_t     rsvd111[12];
+    uint8_t     fguid[16];
+    uint8_t     rsvd255[128];
     uint16_t    oacs;
     uint8_t     acl;
     uint8_t     aerl;
@@ -568,9 +570,15 @@ typedef struct NvmeIdCtrl {
     uint8_t     tnvmcap[16];
     uint8_t     unvmcap[16];
     uint32_t    rpmbs;
-    uint8_t     rsvd319[4];
+    uint16_t    edstt;
+    uint8_t     dsto;
+    uint8_t     fwug;
     uint16_t    kas;
-    uint8_t     rsvd511[190];
+    uint16_t    hctma;
+    uint16_t    mntmt;
+    uint16_t    mxtmt;
+    uint32_t    sanicap;
+    uint8_t     rsvd511[180];
     uint8_t     sqes;
     uint8_t     cqes;
     uint16_t    maxcmd;
@@ -678,7 +686,21 @@ typedef struct NvmeIdNs {
     uint8_t     mc;
     uint8_t     dpc;
     uint8_t     dps;
-    uint8_t     res30[98];
+    uint8_t     nmic;
+    uint8_t     rescap;
+    uint8_t     fpi;
+    uint8_t     dlfeat;
+    uint16_t    nawun;
+    uint16_t    nawupf;
+    uint16_t    nacwu;
+    uint16_t    nabsn;
+    uint16_t    nabo;
+    uint16_t    nabspf;
+    uint16_t    noiob;
+    uint8_t     nvmcap[16];
+    uint8_t     resv103[40];
+    uint8_t     nguid[16];
+    uint64_t    eui64;
     NvmeLBAF    lbaf[16];
     uint8_t     res192[192];
     uint8_t     vs[3712];
-- 
2.23.0


