From: Stefan Hajnoczi <stefanha@redhat.com>
To: qemu-devel@nongnu.org, Peter Maydell <peter.maydell@linaro.org>
Cc: kvm@vger.kernel.org,
	"Dr. David Alan Gilbert" <dgilbert@redhat.com>,
	"Daniel P. Berrangé" <berrange@redhat.com>,
	"Eduardo Habkost" <ehabkost@redhat.com>,
	"Markus Armbruster" <armbru@redhat.com>,
	"Eric Blake" <eblake@redhat.com>, "Fam Zheng" <fam@euphon.net>,
	"Stefan Hajnoczi" <stefanha@redhat.com>,
	"Keith Busch" <kbusch@kernel.org>,
	"Max Reitz" <mreitz@redhat.com>,
	qemu-block@nongnu.org, "Kevin Wolf" <kwolf@redhat.com>,
	"Coiby Xu" <Coiby.Xu@gmail.com>,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Klaus Jensen" <its@irrelevant.dk>,
	"Philippe Mathieu-Daudé" <philmd@redhat.com>,
	"Eric Auger" <eric.auger@redhat.com>
Subject: [PULL 11/33] block/nvme: Use unsigned integer for queue counter/size
Date: Wed,  4 Nov 2020 15:18:06 +0000
Message-ID: <20201104151828.405824-12-stefanha@redhat.com>
In-Reply-To: <20201104151828.405824-1-stefanha@redhat.com>

From: Philippe Mathieu-Daudé <philmd@redhat.com>

We cannot have a negative queue count, size, or index, so use an unsigned
type. Rename 'nr_queues' to 'queue_count' to match the spec naming.
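
As a minimal standalone illustration only (not part of this patch, and
using made-up struct and function names), the sketch below shows the
pattern the change converges on: an unsigned counter field, unsigned loop
indices, and matching "%u" format specifiers, so the type itself documents
that the value can never be negative:

/* Illustrative sketch; names are hypothetical, not from block/nvme.c. */
#include <stdio.h>

struct demo_state {
    unsigned queue_count;   /* mirrors s->queue_count: never negative */
};

static void demo_poll_all(const struct demo_state *s)
{
    /* An unsigned loop index matches the unsigned counter and avoids
     * signed/unsigned comparison warnings. */
    for (unsigned i = 0; i < s->queue_count; i++) {
        printf("polling queue %u of %u\n", i, s->queue_count);
    }
}

int main(void)
{
    struct demo_state s = { .queue_count = 2 };
    demo_poll_all(&s);
    return 0;
}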

Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20201029093306.1063879-10-philmd@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
---
 block/nvme.c       | 38 ++++++++++++++++++--------------------
 block/trace-events | 10 +++++-----
 2 files changed, 23 insertions(+), 25 deletions(-)

diff --git a/block/nvme.c b/block/nvme.c
index b0629f5de8..c450499111 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -104,7 +104,7 @@ struct BDRVNVMeState {
      * [1..]: io queues.
      */
     NVMeQueuePair **queues;
-    int nr_queues;
+    unsigned queue_count;
     size_t page_size;
     /* How many uint32_t elements does each doorbell entry take. */
     size_t doorbell_scale;
@@ -161,7 +161,7 @@ static QemuOptsList runtime_opts = {
 };
 
 static void nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
-                            int nentries, int entry_bytes, Error **errp)
+                            unsigned nentries, size_t entry_bytes, Error **errp)
 {
     size_t bytes;
     int r;
@@ -206,7 +206,7 @@ static void nvme_free_req_queue_cb(void *opaque)
 
 static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
                                              AioContext *aio_context,
-                                             int idx, int size,
+                                             unsigned idx, size_t size,
                                              Error **errp)
 {
     int i, r;
@@ -623,7 +623,7 @@ static bool nvme_poll_queues(BDRVNVMeState *s)
     bool progress = false;
     int i;
 
-    for (i = 0; i < s->nr_queues; i++) {
+    for (i = 0; i < s->queue_count; i++) {
         if (nvme_poll_queue(s->queues[i])) {
             progress = true;
         }
@@ -644,10 +644,10 @@ static void nvme_handle_event(EventNotifier *n)
 static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
 {
     BDRVNVMeState *s = bs->opaque;
-    int n = s->nr_queues;
+    unsigned n = s->queue_count;
     NVMeQueuePair *q;
     NvmeCmd cmd;
-    int queue_size = NVME_QUEUE_SIZE;
+    unsigned queue_size = NVME_QUEUE_SIZE;
 
     q = nvme_create_queue_pair(s, bdrv_get_aio_context(bs),
                                n, queue_size, errp);
@@ -661,7 +661,7 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
         .cdw11 = cpu_to_le32(0x3),
     };
     if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
-        error_setg(errp, "Failed to create CQ io queue [%d]", n);
+        error_setg(errp, "Failed to create CQ io queue [%u]", n);
         goto out_error;
     }
     cmd = (NvmeCmd) {
@@ -671,12 +671,12 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
         .cdw11 = cpu_to_le32(0x1 | (n << 16)),
     };
     if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
-        error_setg(errp, "Failed to create SQ io queue [%d]", n);
+        error_setg(errp, "Failed to create SQ io queue [%u]", n);
         goto out_error;
     }
     s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
     s->queues[n] = q;
-    s->nr_queues++;
+    s->queue_count++;
     return true;
 out_error:
     nvme_free_queue_pair(q);
@@ -785,7 +785,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
         ret = -EINVAL;
         goto out;
     }
-    s->nr_queues = 1;
+    s->queue_count = 1;
     QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
     regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << AQA_ACQS_SHIFT) |
                             (NVME_QUEUE_SIZE << AQA_ASQS_SHIFT));
@@ -895,10 +895,9 @@ static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
 
 static void nvme_close(BlockDriverState *bs)
 {
-    int i;
     BDRVNVMeState *s = bs->opaque;
 
-    for (i = 0; i < s->nr_queues; ++i) {
+    for (unsigned i = 0; i < s->queue_count; ++i) {
         nvme_free_queue_pair(s->queues[i]);
     }
     g_free(s->queues);
@@ -1123,7 +1122,7 @@ static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
     };
 
     trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
-    assert(s->nr_queues > 1);
+    assert(s->queue_count > 1);
     req = nvme_get_free_req(ioq);
     assert(req);
 
@@ -1233,7 +1232,7 @@ static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
         .ret = -EINPROGRESS,
     };
 
-    assert(s->nr_queues > 1);
+    assert(s->queue_count > 1);
     req = nvme_get_free_req(ioq);
     assert(req);
     nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
@@ -1285,7 +1284,7 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
     cmd.cdw12 = cpu_to_le32(cdw12);
 
     trace_nvme_write_zeroes(s, offset, bytes, flags);
-    assert(s->nr_queues > 1);
+    assert(s->queue_count > 1);
     req = nvme_get_free_req(ioq);
     assert(req);
 
@@ -1328,7 +1327,7 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
         return -ENOTSUP;
     }
 
-    assert(s->nr_queues > 1);
+    assert(s->queue_count > 1);
 
     buf = qemu_try_memalign(s->page_size, s->page_size);
     if (!buf) {
@@ -1408,7 +1407,7 @@ static void nvme_detach_aio_context(BlockDriverState *bs)
 {
     BDRVNVMeState *s = bs->opaque;
 
-    for (int i = 0; i < s->nr_queues; i++) {
+    for (unsigned i = 0; i < s->queue_count; i++) {
         NVMeQueuePair *q = s->queues[i];
 
         qemu_bh_delete(q->completion_bh);
@@ -1429,7 +1428,7 @@ static void nvme_attach_aio_context(BlockDriverState *bs,
     aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                            false, nvme_handle_event, nvme_poll_cb);
 
-    for (int i = 0; i < s->nr_queues; i++) {
+    for (unsigned i = 0; i < s->queue_count; i++) {
         NVMeQueuePair *q = s->queues[i];
 
         q->completion_bh =
@@ -1446,11 +1445,10 @@ static void nvme_aio_plug(BlockDriverState *bs)
 
 static void nvme_aio_unplug(BlockDriverState *bs)
 {
-    int i;
     BDRVNVMeState *s = bs->opaque;
     assert(s->plugged);
     s->plugged = false;
-    for (i = INDEX_IO(0); i < s->nr_queues; i++) {
+    for (unsigned i = INDEX_IO(0); i < s->queue_count; i++) {
         NVMeQueuePair *q = s->queues[i];
         qemu_mutex_lock(&q->lock);
         nvme_kick(q);
diff --git a/block/trace-events b/block/trace-events
index f6a0f99df1..8368f4acb0 100644
--- a/block/trace-events
+++ b/block/trace-events
@@ -136,13 +136,13 @@ qed_aio_write_main(void *s, void *acb, int ret, uint64_t offset, size_t len) "s
 # nvme.c
 nvme_controller_capability_raw(uint64_t value) "0x%08"PRIx64
 nvme_controller_capability(const char *desc, uint64_t value) "%s: %"PRIu64
-nvme_kick(void *s, int queue) "s %p queue %d"
+nvme_kick(void *s, unsigned q_index) "s %p q #%u"
 nvme_dma_flush_queue_wait(void *s) "s %p"
 nvme_error(int cmd_specific, int sq_head, int sqid, int cid, int status) "cmd_specific %d sq_head %d sqid %d cid %d status 0x%x"
-nvme_process_completion(void *s, int index, int inflight) "s %p queue %d inflight %d"
-nvme_process_completion_queue_plugged(void *s, int index) "s %p queue %d"
-nvme_complete_command(void *s, int index, int cid) "s %p queue %d cid %d"
-nvme_submit_command(void *s, int index, int cid) "s %p queue %d cid %d"
+nvme_process_completion(void *s, unsigned q_index, int inflight) "s %p q #%u inflight %d"
+nvme_process_completion_queue_plugged(void *s, unsigned q_index) "s %p q #%u"
+nvme_complete_command(void *s, unsigned q_index, int cid) "s %p q #%u cid %d"
+nvme_submit_command(void *s, unsigned q_index, int cid) "s %p q #%u cid %d"
 nvme_submit_command_raw(int c0, int c1, int c2, int c3, int c4, int c5, int c6, int c7) "%02x %02x %02x %02x %02x %02x %02x %02x"
 nvme_handle_event(void *s) "s %p"
 nvme_poll_queue(void *s, unsigned q_index) "s %p q #%u"
-- 
2.28.0


Thread overview: 37+ messages
2020-11-04 15:17 [PULL 00/33] Block patches Stefan Hajnoczi
2020-11-04 15:17 ` [PULL 01/33] accel/kvm: add PIO ioeventfds only in case kvm_eventfds_allowed is true Stefan Hajnoczi
2020-11-04 15:17 ` [PULL 02/33] softmmu/memory: fix memory_region_ioeventfd_equal() Stefan Hajnoczi
2020-11-04 15:17 ` [PULL 03/33] MAINTAINERS: Cover "block/nvme.h" file Stefan Hajnoczi
2020-11-04 15:17 ` [PULL 04/33] block/nvme: Use hex format to display offset in trace events Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 05/33] block/nvme: Report warning with warn_report() Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 06/33] block/nvme: Trace controller capabilities Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 07/33] block/nvme: Trace nvme_poll_queue() per queue Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 08/33] block/nvme: Improve nvme_free_req_queue_wait() trace information Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 09/33] block/nvme: Trace queue pair creation/deletion Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 10/33] block/nvme: Move definitions before structure declarations Stefan Hajnoczi
2020-11-04 15:18 ` Stefan Hajnoczi [this message]
2020-11-04 15:18 ` [PULL 12/33] block/nvme: Make nvme_identify() return boolean indicating error Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 13/33] block/nvme: Make nvme_init_queue() " Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 14/33] block/nvme: Introduce Completion Queue definitions Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 15/33] block/nvme: Use definitions instead of magic values in add_io_queue() Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 16/33] block/nvme: Correctly initialize Admin Queue Attributes Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 17/33] block/nvme: Simplify ADMIN queue access Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 18/33] block/nvme: Simplify nvme_cmd_sync() Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 19/33] block/nvme: Set request_alignment at initialization Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 20/33] block/nvme: Correct minimum device page size Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 21/33] block/nvme: Change size and alignment of IDENTIFY response buffer Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 22/33] block/nvme: Change size and alignment of queue Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 23/33] block/nvme: Change size and alignment of prp_list_pages Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 24/33] block/nvme: Align iov's va and size on host page size Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 25/33] block/nvme: Fix use of write-only doorbells page on Aarch64 arch Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 26/33] block/nvme: Fix nvme_submit_command() on big-endian host Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 27/33] util/vfio-helpers: Improve reporting unsupported IOMMU type Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 28/33] util/vfio-helpers: Trace PCI I/O config accesses Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 29/33] util/vfio-helpers: Trace PCI BAR region info Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 30/33] util/vfio-helpers: Trace where BARs are mapped Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 31/33] util/vfio-helpers: Improve DMA trace events Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 32/33] util/vfio-helpers: Convert vfio_dump_mapping to " Stefan Hajnoczi
2020-11-04 15:18 ` [PULL 33/33] util/vfio-helpers: Assert offset is aligned to page size Stefan Hajnoczi
2020-11-04 20:59 ` [PULL 00/33] Block patches Peter Maydell
2020-11-23 12:55   ` Philippe Mathieu-Daudé
2020-11-23 14:47     ` Peter Maydell
