From: Kevin Wolf <kwolf@redhat.com>
To: qemu-block@nongnu.org
Cc: kwolf@redhat.com, qemu-devel@nongnu.org
Subject: [Qemu-devel] [PULL 12/28] block: Add Error to blk_set_aio_context()
Date: Mon,  3 Jun 2019 17:02:17 +0200
Message-ID: <20190603150233.6614-13-kwolf@redhat.com>
In-Reply-To: <20190603150233.6614-1-kwolf@redhat.com>

Add an Error parameter to blk_set_aio_context() and use
bdrv_child_try_set_aio_context() internally to check whether all
involved nodes can actually support the AioContext switch.
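
The new signature admits three caller patterns, all of which appear in the
hunks below. A minimal illustrative sketch (not taken verbatim from this
patch; assumes blk, ctx and errp are in scope):

    /* Propagate a failed switch to our own caller: */
    if (blk_set_aio_context(blk, ctx, errp) < 0) {
        return;
    }

    /* The switch is known to be safe; a failure would be a bug: */
    blk_set_aio_context(blk, ctx, &error_abort);

    /* Best effort: it is ok if other users keep blk in the iothread */
    blk_set_aio_context(blk, ctx, NULL);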

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/sysemu/block-backend.h  |  3 ++-
 block/block-backend.c           | 26 ++++++++++++++++----------
 hw/block/dataplane/virtio-blk.c | 12 +++++++++---
 hw/block/dataplane/xen-block.c  |  6 ++++--
 hw/scsi/virtio-scsi.c           | 10 +++++++---
 tests/test-bdrv-drain.c         |  8 ++++----
 tests/test-block-iothread.c     | 22 +++++++++++-----------
 7 files changed, 53 insertions(+), 34 deletions(-)

diff --git a/include/sysemu/block-backend.h b/include/sysemu/block-backend.h
index 938de34fe9..228fb3fb83 100644
--- a/include/sysemu/block-backend.h
+++ b/include/sysemu/block-backend.h
@@ -208,7 +208,8 @@ void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason);
 void blk_op_block_all(BlockBackend *blk, Error *reason);
 void blk_op_unblock_all(BlockBackend *blk, Error *reason);
 AioContext *blk_get_aio_context(BlockBackend *blk);
-void blk_set_aio_context(BlockBackend *blk, AioContext *new_context);
+int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
+                        Error **errp);
 void blk_add_aio_context_notifier(BlockBackend *blk,
         void (*attached_aio_context)(AioContext *new_context, void *opaque),
         void (*detach_aio_context)(void *opaque), void *opaque);
diff --git a/block/block-backend.c b/block/block-backend.c
index ad3e1c882d..390fde6f71 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -1865,30 +1865,36 @@ static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
     return blk_get_aio_context(blk_acb->blk);
 }
 
-static void blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context,
-                                   bool update_root_node)
+static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context,
+                                  bool update_root_node, Error **errp)
 {
     BlockDriverState *bs = blk_bs(blk);
     ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
+    int ret;
 
     if (bs) {
+        if (update_root_node) {
+            ret = bdrv_child_try_set_aio_context(bs, new_context, blk->root,
+                                                 errp);
+            if (ret < 0) {
+                return ret;
+            }
+        }
         if (tgm->throttle_state) {
             bdrv_drained_begin(bs);
             throttle_group_detach_aio_context(tgm);
             throttle_group_attach_aio_context(tgm, new_context);
             bdrv_drained_end(bs);
         }
-        if (update_root_node) {
-            GSList *ignore = g_slist_prepend(NULL, blk->root);
-            bdrv_set_aio_context_ignore(bs, new_context, &ignore);
-            g_slist_free(ignore);
-        }
     }
+
+    return 0;
 }
 
-void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
+int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
+                        Error **errp)
 {
-    blk_do_set_aio_context(blk, new_context, true);
+    return blk_do_set_aio_context(blk, new_context, true, errp);
 }
 
 static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx,
@@ -1915,7 +1921,7 @@ static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx,
                                  GSList **ignore)
 {
     BlockBackend *blk = child->opaque;
-    blk_do_set_aio_context(blk, ctx, false);
+    blk_do_set_aio_context(blk, ctx, false, &error_abort);
 }
 
 void blk_add_aio_context_notifier(BlockBackend *blk,
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 8c37bd314a..158c78f852 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -173,6 +173,7 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
     unsigned i;
     unsigned nvqs = s->conf->num_queues;
+    Error *local_err = NULL;
     int r;
 
     if (vblk->dataplane_started || s->starting) {
@@ -212,7 +213,11 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
     vblk->dataplane_started = true;
     trace_virtio_blk_data_plane_start(s);
 
-    blk_set_aio_context(s->conf->conf.blk, s->ctx);
+    r = blk_set_aio_context(s->conf->conf.blk, s->ctx, &local_err);
+    if (r < 0) {
+        error_report_err(local_err);
+        goto fail_guest_notifiers;
+    }
 
     /* Kick right away to begin processing requests already in vring */
     for (i = 0; i < nvqs; i++) {
@@ -281,8 +286,9 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
     aio_context_acquire(s->ctx);
     aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s);
 
-    /* Drain and switch bs back to the QEMU main loop */
-    blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());
+    /* Drain and try to switch bs back to the QEMU main loop. If other users
+     * keep the BlockBackend in the iothread, that's ok */
+    blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context(), NULL);
 
     aio_context_release(s->ctx);
 
diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c
index bb8f1186e4..f7ad452bbd 100644
--- a/hw/block/dataplane/xen-block.c
+++ b/hw/block/dataplane/xen-block.c
@@ -682,7 +682,8 @@ void xen_block_dataplane_stop(XenBlockDataPlane *dataplane)
     }
 
     aio_context_acquire(dataplane->ctx);
-    blk_set_aio_context(dataplane->blk, qemu_get_aio_context());
+    /* Xen doesn't have multiple users for nodes, so this can't fail */
+    blk_set_aio_context(dataplane->blk, qemu_get_aio_context(), &error_abort);
     aio_context_release(dataplane->ctx);
 
     xendev = dataplane->xendev;
@@ -811,7 +812,8 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
     }
 
     aio_context_acquire(dataplane->ctx);
-    blk_set_aio_context(dataplane->blk, dataplane->ctx);
+    /* If other users keep the BlockBackend in the iothread, that's ok */
+    blk_set_aio_context(dataplane->blk, dataplane->ctx, NULL);
     aio_context_release(dataplane->ctx);
     return;
 
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 839f120256..01c2b85f90 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -795,6 +795,7 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
     VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
     VirtIOSCSI *s = VIRTIO_SCSI(vdev);
     SCSIDevice *sd = SCSI_DEVICE(dev);
+    int ret;
 
     if (s->ctx && !s->dataplane_fenced) {
         AioContext *ctx;
@@ -808,9 +809,11 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
             return;
         }
         virtio_scsi_acquire(s);
-        blk_set_aio_context(sd->conf.blk, s->ctx);
+        ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp);
         virtio_scsi_release(s);
-
+        if (ret < 0) {
+            return;
+        }
     }
 
     if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
@@ -839,7 +842,8 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
 
     if (s->ctx) {
         virtio_scsi_acquire(s);
-        blk_set_aio_context(sd->conf.blk, qemu_get_aio_context());
+        /* If other users keep the BlockBackend in the iothread, that's ok */
+        blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL);
         virtio_scsi_release(s);
     }
 
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index 5534c2adf9..e86798923f 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -678,7 +678,7 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
     s = bs->opaque;
     blk_insert_bs(blk, bs, &error_abort);
 
-    blk_set_aio_context(blk, ctx_a);
+    blk_set_aio_context(blk, ctx_a, &error_abort);
     aio_context_acquire(ctx_a);
 
     s->bh_indirection_ctx = ctx_b;
@@ -742,7 +742,7 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
     }
 
     aio_context_acquire(ctx_a);
-    blk_set_aio_context(blk, qemu_get_aio_context());
+    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
     aio_context_release(ctx_a);
 
     bdrv_unref(bs);
@@ -903,7 +903,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
     if (use_iothread) {
         iothread = iothread_new();
         ctx = iothread_get_aio_context(iothread);
-        blk_set_aio_context(blk_src, ctx);
+        blk_set_aio_context(blk_src, ctx, &error_abort);
     } else {
         ctx = qemu_get_aio_context();
     }
@@ -1001,7 +1001,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
     g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));
 
     if (use_iothread) {
-        blk_set_aio_context(blk_src, qemu_get_aio_context());
+        blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
     }
     aio_context_release(ctx);
 
diff --git a/tests/test-block-iothread.c b/tests/test-block-iothread.c
index e424d360c8..1d47ea9895 100644
--- a/tests/test-block-iothread.c
+++ b/tests/test-block-iothread.c
@@ -342,14 +342,14 @@ static void test_sync_op(const void *opaque)
     blk_insert_bs(blk, bs, &error_abort);
     c = QLIST_FIRST(&bs->parents);
 
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
     aio_context_acquire(ctx);
     t->fn(c);
     if (t->blkfn) {
         t->blkfn(blk);
     }
     aio_context_release(ctx);
-    blk_set_aio_context(blk, qemu_get_aio_context());
+    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
 
     bdrv_unref(bs);
     blk_unref(blk);
@@ -428,7 +428,7 @@ static void test_attach_blockjob(void)
         aio_poll(qemu_get_aio_context(), false);
     }
 
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
 
     tjob->n = 0;
     while (tjob->n == 0) {
@@ -436,7 +436,7 @@ static void test_attach_blockjob(void)
     }
 
     aio_context_acquire(ctx);
-    blk_set_aio_context(blk, qemu_get_aio_context());
+    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
     aio_context_release(ctx);
 
     tjob->n = 0;
@@ -444,7 +444,7 @@ static void test_attach_blockjob(void)
         aio_poll(qemu_get_aio_context(), false);
     }
 
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
 
     tjob->n = 0;
     while (tjob->n == 0) {
@@ -453,7 +453,7 @@ static void test_attach_blockjob(void)
 
     aio_context_acquire(ctx);
     job_complete_sync(&tjob->common.job, &error_abort);
-    blk_set_aio_context(blk, qemu_get_aio_context());
+    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
     aio_context_release(ctx);
 
     bdrv_unref(bs);
@@ -497,7 +497,7 @@ static void test_propagate_basic(void)
     bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
 
     /* Switch the AioContext */
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
     g_assert(blk_get_aio_context(blk) == ctx);
     g_assert(bdrv_get_aio_context(bs_a) == ctx);
     g_assert(bdrv_get_aio_context(bs_verify) == ctx);
@@ -505,7 +505,7 @@ static void test_propagate_basic(void)
 
     /* Switch the AioContext back */
     ctx = qemu_get_aio_context();
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
     g_assert(blk_get_aio_context(blk) == ctx);
     g_assert(bdrv_get_aio_context(bs_a) == ctx);
     g_assert(bdrv_get_aio_context(bs_verify) == ctx);
@@ -565,7 +565,7 @@ static void test_propagate_diamond(void)
     blk_insert_bs(blk, bs_verify, &error_abort);
 
     /* Switch the AioContext */
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
     g_assert(blk_get_aio_context(blk) == ctx);
     g_assert(bdrv_get_aio_context(bs_verify) == ctx);
     g_assert(bdrv_get_aio_context(bs_a) == ctx);
@@ -574,7 +574,7 @@ static void test_propagate_diamond(void)
 
     /* Switch the AioContext back */
     ctx = qemu_get_aio_context();
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
     g_assert(blk_get_aio_context(blk) == ctx);
     g_assert(bdrv_get_aio_context(bs_verify) == ctx);
     g_assert(bdrv_get_aio_context(bs_a) == ctx);
@@ -654,7 +654,7 @@ static void test_propagate_mirror(void)
     job_cancel_sync_all();
 
     aio_context_acquire(ctx);
-    blk_set_aio_context(blk, main_ctx);
+    blk_set_aio_context(blk, main_ctx, &error_abort);
     bdrv_try_set_aio_context(target, main_ctx, &error_abort);
     aio_context_release(ctx);
 
-- 
2.20.1


