From: Kevin Wolf <kwolf@redhat.com>
To: qemu-block@nongnu.org
Cc: kwolf@redhat.com, peter.maydell@linaro.org, qemu-devel@nongnu.org
Subject: [Qemu-devel] [PULL 04/13] block: Add @drained_end_counter
Date: Fri, 19 Jul 2019 15:43:36 +0200
Message-ID: <20190719134345.23526-5-kwolf@redhat.com>
In-Reply-To: <20190719134345.23526-1-kwolf@redhat.com>

From: Max Reitz <mreitz@redhat.com>

Callers can now pass a pointer to an integer that bdrv_drain_invoke()
(and its recursive callees) will increment for every
bdrv_drain_invoke_entry() operation they schedule.
bdrv_drain_invoke_entry() in turn will decrement it once it has invoked
BlockDriver.bdrv_co_drain_end().

We use atomic operations to access the pointee, because the
bdrv_do_drained_end() caller may wish to end drained sections for
multiple nodes in different AioContexts (bdrv_drain_all_end() does, for
example).

This is the first step towards moving the polling (waiting for
BdrvCoDrainData.done to become true) out of bdrv_drain_invoke() and into
the root drained_end function.
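
As a rough illustration of the protocol, here is a standalone sketch --
not QEMU code: plain threads and C11 atomics stand in for coroutines,
AioContexts and BDRV_POLL_WHILE(), and all names such as drain_worker and
schedule_drain_end are made up for the example.  The point it models is
that the counter is incremented before each end operation is scheduled and
decremented only after the callback has run, so the root caller can wait
for it to drop back to zero:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

typedef struct {
    atomic_int *drained_end_counter;    /* shared with the root caller */
} DrainEndOp;

/* Stands in for bdrv_drain_invoke_entry(): run the callback, then decrement */
static void *drain_worker(void *opaque)
{
    DrainEndOp *op = opaque;

    usleep(1000);   /* ... the driver's .bdrv_co_drain_end() would run here ... */

    atomic_fetch_sub(op->drained_end_counter, 1);
    free(op);
    return NULL;
}

/* Stands in for bdrv_drain_invoke(bs, false, counter): increment, then schedule */
static void schedule_drain_end(atomic_int *counter, pthread_t *thr)
{
    DrainEndOp *op = malloc(sizeof(*op));

    op->drained_end_counter = counter;
    atomic_fetch_add(counter, 1);       /* count the operation before it runs */
    pthread_create(thr, NULL, drain_worker, op);
}

int main(void)
{
    atomic_int drained_end_counter = 0;
    pthread_t thr[3];
    int i;

    /* the root drained_end caller schedules one operation per node ... */
    for (i = 0; i < 3; i++) {
        schedule_drain_end(&drained_end_counter, &thr[i]);
    }

    /* ... and polls until all of them have finished; in the real code this
     * waiting is what should eventually happen in the root function */
    while (atomic_load(&drained_end_counter) > 0) {
        /* spin; QEMU would run pending aio handlers here instead */
    }

    for (i = 0; i < 3; i++) {
        pthread_join(thr[i], NULL);
    }
    printf("all scheduled drained_end callbacks have completed\n");
    return 0;
}

The essential property is that the increment happens before the operation
is scheduled, so the counter can only reach zero once every scheduled
.bdrv_co_drain_end() callback has actually run.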

Signed-off-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/io.c | 58 +++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 40 insertions(+), 18 deletions(-)

diff --git a/block/io.c b/block/io.c
index 1e618f9a37..c42e18b068 100644
--- a/block/io.c
+++ b/block/io.c
@@ -194,6 +194,7 @@ typedef struct {
     bool poll;
     BdrvChild *parent;
     bool ignore_bds_parents;
+    int *drained_end_counter;
 } BdrvCoDrainData;
 
 static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
@@ -211,13 +212,18 @@ static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
     atomic_mb_set(&data->done, true);
     bdrv_dec_in_flight(bs);
 
-    if (data->begin) {
+    if (data->drained_end_counter) {
+        atomic_dec(data->drained_end_counter);
+    }
+
+    if (data->begin || data->drained_end_counter) {
         g_free(data);
     }
 }
 
 /* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
-static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
+static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
+                              int *drained_end_counter)
 {
     BdrvCoDrainData *data;
 
@@ -230,16 +236,25 @@ static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
     *data = (BdrvCoDrainData) {
         .bs = bs,
         .done = false,
-        .begin = begin
+        .begin = begin,
+        .drained_end_counter = drained_end_counter,
     };
 
+    if (!begin && drained_end_counter) {
+        atomic_inc(drained_end_counter);
+    }
+
     /* Make sure the driver callback completes during the polling phase for
      * drain_begin. */
     bdrv_inc_in_flight(bs);
     data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
     aio_co_schedule(bdrv_get_aio_context(bs), data->co);
 
-    if (!begin) {
+    /*
+     * TODO: Drop this and make callers pass @drained_end_counter and poll
+     * themselves
+     */
+    if (!begin && !drained_end_counter) {
         BDRV_POLL_WHILE(bs, !data->done);
         g_free(data);
     }
@@ -281,7 +296,8 @@ static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                   BdrvChild *parent, bool ignore_bds_parents,
                                   bool poll);
 static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
-                                BdrvChild *parent, bool ignore_bds_parents);
+                                BdrvChild *parent, bool ignore_bds_parents,
+                                int *drained_end_counter);
 
 static void bdrv_co_drain_bh_cb(void *opaque)
 {
@@ -308,7 +324,8 @@ static void bdrv_co_drain_bh_cb(void *opaque)
                                   data->ignore_bds_parents, data->poll);
         } else {
             bdrv_do_drained_end(bs, data->recursive, data->parent,
-                                data->ignore_bds_parents);
+                                data->ignore_bds_parents,
+                                data->drained_end_counter);
         }
         if (ctx == co_ctx) {
             aio_context_release(ctx);
@@ -326,7 +343,8 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                 bool begin, bool recursive,
                                                 BdrvChild *parent,
                                                 bool ignore_bds_parents,
-                                                bool poll)
+                                                bool poll,
+                                                int *drained_end_counter)
 {
     BdrvCoDrainData data;
 
@@ -343,7 +361,9 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
         .parent = parent,
         .ignore_bds_parents = ignore_bds_parents,
         .poll = poll,
+        .drained_end_counter = drained_end_counter,
     };
+
     if (bs) {
         bdrv_inc_in_flight(bs);
     }
@@ -367,7 +387,7 @@ void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
     }
 
     bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
-    bdrv_drain_invoke(bs, true);
+    bdrv_drain_invoke(bs, true, NULL);
 }
 
 static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
@@ -378,7 +398,7 @@ static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
 
     if (qemu_in_coroutine()) {
         bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
-                               poll);
+                               poll, NULL);
         return;
     }
 
@@ -419,20 +439,21 @@ void bdrv_subtree_drained_begin(BlockDriverState *bs)
 }
 
 static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
-                                BdrvChild *parent, bool ignore_bds_parents)
+                                BdrvChild *parent, bool ignore_bds_parents,
+                                int *drained_end_counter)
 {
     BdrvChild *child, *next;
     int old_quiesce_counter;
 
     if (qemu_in_coroutine()) {
         bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
-                               false);
+                               false, drained_end_counter);
         return;
     }
     assert(bs->quiesce_counter > 0);
 
     /* Re-enable things in child-to-parent order */
-    bdrv_drain_invoke(bs, false);
+    bdrv_drain_invoke(bs, false, drained_end_counter);
     bdrv_parent_drained_end(bs, parent, ignore_bds_parents);
 
     old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter);
@@ -444,19 +465,20 @@ static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
         assert(!ignore_bds_parents);
         bs->recursive_quiesce_counter--;
         QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
-            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents);
+            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
+                                drained_end_counter);
         }
     }
 }
 
 void bdrv_drained_end(BlockDriverState *bs)
 {
-    bdrv_do_drained_end(bs, false, NULL, false);
+    bdrv_do_drained_end(bs, false, NULL, false, NULL);
 }
 
 void bdrv_subtree_drained_end(BlockDriverState *bs)
 {
-    bdrv_do_drained_end(bs, true, NULL, false);
+    bdrv_do_drained_end(bs, true, NULL, false, NULL);
 }
 
 void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
@@ -473,7 +495,7 @@ void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
     int i;
 
     for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
-        bdrv_do_drained_end(child->bs, true, child, false);
+        bdrv_do_drained_end(child->bs, true, child, false, NULL);
     }
 }
 
@@ -543,7 +565,7 @@ void bdrv_drain_all_begin(void)
     BlockDriverState *bs = NULL;
 
     if (qemu_in_coroutine()) {
-        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true);
+        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
         return;
     }
 
@@ -579,7 +601,7 @@ void bdrv_drain_all_end(void)
         AioContext *aio_context = bdrv_get_aio_context(bs);
 
         aio_context_acquire(aio_context);
-        bdrv_do_drained_end(bs, false, NULL, true);
+        bdrv_do_drained_end(bs, false, NULL, true, NULL);
         aio_context_release(aio_context);
     }
 
-- 
2.20.1



