From: Stefan Hajnoczi <stefanha@redhat.com>
To: qemu-devel@nongnu.org
Cc: Max Reitz <mreitz@redhat.com>, Kevin Wolf <kwolf@redhat.com>,
	qemu-block@nongnu.org, Paolo Bonzini <pbonzini@redhat.com>,
	Stefan Hajnoczi <stefanha@redhat.com>,
	Fam Zheng <famz@redhat.com>,
	"fuweiwei (C)" <fuweiwei2@huawei.com>
Subject: [Qemu-devel] [PATCH v2] block: make BDRV_POLL_WHILE() re-entrancy safe
Date: Wed,  7 Mar 2018 12:46:19 +0000
Message-ID: <20180307124619.6218-1-stefanha@redhat.com>

Nested BDRV_POLL_WHILE() calls can occur.  Currently
assert(!wait_->need_kick) fails in AIO_WAIT_WHILE() when this happens.

This patch converts the bool wait_->need_kick flag into an unsigned
wait_->num_waiters counter.  A bool can only record one waiter at a
time, so a nested caller trips the assertion; a counter lets nested
callers register and deregister independently.

Nesting works correctly because outer AIO_WAIT_WHILE() callers evaluate
the condition again after the inner caller completes (invoking the inner
caller counts as aio_poll() progress).
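
To illustrate why the counter is nesting-safe where the flag was not,
here is a minimal stand-alone C sketch.  It is a toy model, not QEMU
code: the names wait_with_bool()/wait_with_counter() and the recursion
standing in for nested AIO_WAIT_WHILE() calls are invented for the
example.

  /* Toy model: a bool waiter flag breaks under nesting, a counter
   * does not.  The bool variant aborts on purpose when run. */
  #include <assert.h>
  #include <stdbool.h>
  #include <stdio.h>

  static unsigned num_waiters;  /* new scheme: counts nested waiters */
  static bool need_kick;        /* old scheme: only one waiter at a time */

  static void wait_with_bool(int depth)
  {
      assert(!need_kick);       /* fails on the first nested call */
      need_kick = true;
      if (depth) {
          wait_with_bool(depth - 1);
      }
      need_kick = false;
  }

  static void wait_with_counter(int depth)
  {
      num_waiters++;            /* any nesting depth is fine */
      if (depth) {
          wait_with_counter(depth - 1);
      }
      num_waiters--;
  }

  int main(void)
  {
      wait_with_counter(2);     /* ok: counter goes 1, 2, 3, back to 0 */
      printf("counter survived nesting: num_waiters=%u\n", num_waiters);
      wait_with_bool(2);        /* aborts: assert(!need_kick) at depth 1 */
      return 0;
  }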

Reported-by: "fuweiwei (C)" <fuweiwei2@huawei.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
v2:
 * Rebase onto qemu.git/master now that AIO_WAIT_WHILE() has landed
   [Kevin]

 include/block/aio-wait.h | 61 ++++++++++++++++++++++++------------------------
 util/aio-wait.c          |  2 +-
 2 files changed, 31 insertions(+), 32 deletions(-)

diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index a48c744fa8..74cde07bef 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -50,8 +50,8 @@
  *   }
  */
 typedef struct {
-    /* Is the main loop waiting for a kick?  Accessed with atomic ops. */
-    bool need_kick;
+    /* Number of waiting AIO_WAIT_WHILE() callers. Accessed with atomic ops. */
+    unsigned num_waiters;
 } AioWait;
 
 /**
@@ -71,35 +71,34 @@ typedef struct {
  * wait on conditions between two IOThreads since that could lead to deadlock,
  * go via the main loop instead.
  */
-#define AIO_WAIT_WHILE(wait, ctx, cond) ({                  \
-    bool waited_ = false;                                   \
-    bool busy_ = true;                                      \
-    AioWait *wait_ = (wait);                                \
-    AioContext *ctx_ = (ctx);                               \
-    if (in_aio_context_home_thread(ctx_)) {                 \
-        while ((cond) || busy_) {                           \
-            busy_ = aio_poll(ctx_, (cond));                 \
-            waited_ |= !!(cond) | busy_;                    \
-        }                                                   \
-    } else {                                                \
-        assert(qemu_get_current_aio_context() ==            \
-               qemu_get_aio_context());                     \
-        assert(!wait_->need_kick);                          \
-        /* Set wait_->need_kick before evaluating cond.  */ \
-        atomic_mb_set(&wait_->need_kick, true);             \
-        while (busy_) {                                     \
-            if ((cond)) {                                   \
-                waited_ = busy_ = true;                     \
-                aio_context_release(ctx_);                  \
-                aio_poll(qemu_get_aio_context(), true);     \
-                aio_context_acquire(ctx_);                  \
-            } else {                                        \
-                busy_ = aio_poll(ctx_, false);              \
-                waited_ |= busy_;                           \
-            }                                               \
-        }                                                   \
-        atomic_set(&wait_->need_kick, false);               \
-    }                                                       \
+#define AIO_WAIT_WHILE(wait, ctx, cond) ({                         \
+    bool waited_ = false;                                          \
+    bool busy_ = true;                                             \
+    AioWait *wait_ = (wait);                                       \
+    AioContext *ctx_ = (ctx);                                      \
+    if (in_aio_context_home_thread(ctx_)) {                        \
+        while ((cond) || busy_) {                                  \
+            busy_ = aio_poll(ctx_, (cond));                        \
+            waited_ |= !!(cond) | busy_;                           \
+        }                                                          \
+    } else {                                                       \
+        assert(qemu_get_current_aio_context() ==                   \
+               qemu_get_aio_context());                            \
+        /* Increment wait_->num_waiters before evaluating cond. */ \
+        atomic_inc(&wait_->num_waiters);                           \
+        while (busy_) {                                            \
+            if ((cond)) {                                          \
+                waited_ = busy_ = true;                            \
+                aio_context_release(ctx_);                         \
+                aio_poll(qemu_get_aio_context(), true);            \
+                aio_context_acquire(ctx_);                         \
+            } else {                                               \
+                busy_ = aio_poll(ctx_, false);                     \
+                waited_ |= busy_;                                  \
+            }                                                      \
+        }                                                          \
+        atomic_dec(&wait_->num_waiters);                           \
+    }                                                              \
     waited_; })
 
 /**
diff --git a/util/aio-wait.c b/util/aio-wait.c
index a487cdb852..be85a32273 100644
--- a/util/aio-wait.c
+++ b/util/aio-wait.c
@@ -34,7 +34,7 @@ static void dummy_bh_cb(void *opaque)
 void aio_wait_kick(AioWait *wait)
 {
     /* The barrier (or an atomic op) is in the caller.  */
-    if (atomic_read(&wait->need_kick)) {
+    if (atomic_read(&wait->num_waiters)) {
         aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
     }
 }
-- 
2.14.3
