* [Qemu-devel] [PATCH 0/4] curl: locking cleanups and fixes
@ 2017-05-09  9:35 Paolo Bonzini
  2017-05-09  9:35 ` [Qemu-devel] [PATCH 1/4] curl: strengthen assertion in curl_clean_state Paolo Bonzini
                   ` (4 more replies)
  0 siblings, 5 replies; 9+ messages in thread
From: Paolo Bonzini @ 2017-05-09  9:35 UTC
  To: qemu-devel; +Cc: qemu-stable, jcody, rjones

This is the full version of the simple patch:

@@ -473,7 +475,9 @@
             break;
         }
         if (!state) {
+            qemu_mutex_unlock(&s->mutex);
             aio_poll(bdrv_get_aio_context(bs), true);
+            qemu_mutex_lock(&s->mutex);
         }
     } while(!state);

that was tested by Richard last week.  Richard, please retest with your test
case.
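
In other words: drop the (non-recursive) lock around the nested
event-loop iteration, so that re-entrant completion callbacks can take
the lock themselves.  A minimal standalone sketch of that pattern, with
illustrative stand-in names rather than QEMU APIs:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool resource_free;

/* Stand-in for aio_poll(): dispatches completions, which themselves
 * take the lock before touching shared state. */
static void poll_events_once(void)
{
    pthread_mutex_lock(&lock);
    resource_free = true;       /* pretend a request just completed */
    pthread_mutex_unlock(&lock);
}

/* Called with the lock held, like curl_init_state. */
static void wait_for_free_resource(void)
{
    while (!resource_free) {
        /* The lock is not recursive: polling while holding it would
         * deadlock as soon as a completion tries to take it. */
        pthread_mutex_unlock(&lock);
        poll_events_once();
        pthread_mutex_lock(&lock);
    }
}

int main(void)
{
    pthread_mutex_lock(&lock);
    wait_for_free_resource();
    pthread_mutex_unlock(&lock);
    printf("resource acquired without deadlocking\n");
    return 0;
}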

Thanks,

Paolo

Paolo Bonzini (4):
  curl: strengthen assertion in curl_clean_state
  curl: never invoke callbacks with s->mutex held
  curl: avoid recursive locking of BDRVCURLState mutex
  curl: improve search for unused CURLState

 block/curl.c | 78 ++++++++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 55 insertions(+), 23 deletions(-)

-- 
2.12.2


* [Qemu-devel] [PATCH 1/4] curl: strengthen assertion in curl_clean_state
  2017-05-09  9:35 [Qemu-devel] [PATCH 0/4] curl: locking cleanups and fixes Paolo Bonzini
@ 2017-05-09  9:35 ` Paolo Bonzini
  2017-05-09  9:35 ` [Qemu-devel] [PATCH 2/4] curl: never invoke callbacks with s->mutex held Paolo Bonzini
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 9+ messages in thread
From: Paolo Bonzini @ 2017-05-09  9:35 UTC
  To: qemu-devel; +Cc: qemu-stable, jcody, rjones

curl_clean_state should only be called after all AIOCBs have been
completed.  This is not so obvious for the call from curl_detach_aio_context,
so assert that.

Cc: qemu-stable@nongnu.org
Cc: jcody@redhat.com
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 block/curl.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/block/curl.c b/block/curl.c
index 2708d57c2f..25a301e7b4 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -532,6 +532,11 @@ static CURLState *curl_init_state(BlockDriverState *bs, BDRVCURLState *s)
 
 static void curl_clean_state(CURLState *s)
 {
+    int j;
+    for (j = 0; j < CURL_NUM_ACB; j++) {
+        assert(!s->acb[j]);
+    }
+
     if (s->s->multi)
         curl_multi_remove_handle(s->s->multi, s->curl);
 
-- 
2.12.2


* [Qemu-devel] [PATCH 2/4] curl: never invoke callbacks with s->mutex held
  2017-05-09  9:35 [Qemu-devel] [PATCH 0/4] curl: locking cleanups and fixes Paolo Bonzini
  2017-05-09  9:35 ` [Qemu-devel] [PATCH 1/4] curl: strengthen assertion in curl_clean_state Paolo Bonzini
@ 2017-05-09  9:35 ` Paolo Bonzini
  2017-05-09  9:35 ` [Qemu-devel] [PATCH 3/4] curl: avoid recursive locking of BDRVCURLState mutex Paolo Bonzini
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 9+ messages in thread
From: Paolo Bonzini @ 2017-05-09  9:35 UTC
  To: qemu-devel; +Cc: qemu-stable, jcody, rjones

All curl callbacks go through curl_multi_do, and hence are called with
s->mutex held.  Document this with comments, and make curl_read_cb drop
the lock before invoking the callback.

Likewise for curl_find_buf, whose callback invocation is moved to the
caller.
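
The hazard: a completion callback is free to re-enter the driver and
take the same non-recursive mutex, so invoking it with the lock held
self-deadlocks.  A self-contained sketch of the pattern (made-up names,
not block/curl.c code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t driver_lock = PTHREAD_MUTEX_INITIALIZER;

/* A completion callback may call back into the driver, which takes
 * driver_lock again; if the caller still held it, we would deadlock. */
static void completion_cb(void *opaque, int ret)
{
    pthread_mutex_lock(&driver_lock);
    printf("request completed, ret=%d\n", ret);
    pthread_mutex_unlock(&driver_lock);
}

static void finish_request(void (*cb)(void *, int), void *opaque)
{
    pthread_mutex_lock(&driver_lock);
    /* ... update internal driver state ... */
    pthread_mutex_unlock(&driver_lock);   /* drop before the callback */
    cb(opaque, 0);
    pthread_mutex_lock(&driver_lock);
    /* ... clear per-request bookkeeping ... */
    pthread_mutex_unlock(&driver_lock);
}

int main(void)
{
    finish_request(completion_cb, NULL);
    return 0;
}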

Cc: qemu-stable@nongnu.org
Cc: jcody@redhat.com
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 block/curl.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/block/curl.c b/block/curl.c
index 25a301e7b4..a362c1b2a6 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -147,6 +147,7 @@ static void curl_multi_do(void *arg);
 static void curl_multi_read(void *arg);
 
 #ifdef NEED_CURL_TIMER_CALLBACK
+/* Called from curl_multi_do_locked, with s->mutex held.  */
 static int curl_timer_cb(CURLM *multi, long timeout_ms, void *opaque)
 {
     BDRVCURLState *s = opaque;
@@ -163,6 +164,7 @@ static int curl_timer_cb(CURLM *multi, long timeout_ms, void *opaque)
 }
 #endif
 
+/* Called from curl_multi_do_locked, with s->mutex held.  */
 static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
                         void *userp, void *sp)
 {
@@ -212,6 +214,7 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
     return 0;
 }
 
+/* Called from curl_multi_do_locked, with s->mutex held.  */
 static size_t curl_header_cb(void *ptr, size_t size, size_t nmemb, void *opaque)
 {
     BDRVCURLState *s = opaque;
@@ -226,6 +229,7 @@ static size_t curl_header_cb(void *ptr, size_t size, size_t nmemb, void *opaque)
     return realsize;
 }
 
+/* Called from curl_multi_do_locked, with s->mutex held.  */
 static size_t curl_read_cb(void *ptr, size_t size, size_t nmemb, void *opaque)
 {
     CURLState *s = ((CURLState*)opaque);
@@ -264,7 +268,9 @@ static size_t curl_read_cb(void *ptr, size_t size, size_t nmemb, void *opaque)
                                   request_length - offset);
             }
 
+            qemu_mutex_unlock(&s->s->mutex);
             acb->common.cb(acb->common.opaque, 0);
+            qemu_mutex_lock(&s->s->mutex);
             qemu_aio_unref(acb);
             s->acb[i] = NULL;
         }
@@ -305,8 +311,6 @@ static int curl_find_buf(BDRVCURLState *s, size_t start, size_t len,
             if (clamped_len < len) {
                 qemu_iovec_memset(acb->qiov, clamped_len, 0, len - clamped_len);
             }
-            acb->common.cb(acb->common.opaque, 0);
-
             return FIND_RET_OK;
         }
 
@@ -832,8 +836,8 @@ static void curl_readv_bh_cb(void *p)
     // we can just call the callback and be done.
     switch (curl_find_buf(s, start, acb->nb_sectors * BDRV_SECTOR_SIZE, acb)) {
         case FIND_RET_OK:
-            qemu_aio_unref(acb);
-            // fall through
+            ret = 0;
+            goto out;
         case FIND_RET_WAIT:
             goto out;
         default:
-- 
2.12.2


* [Qemu-devel] [PATCH 3/4] curl: avoid recursive locking of BDRVCURLState mutex
  2017-05-09  9:35 [Qemu-devel] [PATCH 0/4] curl: locking cleanups and fixes Paolo Bonzini
  2017-05-09  9:35 ` [Qemu-devel] [PATCH 1/4] curl: strengthen assertion in curl_clean_state Paolo Bonzini
  2017-05-09  9:35 ` [Qemu-devel] [PATCH 2/4] curl: never invoke callbacks with s->mutex held Paolo Bonzini
@ 2017-05-09  9:35 ` Paolo Bonzini
  2017-05-09  9:35 ` [Qemu-devel] [PATCH 4/4] curl: improve search for unused CURLState Paolo Bonzini
  2017-05-09 10:15 ` [Qemu-devel] [PATCH 0/4] curl: locking cleanups and fixes Richard W.M. Jones
  4 siblings, 0 replies; 9+ messages in thread
From: Paolo Bonzini @ 2017-05-09  9:35 UTC
  To: qemu-devel; +Cc: qemu-stable, jcody, rjones

The curl driver has an ugly hack where, if it cannot find an unused
CURLState, it just uses aio_poll to wait for one to become free.  This
is probably buggy when used together with dataplane, and a fix is in
the next patch.

A more immediate effect of the bug however is that it can cause a
recursive call to curl_readv_bh_cb and recursively taking the
BDRVCURLState mutex.  This causes a deadlock.

The fix is to unlock the mutex around aio_poll, but for cleanliness we
should also take the mutex around all calls to curl_init_state, even if
reaching the unlock/lock pair is impossible.  The same is true for
curl_clean_state.
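
To make the failure mode concrete: on POSIX hosts QemuMutex wraps a
default pthread mutex, which is not recursive, so a second lock from
the same thread never succeeds.  A minimal demonstration (not QEMU
code; on Linux the second lock simply blocks forever):

#include <pthread.h>
#include <stdio.h>

int main(void)
{
    pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

    pthread_mutex_lock(&m);
    printf("lock held; relocking from the same thread...\n");
    /* Mirrors curl_readv_bh_cb being re-entered via aio_poll while
     * the BDRVCURLState mutex is already held: this never returns. */
    pthread_mutex_lock(&m);
    printf("never reached\n");
    return 0;
}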

Reported-by: Richard W.M. Jones <rjones@redhat.com>
Cc: jcody@redhat.com
Cc: qemu-stable@nongnu.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 block/curl.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/block/curl.c b/block/curl.c
index a362c1b2a6..e8fcc5ca34 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -281,6 +281,7 @@ read_end:
     return size * nmemb;
 }
 
+/* Called with s->mutex held.  */
 static int curl_find_buf(BDRVCURLState *s, size_t start, size_t len,
                          CURLAIOCB *acb)
 {
@@ -453,6 +454,7 @@ static void curl_multi_timeout_do(void *arg)
 #endif
 }
 
+/* Called with s->mutex held.  */
 static CURLState *curl_init_state(BlockDriverState *bs, BDRVCURLState *s)
 {
     CURLState *state = NULL;
@@ -471,7 +473,9 @@ static CURLState *curl_init_state(BlockDriverState *bs, BDRVCURLState *s)
             break;
         }
         if (!state) {
+            qemu_mutex_unlock(&s->mutex);
             aio_poll(bdrv_get_aio_context(bs), true);
+            qemu_mutex_lock(&s->mutex);
         }
     } while(!state);
 
@@ -534,6 +538,7 @@ static CURLState *curl_init_state(BlockDriverState *bs, BDRVCURLState *s)
     return state;
 }
 
+/* Called with s->mutex held.  */
 static void curl_clean_state(CURLState *s)
 {
     int j;
@@ -565,6 +570,7 @@ static void curl_detach_aio_context(BlockDriverState *bs)
     BDRVCURLState *s = bs->opaque;
     int i;
 
+    qemu_mutex_lock(&s->mutex);
     for (i = 0; i < CURL_NUM_STATES; i++) {
         if (s->states[i].in_use) {
             curl_clean_state(&s->states[i]);
@@ -580,6 +586,7 @@ static void curl_detach_aio_context(BlockDriverState *bs)
         curl_multi_cleanup(s->multi);
         s->multi = NULL;
     }
+    qemu_mutex_unlock(&s->mutex);
 
     timer_del(&s->timer);
 }
@@ -745,9 +752,12 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
     }
 
     DPRINTF("CURL: Opening %s\n", file);
+    qemu_mutex_init(&s->mutex);
     s->aio_context = bdrv_get_aio_context(bs);
     s->url = g_strdup(file);
+    qemu_mutex_lock(&s->mutex);
     state = curl_init_state(bs, s);
+    qemu_mutex_unlock(&s->mutex);
     if (!state)
         goto out_noclean;
 
@@ -791,11 +801,12 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
     }
     DPRINTF("CURL: Size = %zd\n", s->len);
 
+    qemu_mutex_lock(&s->mutex);
     curl_clean_state(state);
+    qemu_mutex_unlock(&s->mutex);
     curl_easy_cleanup(state->curl);
     state->curl = NULL;
 
-    qemu_mutex_init(&s->mutex);
     curl_attach_aio_context(bs, bdrv_get_aio_context(bs));
 
     qemu_opts_del(opts);
-- 
2.12.2


* [Qemu-devel] [PATCH 4/4] curl: improve search for unused CURLState
  2017-05-09  9:35 [Qemu-devel] [PATCH 0/4] curl: locking cleanups and fixes Paolo Bonzini
                   ` (2 preceding siblings ...)
  2017-05-09  9:35 ` [Qemu-devel] [PATCH 3/4] curl: avoid recursive locking of BDRVCURLState mutex Paolo Bonzini
@ 2017-05-09  9:35 ` Paolo Bonzini
  2017-05-09 10:15 ` [Qemu-devel] [PATCH 0/4] curl: locking cleanups and fixes Richard W.M. Jones
  4 siblings, 0 replies; 9+ messages in thread
From: Paolo Bonzini @ 2017-05-09  9:35 UTC
  To: qemu-devel; +Cc: qemu-stable, jcody, rjones

The main change here is to pull the search loop to a new function,
making it easier to switch from aio_poll to BDRV_POLL_WHILE.

However, the "for (j...)" loop is also dead ("j" is never read
outside it), so remove it.
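
A tiny standalone example of why the loop had no effect: "continue"
binds to the innermost loop, so the inner loop can never skip a
candidate state in the outer one, and "j" is not read afterwards
(illustrative code, not from block/curl.c):

#include <stdio.h>

int main(void)
{
    int i, j;

    for (i = 0; i < 2; i++) {
        for (j = 0; j < 3; j++)
            if (j == 1)
                continue;   /* skips the rest of the j-loop body only */
        /* Reached for every i: the inner loop cannot make the outer
         * loop skip this iteration, and j is never read here. */
        printf("outer body runs for i=%d\n", i);
    }
    return 0;
}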

Cc: jcody@redhat.com
Cc: qemu-stable@nongnu.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 block/curl.c | 52 ++++++++++++++++++++++++++++++++--------------------
 1 file changed, 32 insertions(+), 20 deletions(-)

diff --git a/block/curl.c b/block/curl.c
index e8fcc5ca34..79e504a2cc 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -455,29 +455,41 @@ static void curl_multi_timeout_do(void *arg)
 }
 
 /* Called with s->mutex held.  */
+static bool curl_find_unused_state_locked(BDRVCURLState *s, CURLState **state)
+{
+    int i;
+
+    for (i = 0; i < CURL_NUM_STATES; i++) {
+        if (!s->states[i].in_use) {
+            s->states[i].in_use = 1;
+            *state = &s->states[i];
+            return true;
+        }
+    }
+
+    return false;
+}
+
+static bool curl_find_unused_state(BDRVCURLState *s, CURLState **state)
+{
+    bool ret;
+
+    qemu_mutex_lock(&s->mutex);
+    ret = curl_find_unused_state_locked(s, state);
+    qemu_mutex_unlock(&s->mutex);
+    return ret;
+}
+
+/* Called with s->mutex held.  */
 static CURLState *curl_init_state(BlockDriverState *bs, BDRVCURLState *s)
 {
     CURLState *state = NULL;
-    int i, j;
-
-    do {
-        for (i=0; i<CURL_NUM_STATES; i++) {
-            for (j=0; j<CURL_NUM_ACB; j++)
-                if (s->states[i].acb[j])
-                    continue;
-            if (s->states[i].in_use)
-                continue;
-
-            state = &s->states[i];
-            state->in_use = 1;
-            break;
-        }
-        if (!state) {
-            qemu_mutex_unlock(&s->mutex);
-            aio_poll(bdrv_get_aio_context(bs), true);
-            qemu_mutex_lock(&s->mutex);
-        }
-    } while(!state);
+
+    if (!curl_find_unused_state_locked(s, &state)) {
+        qemu_mutex_unlock(&s->mutex);
+        BDRV_POLL_WHILE(bs, !curl_find_unused_state(s, &state));
+        qemu_mutex_lock(&s->mutex);
+    }
 
     if (!state->curl) {
         state->curl = curl_easy_init();
-- 
2.12.2


* Re: [Qemu-devel] [PATCH 0/4] curl: locking cleanups and fixes
  2017-05-09  9:35 [Qemu-devel] [PATCH 0/4] curl: locking cleanups and fixes Paolo Bonzini
                   ` (3 preceding siblings ...)
  2017-05-09  9:35 ` [Qemu-devel] [PATCH 4/4] curl: improve search for unused CURLState Paolo Bonzini
@ 2017-05-09 10:15 ` Richard W.M. Jones
  2017-05-09 16:03   ` Jeff Cody
  4 siblings, 1 reply; 9+ messages in thread
From: Richard W.M. Jones @ 2017-05-09 10:15 UTC
  To: Paolo Bonzini; +Cc: qemu-devel, qemu-stable, jcody


No I'm afraid this patch series does not fix the bug.

The stack trace is below.

Rich.

Thread 4 (Thread 0x7f8595cf1700 (LWP 11235)):
#0  0x00007f86348e6700 in do_futex_wait () at /lib64/libpthread.so.0
#1  0x00007f86348e6813 in __new_sem_wait_slow () at /lib64/libpthread.so.0
#2  0x00005610e458519f in qemu_sem_timedwait (sem=sem@entry=0x5610e5db7508, ms=ms@entry=10000) at util/qemu-thread-posix.c:255
#3  0x00005610e458043c in worker_thread (opaque=0x5610e5db74a0)
    at util/thread-pool.c:92
#4  0x00007f86348dd36d in start_thread () at /lib64/libpthread.so.0
#5  0x00007f862e830e0f in clone () at /lib64/libc.so.6

Thread 3 (Thread 0x7f8621347700 (LWP 10865)):
#0  0x00007f862e826837 in ioctl () at /lib64/libc.so.6
#1  0x00005610e4216387 in kvm_vcpu_ioctl (cpu=cpu@entry=0x5610e60f2030, type=type@entry=44672) at /home/rjones/d/qemu/kvm-all.c:2154
#2  0x00005610e42164be in kvm_cpu_exec (cpu=cpu@entry=0x5610e60f2030)
    at /home/rjones/d/qemu/kvm-all.c:1992
#3  0x00005610e4202f94 in qemu_kvm_cpu_thread_fn (arg=0x5610e60f2030)
    at /home/rjones/d/qemu/cpus.c:1118
#4  0x00007f86348dd36d in start_thread () at /lib64/libpthread.so.0
#5  0x00007f862e830e0f in clone () at /lib64/libc.so.6

Thread 2 (Thread 0x7f8626559700 (LWP 10863)):
#0  0x00007f862e82b7a9 in syscall () at /lib64/libc.so.6
#1  0x00005610e4585325 in qemu_futex_wait (val=<optimized out>, f=<optimized out>) at /home/rjones/d/qemu/include/qemu/futex.h:26
#2  0x00005610e4585325 in qemu_event_wait (ev=ev@entry=0x5610e5019ee4 <rcu_call_ready_event>) at util/qemu-thread-posix.c:399
#3  0x00005610e459539e in call_rcu_thread (opaque=<optimized out>)
    at util/rcu.c:249
#4  0x00007f86348dd36d in start_thread () at /lib64/libpthread.so.0
#5  0x00007f862e830e0f in clone () at /lib64/libc.so.6

Thread 1 (Thread 0x7f863757dc80 (LWP 10861)):
#0  0x00007f862e824dc6 in ppoll () at /lib64/libc.so.6
#1  0x00005610e4580ea9 in ppoll (__ss=0x0, __timeout=0x0, __nfds=<optimized out>, __fds=<optimized out>) at /usr/include/bits/poll2.h:77
#2  0x00005610e4580ea9 in qemu_poll_ns (fds=<optimized out>, nfds=<optimized out>, timeout=timeout@entry=-1) at util/qemu-timer.c:322
#3  0x00005610e4582af4 in aio_poll (ctx=ctx@entry=0x5610e5d92860, blocking=<optimized out>) at util/aio-posix.c:622
#4  0x00007f8624848299 in curl_init_state (bs=<optimized out>, s=s@entry=0x5610e5defb10) at block/curl.c:490
#5  0x00007f8624848a51 in curl_readv_bh_cb (p=0x5610e78afb70)
    at block/curl.c:871
#6  0x00005610e457fb1e in aio_bh_call (bh=0x5610e6128f10) at util/async.c:90
#7  0x00005610e457fb1e in aio_bh_poll (ctx=ctx@entry=0x5610e5d92860)
    at util/async.c:118
#8  0x00005610e4582cf4 in aio_poll (ctx=ctx@entry=0x5610e5d92860, blocking=<optimized out>) at util/aio-posix.c:682
#9  0x00007f8624848299 in curl_init_state (bs=<optimized out>, s=s@entry=0x5610e5defb10) at block/curl.c:490
#10 0x00007f8624848a51 in curl_readv_bh_cb (p=0x5610e6f01380)
    at block/curl.c:871
#11 0x00005610e457fb1e in aio_bh_call (bh=0x5610e6e1fff0) at util/async.c:90
#12 0x00005610e457fb1e in aio_bh_poll (ctx=ctx@entry=0x5610e5d92860)
    at util/async.c:118
#13 0x00005610e4582cf4 in aio_poll (ctx=ctx@entry=0x5610e5d92860, blocking=<optimized out>) at util/aio-posix.c:682
#14 0x00007f8624848299 in curl_init_state (bs=<optimized out>, s=s@entry=0x5610e5defb10) at block/curl.c:490
#15 0x00007f8624848a51 in curl_readv_bh_cb (p=0x5610e77d6ec0)
    at block/curl.c:871
#16 0x00005610e457fb1e in aio_bh_call (bh=0x5610e7229960) at util/async.c:90
#17 0x00005610e457fb1e in aio_bh_poll (ctx=ctx@entry=0x5610e5d92860)
    at util/async.c:118
#18 0x00005610e4582cf4 in aio_poll (ctx=ctx@entry=0x5610e5d92860, blocking=<optimized out>) at util/aio-posix.c:682
#19 0x00007f8624848299 in curl_init_state (bs=<optimized out>, s=s@entry=0x5610e5defb10) at block/curl.c:490
#20 0x00007f8624848a51 in curl_readv_bh_cb (p=0x5610e7877220)
    at block/curl.c:871
#21 0x00005610e457fb1e in aio_bh_call (bh=0x5610e7054e20) at util/async.c:90
#22 0x00005610e457fb1e in aio_bh_poll (ctx=ctx@entry=0x5610e5d92860)
    at util/async.c:118
#23 0x00005610e4582cf4 in aio_poll (ctx=ctx@entry=0x5610e5d92860, blocking=<optimized out>) at util/aio-posix.c:682
#24 0x00007f8624848299 in curl_init_state (bs=<optimized out>, s=s@entry=0x5610e5defb10) at block/curl.c:490
#25 0x00007f8624848a51 in curl_readv_bh_cb (p=0x5610e7903cb0)
    at block/curl.c:871
#26 0x00005610e457fb1e in aio_bh_call (bh=0x5610e7903d10) at util/async.c:90
#27 0x00005610e457fb1e in aio_bh_poll (ctx=ctx@entry=0x5610e5d92860)
    at util/async.c:118
#28 0x00005610e4582cf4 in aio_poll (ctx=ctx@entry=0x5610e5d92860, blocking=<optimized out>) at util/aio-posix.c:682
#29 0x00007f8624848299 in curl_init_state (bs=<optimized out>, s=s@entry=0x5610e5defb10) at block/curl.c:490
#30 0x00007f8624848a51 in curl_readv_bh_cb (p=0x5610e7146fb0)
    at block/curl.c:871
#31 0x00005610e457fb1e in aio_bh_call (bh=0x5610e70593d0) at util/async.c:90
#32 0x00005610e457fb1e in aio_bh_poll (ctx=ctx@entry=0x5610e5d92860)
    at util/async.c:118
#33 0x00005610e45829c0 in aio_dispatch (ctx=0x5610e5d92860)
    at util/aio-posix.c:429
#34 0x00005610e457f9fe in aio_ctx_dispatch (source=<optimized out>, callback=<optimized out>, user_data=<optimized out>) at util/async.c:261
#35 0x00007f863235d1d7 in g_main_context_dispatch () at /lib64/libglib-2.0.so.0
#36 0x00005610e4581c17 in glib_pollfds_poll () at util/main-loop.c:213
#37 0x00005610e4581c17 in os_host_main_loop_wait (timeout=<optimized out>)
    at util/main-loop.c:261
#38 0x00005610e4581c17 in main_loop_wait (nonblocking=<optimized out>)
    at util/main-loop.c:517
#39 0x00005610e41c3ad1 in main_loop () at vl.c:1899
#40 0x00005610e41c3ad1 in main (argc=<optimized out>, argv=<optimized out>, envp=<optimized out>) at vl.c:4719


-- 
Richard Jones, Virtualization Group, Red Hat http://people.redhat.com/~rjones
Read my programming and virtualization blog: http://rwmj.wordpress.com
virt-p2v converts physical machines to virtual machines.  Boot with a
live CD or over the network (PXE) and turn machines into KVM guests.
http://libguestfs.org/virt-v2v


* Re: [Qemu-devel] [PATCH 0/4] curl: locking cleanups and fixes
  2017-05-09 10:15 ` [Qemu-devel] [PATCH 0/4] curl: locking cleanups and fixes Richard W.M. Jones
@ 2017-05-09 16:03   ` Jeff Cody
  2017-05-09 16:06     ` Paolo Bonzini
  2017-05-09 16:15     ` Richard W.M. Jones
  0 siblings, 2 replies; 9+ messages in thread
From: Jeff Cody @ 2017-05-09 16:03 UTC
  To: Richard W.M. Jones; +Cc: Paolo Bonzini, qemu-devel, qemu-stable

On Tue, May 09, 2017 at 11:15:06AM +0100, Richard W.M. Jones wrote:
> 
> No I'm afraid this patch series does not fix the bug.
> 
> The stack trace is below.
> 
> Rich.
> 

I'm looking through qemu-devel, and I'm not finding a reference to the bug
mentioned.  Maybe I'm just missing it... can you describe the bug or point
me to the relevant email thread?

Thanks,
Jeff

> [stack trace snipped; quoted verbatim from Richard's message above]


* Re: [Qemu-devel] [PATCH 0/4] curl: locking cleanups and fixes
  2017-05-09 16:03   ` Jeff Cody
@ 2017-05-09 16:06     ` Paolo Bonzini
  2017-05-09 16:15     ` Richard W.M. Jones
  1 sibling, 0 replies; 9+ messages in thread
From: Paolo Bonzini @ 2017-05-09 16:06 UTC
  To: Jeff Cody, Richard W.M. Jones; +Cc: qemu-devel, qemu-stable



On 09/05/2017 18:03, Jeff Cody wrote:
> On Tue, May 09, 2017 at 11:15:06AM +0100, Richard W.M. Jones wrote:
>>
>> No I'm afraid this patch series does not fix the bug.
>>
>> The stack trace is below.
>>
>> Rich.
>>
> 
> I'm looking through qemu-devel, and I'm not finding a reference to the bug
> mentioned.  Maybe I'm just missing it... can you describe the bug or point
> me to the relevant email thread?

"Re: [Qemu-devel] [PULL 1/4] curl: do not use aio_context_acquire/release"

FWIW, Rich reported that patches 1-3 do work.  I'll look into the 
problems with patch 4 tomorrow.

Paolo

> 
> Thanks,
> Jeff
> 
>> [stack trace snipped; quoted verbatim from Richard's message above]


* Re: [Qemu-devel] [PATCH 0/4] curl: locking cleanups and fixes
  2017-05-09 16:03   ` Jeff Cody
  2017-05-09 16:06     ` Paolo Bonzini
@ 2017-05-09 16:15     ` Richard W.M. Jones
  1 sibling, 0 replies; 9+ messages in thread
From: Richard W.M. Jones @ 2017-05-09 16:15 UTC
  To: Jeff Cody; +Cc: Paolo Bonzini, qemu-devel, qemu-stable

On Tue, May 09, 2017 at 12:03:30PM -0400, Jeff Cody wrote:
> On Tue, May 09, 2017 at 11:15:06AM +0100, Richard W.M. Jones wrote:
> > 
> > No I'm afraid this patch series does not fix the bug.
> > 
> > The stack trace is below.
> > 
> > Rich.
> > 
> 
> I'm looking through qemu-devel, and I'm not finding a reference to the bug
> mentioned.  Maybe I'm just missing it... can you describe the bug or point
> me to the relevant email thread?

Hi Jeff, the bug is:

https://bugzilla.redhat.com/show_bug.cgi?id=1447590

Rich.

-- 
Richard Jones, Virtualization Group, Red Hat http://people.redhat.com/~rjones
Read my programming and virtualization blog: http://rwmj.wordpress.com
virt-df lists disk usage of guests without needing to install any
software inside the virtual machine.  Supports Linux and Windows.
http://people.redhat.com/~rjones/virt-df/

