All of lore.kernel.org
 help / color / mirror / Atom feed
* [Qemu-devel] [PATCH v3 0/4] exec: Make bounce buffer thread safe
@ 2015-03-16  5:31 Fam Zheng
  2015-03-16  5:31 ` [Qemu-devel] [PATCH v3 1/4] exec: Atomic access to bounce buffer Fam Zheng
                   ` (3 more replies)
  0 siblings, 4 replies; 12+ messages in thread
From: Fam Zheng @ 2015-03-16  5:31 UTC (permalink / raw)
  To: qemu-devel; +Cc: Paolo Bonzini

v3: Address Paolo's comments:
    Use atomic_xchg for bounce buffer.
    Use mutex and BH for map_client_list.

The global bounce buffer used for non-direct memory access is not thread-safe:

 1) Access to "bounce" is not atomic.

 2) Access to "map_client_list" is not atomic.

 3) In dma_blk_cb, there is a race condition between:

        mem = dma_memory_map(...
    and
        cpu_register_map_client(...

    Bounce may become available after dma_memory_map failed but before
    cpu_register_map_client is called.

 4) The reschedule_dma is not in the right AioContext;
    continue_after_map_failure called from other threads will race with
    dma_aio_cancel.

This series fixes these issues respectively.


Fam Zheng (4):
  exec: Atomic access to bounce buffer
  exec: Protect map_client_list with mutex
  exec: Notify cpu_register_map_client caller if the bounce buffer is
    available
  dma-helpers: Fix race condition of continue_after_map_failure and
    dma_aio_cancel

 dma-helpers.c             | 17 +++++------
 exec.c                    | 77 ++++++++++++++++++++++++++++++++---------------
 include/exec/cpu-common.h |  3 +-
 3 files changed, 62 insertions(+), 35 deletions(-)

-- 
1.9.3

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [Qemu-devel] [PATCH v3 1/4] exec: Atomic access to bounce buffer
  2015-03-16  5:31 [Qemu-devel] [PATCH v3 0/4] exec: Make bounce buffer thread safe Fam Zheng
@ 2015-03-16  5:31 ` Fam Zheng
  2015-03-16  7:30   ` Paolo Bonzini
  2015-03-16  5:31 ` [Qemu-devel] [PATCH v3 2/4] exec: Protect map_client_list with mutex Fam Zheng
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 12+ messages in thread
From: Fam Zheng @ 2015-03-16  5:31 UTC (permalink / raw)
  To: qemu-devel; +Cc: Paolo Bonzini

There could be a race condition when two processes call
address_space_map concurrently and both want to use the bounce buffer.

Add an in_use flag in BounceBuffer to sync it.

Signed-off-by: Fam Zheng <famz@redhat.com>
---
 exec.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/exec.c b/exec.c
index e97071a..4080044 100644
--- a/exec.c
+++ b/exec.c
@@ -2483,6 +2483,7 @@ typedef struct {
     void *buffer;
     hwaddr addr;
     hwaddr len;
+    bool in_use;
 } BounceBuffer;
 
 static BounceBuffer bounce;
@@ -2571,9 +2572,10 @@ void *address_space_map(AddressSpace *as,
     l = len;
     mr = address_space_translate(as, addr, &xlat, &l, is_write);
     if (!memory_access_is_direct(mr, is_write)) {
-        if (bounce.buffer) {
+        if (atomic_xchg(&bounce.in_use, true)) {
             return NULL;
         }
+        smp_mb();
         /* Avoid unbounded allocations */
         l = MIN(l, TARGET_PAGE_SIZE);
         bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
@@ -2641,6 +2643,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
     qemu_vfree(bounce.buffer);
     bounce.buffer = NULL;
     memory_region_unref(bounce.mr);
+    atomic_mb_set(&bounce.in_use, false);
     cpu_notify_map_clients();
 }
 
-- 
1.9.3

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [Qemu-devel] [PATCH v3 2/4] exec: Protect map_client_list with mutex
  2015-03-16  5:31 [Qemu-devel] [PATCH v3 0/4] exec: Make bounce buffer thread safe Fam Zheng
  2015-03-16  5:31 ` [Qemu-devel] [PATCH v3 1/4] exec: Atomic access to bounce buffer Fam Zheng
@ 2015-03-16  5:31 ` Fam Zheng
  2015-03-16  7:33   ` Paolo Bonzini
  2015-03-16  5:31 ` [Qemu-devel] [PATCH v3 3/4] exec: Notify cpu_register_map_client caller if the bounce buffer is available Fam Zheng
  2015-03-16  5:31 ` [Qemu-devel] [PATCH v3 4/4] dma-helpers: Fix race condition of continue_after_map_failure and dma_aio_cancel Fam Zheng
  3 siblings, 1 reply; 12+ messages in thread
From: Fam Zheng @ 2015-03-16  5:31 UTC (permalink / raw)
  To: qemu-devel; +Cc: Paolo Bonzini

So that accesses from multiple threads are safe.

Signed-off-by: Fam Zheng <famz@redhat.com>
---
 exec.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/exec.c b/exec.c
index 4080044..3e54580 100644
--- a/exec.c
+++ b/exec.c
@@ -429,15 +429,6 @@ address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
 }
 #endif
 
-void cpu_exec_init_all(void)
-{
-#if !defined(CONFIG_USER_ONLY)
-    qemu_mutex_init(&ram_list.mutex);
-    memory_map_init();
-    io_mem_init();
-#endif
-}
-
 #if !defined(CONFIG_USER_ONLY)
 
 static int cpu_common_post_load(void *opaque, int version_id)
@@ -2494,6 +2485,7 @@ typedef struct MapClient {
     QLIST_ENTRY(MapClient) link;
 } MapClient;
 
+QemuMutex map_client_list_lock;
 static QLIST_HEAD(map_client_list, MapClient) map_client_list
     = QLIST_HEAD_INITIALIZER(map_client_list);
 
@@ -2501,12 +2493,24 @@ void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
 {
     MapClient *client = g_malloc(sizeof(*client));
 
+    qemu_mutex_lock(&map_client_list_lock);
     client->opaque = opaque;
     client->callback = callback;
     QLIST_INSERT_HEAD(&map_client_list, client, link);
+    qemu_mutex_unlock(&map_client_list_lock);
     return client;
 }
 
+void cpu_exec_init_all(void)
+{
+#if !defined(CONFIG_USER_ONLY)
+    qemu_mutex_init(&ram_list.mutex);
+    memory_map_init();
+    io_mem_init();
+#endif
+    qemu_mutex_init(&map_client_list_lock);
+}
+
 static void cpu_unregister_map_client(void *_client)
 {
     MapClient *client = (MapClient *)_client;
@@ -2519,11 +2523,13 @@ static void cpu_notify_map_clients(void)
 {
     MapClient *client;
 
+    qemu_mutex_lock(&map_client_list_lock);
     while (!QLIST_EMPTY(&map_client_list)) {
         client = QLIST_FIRST(&map_client_list);
         client->callback(client->opaque);
         cpu_unregister_map_client(client);
     }
+    qemu_mutex_unlock(&map_client_list_lock);
 }
 
 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
-- 
1.9.3

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [Qemu-devel] [PATCH v3 3/4] exec: Notify cpu_register_map_client caller if the bounce buffer is available
  2015-03-16  5:31 [Qemu-devel] [PATCH v3 0/4] exec: Make bounce buffer thread safe Fam Zheng
  2015-03-16  5:31 ` [Qemu-devel] [PATCH v3 1/4] exec: Atomic access to bounce buffer Fam Zheng
  2015-03-16  5:31 ` [Qemu-devel] [PATCH v3 2/4] exec: Protect map_client_list with mutex Fam Zheng
@ 2015-03-16  5:31 ` Fam Zheng
  2015-03-16  7:34   ` Paolo Bonzini
  2015-03-16  5:31 ` [Qemu-devel] [PATCH v3 4/4] dma-helpers: Fix race condition of continue_after_map_failure and dma_aio_cancel Fam Zheng
  3 siblings, 1 reply; 12+ messages in thread
From: Fam Zheng @ 2015-03-16  5:31 UTC (permalink / raw)
  To: qemu-devel; +Cc: Paolo Bonzini

The caller's workflow is like

    if (!address_space_map()) {
        ...
        cpu_register_map_client();
    }

If the bounce buffer becomes available after address_space_map() but before
cpu_register_map_client(), the caller could miss it and would have to wait for
the next bounce buffer notification, which may never happen in the worst case.

Just notify the list in cpu_register_map_client().

Signed-off-by: Fam Zheng <famz@redhat.com>
---
 exec.c | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/exec.c b/exec.c
index 3e54580..20381a0 100644
--- a/exec.c
+++ b/exec.c
@@ -2489,6 +2489,17 @@ QemuMutex map_client_list_lock;
 static QLIST_HEAD(map_client_list, MapClient) map_client_list
     = QLIST_HEAD_INITIALIZER(map_client_list);
 
+static void cpu_notify_map_clients_unlocked(void)
+{
+    MapClient *client;
+
+    while (!QLIST_EMPTY(&map_client_list)) {
+        client = QLIST_FIRST(&map_client_list);
+        client->callback(client->opaque);
+        cpu_unregister_map_client(client);
+    }
+}
+
 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
 {
     MapClient *client = g_malloc(sizeof(*client));
@@ -2497,6 +2508,9 @@ void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
     client->opaque = opaque;
     client->callback = callback;
     QLIST_INSERT_HEAD(&map_client_list, client, link);
+    if (!atomic_read(&bounce.in_use)) {
+        cpu_notify_map_clients_unlocked();
+    }
     qemu_mutex_unlock(&map_client_list_lock);
     return client;
 }
@@ -2521,14 +2535,8 @@ static void cpu_unregister_map_client(void *_client)
 
 static void cpu_notify_map_clients(void)
 {
-    MapClient *client;
-
     qemu_mutex_lock(&map_client_list_lock);
-    while (!QLIST_EMPTY(&map_client_list)) {
-        client = QLIST_FIRST(&map_client_list);
-        client->callback(client->opaque);
-        cpu_unregister_map_client(client);
-    }
+    cpu_notify_map_clients_unlocked();
     qemu_mutex_unlock(&map_client_list_lock);
 }
 
-- 
1.9.3

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [Qemu-devel] [PATCH v3 4/4] dma-helpers: Fix race condition of continue_after_map_failure and dma_aio_cancel
  2015-03-16  5:31 [Qemu-devel] [PATCH v3 0/4] exec: Make bounce buffer thread safe Fam Zheng
                   ` (2 preceding siblings ...)
  2015-03-16  5:31 ` [Qemu-devel] [PATCH v3 3/4] exec: Notify cpu_register_map_client caller if the bounce buffer is available Fam Zheng
@ 2015-03-16  5:31 ` Fam Zheng
  2015-03-16  7:36   ` Paolo Bonzini
  3 siblings, 1 reply; 12+ messages in thread
From: Fam Zheng @ 2015-03-16  5:31 UTC (permalink / raw)
  To: qemu-devel; +Cc: Paolo Bonzini

If DMA's owning thread cancels the IO while the bounce buffer's owning thread
is notifying the "cpu client list", a use-after-free happens:

     continue_after_map_failure               dma_aio_cancel
     ------------------------------------------------------------------
     aio_bh_new
                                              qemu_bh_delete
     qemu_bh_schedule (use after free)

Also, the old code doesn't run the bh in the right AioContext.

Fix both problems by passing a QEMUBH to cpu_register_map_client.

Signed-off-by: Fam Zheng <famz@redhat.com>
---
 dma-helpers.c             | 17 ++++++++---------
 exec.c                    | 32 +++++++++++++++++++++-----------
 include/exec/cpu-common.h |  3 ++-
 3 files changed, 31 insertions(+), 21 deletions(-)

diff --git a/dma-helpers.c b/dma-helpers.c
index 6918572..1fddf6a 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -92,14 +92,6 @@ static void reschedule_dma(void *opaque)
     dma_blk_cb(dbs, 0);
 }
 
-static void continue_after_map_failure(void *opaque)
-{
-    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
-
-    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
-    qemu_bh_schedule(dbs->bh);
-}
-
 static void dma_blk_unmap(DMAAIOCB *dbs)
 {
     int i;
@@ -161,7 +153,9 @@ static void dma_blk_cb(void *opaque, int ret)
 
     if (dbs->iov.size == 0) {
         trace_dma_map_wait(dbs);
-        cpu_register_map_client(dbs, continue_after_map_failure);
+        dbs->bh = aio_bh_new(blk_get_aio_context(dbs->blk),
+                             reschedule_dma, dbs);
+        cpu_register_map_client(dbs->bh);
         return;
     }
 
@@ -183,6 +177,11 @@ static void dma_aio_cancel(BlockAIOCB *acb)
     if (dbs->acb) {
         blk_aio_cancel_async(dbs->acb);
     }
+    if (dbs->bh) {
+        cpu_unregister_map_client(dbs->bh);
+        qemu_bh_delete(dbs->bh);
+        dbs->bh = NULL;
+    }
 }
 
 
diff --git a/exec.c b/exec.c
index 20381a0..b15ca5e 100644
--- a/exec.c
+++ b/exec.c
@@ -2480,8 +2480,7 @@ typedef struct {
 static BounceBuffer bounce;
 
 typedef struct MapClient {
-    void *opaque;
-    void (*callback)(void *opaque);
+    QEMUBH *bh;
     QLIST_ENTRY(MapClient) link;
 } MapClient;
 
@@ -2489,30 +2488,29 @@ QemuMutex map_client_list_lock;
 static QLIST_HEAD(map_client_list, MapClient) map_client_list
     = QLIST_HEAD_INITIALIZER(map_client_list);
 
+static void cpu_unregister_map_client_do(MapClient *client);
 static void cpu_notify_map_clients_unlocked(void)
 {
     MapClient *client;
 
     while (!QLIST_EMPTY(&map_client_list)) {
         client = QLIST_FIRST(&map_client_list);
-        client->callback(client->opaque);
-        cpu_unregister_map_client(client);
+        qemu_bh_schedule(client->bh);
+        cpu_unregister_map_client_do(client);
     }
 }
 
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
+void cpu_register_map_client(QEMUBH *bh)
 {
     MapClient *client = g_malloc(sizeof(*client));
 
     qemu_mutex_lock(&map_client_list_lock);
-    client->opaque = opaque;
-    client->callback = callback;
+    client->bh = bh;
     QLIST_INSERT_HEAD(&map_client_list, client, link);
     if (!atomic_read(&bounce.in_use)) {
         cpu_notify_map_clients_unlocked();
     }
     qemu_mutex_unlock(&map_client_list_lock);
-    return client;
 }
 
 void cpu_exec_init_all(void)
@@ -2525,14 +2523,26 @@ void cpu_exec_init_all(void)
     qemu_mutex_init(&map_client_list_lock);
 }
 
-static void cpu_unregister_map_client(void *_client)
+static void cpu_unregister_map_client_do(MapClient *client)
 {
-    MapClient *client = (MapClient *)_client;
-
     QLIST_REMOVE(client, link);
     g_free(client);
 }
 
+void cpu_unregister_map_client(QEMUBH *bh)
+{
+    MapClient *client;
+
+    qemu_mutex_lock(&map_client_list_lock);
+    QLIST_FOREACH(client, &map_client_list, link) {
+        if (client->bh == bh) {
+            cpu_unregister_map_client_do(client);
+            break;
+        }
+    }
+    qemu_mutex_unlock(&map_client_list_lock);
+}
+
 static void cpu_notify_map_clients(void)
 {
     qemu_mutex_lock(&map_client_list_lock);
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index fcc3162..43428bd 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -82,7 +82,8 @@ void *cpu_physical_memory_map(hwaddr addr,
                               int is_write);
 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                                int is_write, hwaddr access_len);
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
+void cpu_register_map_client(QEMUBH *bh);
+void cpu_unregister_map_client(QEMUBH *bh);
 
 bool cpu_physical_memory_is_io(hwaddr phys_addr);
 
-- 
1.9.3

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* Re: [Qemu-devel] [PATCH v3 1/4] exec: Atomic access to bounce buffer
  2015-03-16  5:31 ` [Qemu-devel] [PATCH v3 1/4] exec: Atomic access to bounce buffer Fam Zheng
@ 2015-03-16  7:30   ` Paolo Bonzini
  2015-03-16  7:42     ` Fam Zheng
  0 siblings, 1 reply; 12+ messages in thread
From: Paolo Bonzini @ 2015-03-16  7:30 UTC (permalink / raw)
  To: Fam Zheng, qemu-devel



On 16/03/2015 06:31, Fam Zheng wrote:
> There could be a race condition when two processes call
> address_space_map concurrently and both want to use the bounce buffer.
> 
> Add an in_use flag in BounceBuffer to sync it.
> 
> Signed-off-by: Fam Zheng <famz@redhat.com>
> ---
>  exec.c | 5 ++++-
>  1 file changed, 4 insertions(+), 1 deletion(-)
> 
> diff --git a/exec.c b/exec.c
> index e97071a..4080044 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -2483,6 +2483,7 @@ typedef struct {
>      void *buffer;
>      hwaddr addr;
>      hwaddr len;
> +    bool in_use;
>  } BounceBuffer;
>  
>  static BounceBuffer bounce;
> @@ -2571,9 +2572,10 @@ void *address_space_map(AddressSpace *as,
>      l = len;
>      mr = address_space_translate(as, addr, &xlat, &l, is_write);
>      if (!memory_access_is_direct(mr, is_write)) {
> -        if (bounce.buffer) {
> +        if (atomic_xchg(&bounce.in_use, true)) {
>              return NULL;
>          }
> +        smp_mb();

smp_mb() not needed.

Ok with this change.

Paolo

>          /* Avoid unbounded allocations */
>          l = MIN(l, TARGET_PAGE_SIZE);
>          bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
> @@ -2641,6 +2643,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
>      qemu_vfree(bounce.buffer);
>      bounce.buffer = NULL;
>      memory_region_unref(bounce.mr);
> +    atomic_mb_set(&bounce.in_use, false);
>      cpu_notify_map_clients();
>  }
>  
> 

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/4] exec: Protect map_client_list with mutex
  2015-03-16  5:31 ` [Qemu-devel] [PATCH v3 2/4] exec: Protect map_client_list with mutex Fam Zheng
@ 2015-03-16  7:33   ` Paolo Bonzini
  2015-03-16  7:55     ` Fam Zheng
  0 siblings, 1 reply; 12+ messages in thread
From: Paolo Bonzini @ 2015-03-16  7:33 UTC (permalink / raw)
  To: Fam Zheng, qemu-devel



On 16/03/2015 06:31, Fam Zheng wrote:
> So that accesses from multiple threads are safe.
> 
> Signed-off-by: Fam Zheng <famz@redhat.com>
> ---
>  exec.c | 24 +++++++++++++++---------
>  1 file changed, 15 insertions(+), 9 deletions(-)
> 
> diff --git a/exec.c b/exec.c
> index 4080044..3e54580 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -429,15 +429,6 @@ address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
>  }
>  #endif
>  
> -void cpu_exec_init_all(void)
> -{
> -#if !defined(CONFIG_USER_ONLY)
> -    qemu_mutex_init(&ram_list.mutex);
> -    memory_map_init();
> -    io_mem_init();
> -#endif
> -}
> -
>  #if !defined(CONFIG_USER_ONLY)
>  
>  static int cpu_common_post_load(void *opaque, int version_id)
> @@ -2494,6 +2485,7 @@ typedef struct MapClient {
>      QLIST_ENTRY(MapClient) link;
>  } MapClient;
>  
> +QemuMutex map_client_list_lock;
>  static QLIST_HEAD(map_client_list, MapClient) map_client_list
>      = QLIST_HEAD_INITIALIZER(map_client_list);
>  
> @@ -2501,12 +2493,24 @@ void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
>  {
>      MapClient *client = g_malloc(sizeof(*client));
>  
> +    qemu_mutex_lock(&map_client_list_lock);
>      client->opaque = opaque;
>      client->callback = callback;
>      QLIST_INSERT_HEAD(&map_client_list, client, link);
> +    qemu_mutex_unlock(&map_client_list_lock);
>      return client;
>  }
>  
> +void cpu_exec_init_all(void)
> +{
> +#if !defined(CONFIG_USER_ONLY)
> +    qemu_mutex_init(&ram_list.mutex);
> +    memory_map_init();
> +    io_mem_init();
> +#endif
> +    qemu_mutex_init(&map_client_list_lock);
> +}
> +

You are moving cpu_exec_init_all within an #ifndef CONFIG_USER_ONLY.
Does this patch compile for user-mode emulation?

The move itself is okay but only if you remove the two calls in
*-user/main.c (and possibly move the prototype to include/exec/exec-all.h).

>  static void cpu_unregister_map_client(void *_client)
>  {
>      MapClient *client = (MapClient *)_client;
> @@ -2519,11 +2523,13 @@ static void cpu_notify_map_clients(void)
>  {
>      MapClient *client;
>  
> +    qemu_mutex_lock(&map_client_list_lock);
>      while (!QLIST_EMPTY(&map_client_list)) {
>          client = QLIST_FIRST(&map_client_list);
>          client->callback(client->opaque);

A good rule of thumb is never hold a lock while calling "unknown" code.
 This will be fixed in patch 4, so it's okay.

Paolo

>          cpu_unregister_map_client(client);
>      }
> +    qemu_mutex_unlock(&map_client_list_lock);
>  }
>  
>  bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
> 

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [Qemu-devel] [PATCH v3 3/4] exec: Notify cpu_register_map_client caller if the bounce buffer is available
  2015-03-16  5:31 ` [Qemu-devel] [PATCH v3 3/4] exec: Notify cpu_register_map_client caller if the bounce buffer is available Fam Zheng
@ 2015-03-16  7:34   ` Paolo Bonzini
  2015-03-16  7:44     ` Fam Zheng
  0 siblings, 1 reply; 12+ messages in thread
From: Paolo Bonzini @ 2015-03-16  7:34 UTC (permalink / raw)
  To: Fam Zheng, qemu-devel



On 16/03/2015 06:31, Fam Zheng wrote:
> The caller's workflow is like
> 
>     if (!address_space_map()) {
>         ...
>         cpu_register_map_client();
>     }
> 
> If bounce buffer became available after address_space_map() but before
> cpu_register_map_client(), the caller could miss it and has to wait for the
> next bounce buffer notify, which may never happen in the worse case.
> 
> Just notify the list in cpu_register_map_client().
> 
> Signed-off-by: Fam Zheng <famz@redhat.com>
> ---
>  exec.c | 22 +++++++++++++++-------
>  1 file changed, 15 insertions(+), 7 deletions(-)
> 
> diff --git a/exec.c b/exec.c
> index 3e54580..20381a0 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -2489,6 +2489,17 @@ QemuMutex map_client_list_lock;
>  static QLIST_HEAD(map_client_list, MapClient) map_client_list
>      = QLIST_HEAD_INITIALIZER(map_client_list);
>  
> +static void cpu_notify_map_clients_unlocked(void)
> +{
> +    MapClient *client;
> +
> +    while (!QLIST_EMPTY(&map_client_list)) {
> +        client = QLIST_FIRST(&map_client_list);
> +        client->callback(client->opaque);
> +        cpu_unregister_map_client(client);
> +    }
> +}

Isn't the convention to call these functions "*_locked" (e.g.
timer_mod_ns_locked, monitor_flush_locked, cpu_get_clock_locked)?

Otherwise okay.

Paolo

> +
>  void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
>  {
>      MapClient *client = g_malloc(sizeof(*client));
> @@ -2497,6 +2508,9 @@ void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
>      client->opaque = opaque;
>      client->callback = callback;
>      QLIST_INSERT_HEAD(&map_client_list, client, link);
> +    if (!atomic_read(&bounce.in_use)) {
> +        cpu_notify_map_clients_unlocked();
> +    }
>      qemu_mutex_unlock(&map_client_list_lock);
>      return client;
>  }
> @@ -2521,14 +2535,8 @@ static void cpu_unregister_map_client(void *_client)
>  
>  static void cpu_notify_map_clients(void)
>  {
> -    MapClient *client;
> -
>      qemu_mutex_lock(&map_client_list_lock);
> -    while (!QLIST_EMPTY(&map_client_list)) {
> -        client = QLIST_FIRST(&map_client_list);
> -        client->callback(client->opaque);
> -        cpu_unregister_map_client(client);
> -    }
> +    cpu_notify_map_clients_unlocked();
>      qemu_mutex_unlock(&map_client_list_lock);
>  }
>  
> 

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [Qemu-devel] [PATCH v3 4/4] dma-helpers: Fix race condition of continue_after_map_failure and dma_aio_cancel
  2015-03-16  5:31 ` [Qemu-devel] [PATCH v3 4/4] dma-helpers: Fix race condition of continue_after_map_failure and dma_aio_cancel Fam Zheng
@ 2015-03-16  7:36   ` Paolo Bonzini
  0 siblings, 0 replies; 12+ messages in thread
From: Paolo Bonzini @ 2015-03-16  7:36 UTC (permalink / raw)
  To: Fam Zheng, qemu-devel



On 16/03/2015 06:31, Fam Zheng wrote:
> If DMA's owning thread cancels the IO while the bounce buffer's owning thread
> is notifying the "cpu client list", a use-after-free happens:
> 
>      continue_after_map_failure               dma_aio_cancel
>      ------------------------------------------------------------------
>      aio_bh_new
>                                               qemu_bh_delete
>      qemu_bh_schedule (use after free)
> 
> Also, the old code doesn't run the bh in the right AioContext.
> 
> Fix both problems by passing a QEMUBH to cpu_register_map_client.
> 
> Signed-off-by: Fam Zheng <famz@redhat.com>
> ---
>  dma-helpers.c             | 17 ++++++++---------
>  exec.c                    | 32 +++++++++++++++++++++-----------
>  include/exec/cpu-common.h |  3 ++-
>  3 files changed, 31 insertions(+), 21 deletions(-)
> 
> diff --git a/dma-helpers.c b/dma-helpers.c
> index 6918572..1fddf6a 100644
> --- a/dma-helpers.c
> +++ b/dma-helpers.c
> @@ -92,14 +92,6 @@ static void reschedule_dma(void *opaque)
>      dma_blk_cb(dbs, 0);
>  }
>  
> -static void continue_after_map_failure(void *opaque)
> -{
> -    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
> -
> -    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
> -    qemu_bh_schedule(dbs->bh);
> -}
> -
>  static void dma_blk_unmap(DMAAIOCB *dbs)
>  {
>      int i;
> @@ -161,7 +153,9 @@ static void dma_blk_cb(void *opaque, int ret)
>  
>      if (dbs->iov.size == 0) {
>          trace_dma_map_wait(dbs);
> -        cpu_register_map_client(dbs, continue_after_map_failure);
> +        dbs->bh = aio_bh_new(blk_get_aio_context(dbs->blk),
> +                             reschedule_dma, dbs);
> +        cpu_register_map_client(dbs->bh);
>          return;
>      }
>  
> @@ -183,6 +177,11 @@ static void dma_aio_cancel(BlockAIOCB *acb)
>      if (dbs->acb) {
>          blk_aio_cancel_async(dbs->acb);
>      }
> +    if (dbs->bh) {
> +        cpu_unregister_map_client(dbs->bh);
> +        qemu_bh_delete(dbs->bh);
> +        dbs->bh = NULL;
> +    }
>  }
>  
>  
> diff --git a/exec.c b/exec.c
> index 20381a0..b15ca5e 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -2480,8 +2480,7 @@ typedef struct {
>  static BounceBuffer bounce;
>  
>  typedef struct MapClient {
> -    void *opaque;
> -    void (*callback)(void *opaque);
> +    QEMUBH *bh;
>      QLIST_ENTRY(MapClient) link;
>  } MapClient;
>  
> @@ -2489,30 +2488,29 @@ QemuMutex map_client_list_lock;
>  static QLIST_HEAD(map_client_list, MapClient) map_client_list
>      = QLIST_HEAD_INITIALIZER(map_client_list);
>  
> +static void cpu_unregister_map_client_do(MapClient *client);
>  static void cpu_notify_map_clients_unlocked(void)
>  {
>      MapClient *client;
>  
>      while (!QLIST_EMPTY(&map_client_list)) {
>          client = QLIST_FIRST(&map_client_list);
> -        client->callback(client->opaque);
> -        cpu_unregister_map_client(client);
> +        qemu_bh_schedule(client->bh);
> +        cpu_unregister_map_client_do(client);
>      }
>  }
>  
> -void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
> +void cpu_register_map_client(QEMUBH *bh)
>  {
>      MapClient *client = g_malloc(sizeof(*client));
>  
>      qemu_mutex_lock(&map_client_list_lock);
> -    client->opaque = opaque;
> -    client->callback = callback;
> +    client->bh = bh;
>      QLIST_INSERT_HEAD(&map_client_list, client, link);
>      if (!atomic_read(&bounce.in_use)) {
>          cpu_notify_map_clients_unlocked();
>      }
>      qemu_mutex_unlock(&map_client_list_lock);
> -    return client;
>  }
>  
>  void cpu_exec_init_all(void)
> @@ -2525,14 +2523,26 @@ void cpu_exec_init_all(void)
>      qemu_mutex_init(&map_client_list_lock);
>  }
>  
> -static void cpu_unregister_map_client(void *_client)
> +static void cpu_unregister_map_client_do(MapClient *client)
>  {
> -    MapClient *client = (MapClient *)_client;
> -
>      QLIST_REMOVE(client, link);
>      g_free(client);
>  }
>  
> +void cpu_unregister_map_client(QEMUBH *bh)
> +{
> +    MapClient *client;
> +
> +    qemu_mutex_lock(&map_client_list_lock);
> +    QLIST_FOREACH(client, &map_client_list, link) {
> +        if (client->bh == bh) {
> +            cpu_unregister_map_client_do(client);
> +            break;
> +        }
> +    }
> +    qemu_mutex_unlock(&map_client_list_lock);
> +}
> +
>  static void cpu_notify_map_clients(void)
>  {
>      qemu_mutex_lock(&map_client_list_lock);
> diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
> index fcc3162..43428bd 100644
> --- a/include/exec/cpu-common.h
> +++ b/include/exec/cpu-common.h
> @@ -82,7 +82,8 @@ void *cpu_physical_memory_map(hwaddr addr,
>                                int is_write);
>  void cpu_physical_memory_unmap(void *buffer, hwaddr len,
>                                 int is_write, hwaddr access_len);
> -void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
> +void cpu_register_map_client(QEMUBH *bh);
> +void cpu_unregister_map_client(QEMUBH *bh);
>  
>  bool cpu_physical_memory_is_io(hwaddr phys_addr);
>  
> 

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [Qemu-devel] [PATCH v3 1/4] exec: Atomic access to bounce buffer
  2015-03-16  7:30   ` Paolo Bonzini
@ 2015-03-16  7:42     ` Fam Zheng
  0 siblings, 0 replies; 12+ messages in thread
From: Fam Zheng @ 2015-03-16  7:42 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: qemu-devel

On Mon, 03/16 08:30, Paolo Bonzini wrote:
> 
> 
> On 16/03/2015 06:31, Fam Zheng wrote:
> > There could be a race condition when two processes call
> > address_space_map concurrently and both want to use the bounce buffer.
> > 
> > Add an in_use flag in BounceBuffer to sync it.
> > 
> > Signed-off-by: Fam Zheng <famz@redhat.com>
> > ---
> >  exec.c | 5 ++++-
> >  1 file changed, 4 insertions(+), 1 deletion(-)
> > 
> > diff --git a/exec.c b/exec.c
> > index e97071a..4080044 100644
> > --- a/exec.c
> > +++ b/exec.c
> > @@ -2483,6 +2483,7 @@ typedef struct {
> >      void *buffer;
> >      hwaddr addr;
> >      hwaddr len;
> > +    bool in_use;
> >  } BounceBuffer;
> >  
> >  static BounceBuffer bounce;
> > @@ -2571,9 +2572,10 @@ void *address_space_map(AddressSpace *as,
> >      l = len;
> >      mr = address_space_translate(as, addr, &xlat, &l, is_write);
> >      if (!memory_access_is_direct(mr, is_write)) {
> > -        if (bounce.buffer) {
> > +        if (atomic_xchg(&bounce.in_use, true)) {
> >              return NULL;
> >          }
> > +        smp_mb();
> 
> smp_mb() not needed.

OK, I was confused by the Linux documentation on atomic_xchg. Now I've looked
at the right places, it is not needed. Thanks,

Fam

> 
> Ok with this change.
> 
> Paolo
> 
> >          /* Avoid unbounded allocations */
> >          l = MIN(l, TARGET_PAGE_SIZE);
> >          bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
> > @@ -2641,6 +2643,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
> >      qemu_vfree(bounce.buffer);
> >      bounce.buffer = NULL;
> >      memory_region_unref(bounce.mr);
> > +    atomic_mb_set(&bounce.in_use, false);
> >      cpu_notify_map_clients();
> >  }
> >  
> > 

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [Qemu-devel] [PATCH v3 3/4] exec: Notify cpu_register_map_client caller if the bounce buffer is available
  2015-03-16  7:34   ` Paolo Bonzini
@ 2015-03-16  7:44     ` Fam Zheng
  0 siblings, 0 replies; 12+ messages in thread
From: Fam Zheng @ 2015-03-16  7:44 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: qemu-devel

On Mon, 03/16 08:34, Paolo Bonzini wrote:
> 
> 
> On 16/03/2015 06:31, Fam Zheng wrote:
> > The caller's workflow is like
> > 
> >     if (!address_space_map()) {
> >         ...
> >         cpu_register_map_client();
> >     }
> > 
> > If bounce buffer became available after address_space_map() but before
> > cpu_register_map_client(), the caller could miss it and has to wait for the
> > next bounce buffer notify, which may never happen in the worse case.
> > 
> > Just notify the list in cpu_register_map_client().
> > 
> > Signed-off-by: Fam Zheng <famz@redhat.com>
> > ---
> >  exec.c | 22 +++++++++++++++-------
> >  1 file changed, 15 insertions(+), 7 deletions(-)
> > 
> > diff --git a/exec.c b/exec.c
> > index 3e54580..20381a0 100644
> > --- a/exec.c
> > +++ b/exec.c
> > @@ -2489,6 +2489,17 @@ QemuMutex map_client_list_lock;
> >  static QLIST_HEAD(map_client_list, MapClient) map_client_list
> >      = QLIST_HEAD_INITIALIZER(map_client_list);
> >  
> > +static void cpu_notify_map_clients_unlocked(void)
> > +{
> > +    MapClient *client;
> > +
> > +    while (!QLIST_EMPTY(&map_client_list)) {
> > +        client = QLIST_FIRST(&map_client_list);
> > +        client->callback(client->opaque);
> > +        cpu_unregister_map_client(client);
> > +    }
> > +}
> 
> Isn't the convention to call these functions "*_locked" (e.g.
> timer_mod_ns_locked, monitor_flush_locked, cpu_get_clock_locked)?

Exactly, will rename. Thanks.

Fam

> 
> Otherwise okay.
> 
> Paolo
> 
> > +
> >  void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
> >  {
> >      MapClient *client = g_malloc(sizeof(*client));
> > @@ -2497,6 +2508,9 @@ void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
> >      client->opaque = opaque;
> >      client->callback = callback;
> >      QLIST_INSERT_HEAD(&map_client_list, client, link);
> > +    if (!atomic_read(&bounce.in_use)) {
> > +        cpu_notify_map_clients_unlocked();
> > +    }
> >      qemu_mutex_unlock(&map_client_list_lock);
> >      return client;
> >  }
> > @@ -2521,14 +2535,8 @@ static void cpu_unregister_map_client(void *_client)
> >  
> >  static void cpu_notify_map_clients(void)
> >  {
> > -    MapClient *client;
> > -
> >      qemu_mutex_lock(&map_client_list_lock);
> > -    while (!QLIST_EMPTY(&map_client_list)) {
> > -        client = QLIST_FIRST(&map_client_list);
> > -        client->callback(client->opaque);
> > -        cpu_unregister_map_client(client);
> > -    }
> > +    cpu_notify_map_clients_unlocked();
> >      qemu_mutex_unlock(&map_client_list_lock);
> >  }
> >  
> > 

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/4] exec: Protect map_client_list with mutex
  2015-03-16  7:33   ` Paolo Bonzini
@ 2015-03-16  7:55     ` Fam Zheng
  0 siblings, 0 replies; 12+ messages in thread
From: Fam Zheng @ 2015-03-16  7:55 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: qemu-devel

On Mon, 03/16 08:33, Paolo Bonzini wrote:
> 
> 
> On 16/03/2015 06:31, Fam Zheng wrote:
> > So that accesses from multiple threads are safe.
> > 
> > Signed-off-by: Fam Zheng <famz@redhat.com>
> > ---
> >  exec.c | 24 +++++++++++++++---------
> >  1 file changed, 15 insertions(+), 9 deletions(-)
> > 
> > diff --git a/exec.c b/exec.c
> > index 4080044..3e54580 100644
> > --- a/exec.c
> > +++ b/exec.c
> > @@ -429,15 +429,6 @@ address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
> >  }
> >  #endif
> >  
> > -void cpu_exec_init_all(void)
> > -{
> > -#if !defined(CONFIG_USER_ONLY)
> > -    qemu_mutex_init(&ram_list.mutex);
> > -    memory_map_init();
> > -    io_mem_init();
> > -#endif
> > -}
> > -
> >  #if !defined(CONFIG_USER_ONLY)
> >  
> >  static int cpu_common_post_load(void *opaque, int version_id)
> > @@ -2494,6 +2485,7 @@ typedef struct MapClient {
> >      QLIST_ENTRY(MapClient) link;
> >  } MapClient;
> >  
> > +QemuMutex map_client_list_lock;
> >  static QLIST_HEAD(map_client_list, MapClient) map_client_list
> >      = QLIST_HEAD_INITIALIZER(map_client_list);
> >  
> > @@ -2501,12 +2493,24 @@ void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
> >  {
> >      MapClient *client = g_malloc(sizeof(*client));
> >  
> > +    qemu_mutex_lock(&map_client_list_lock);
> >      client->opaque = opaque;
> >      client->callback = callback;
> >      QLIST_INSERT_HEAD(&map_client_list, client, link);
> > +    qemu_mutex_unlock(&map_client_list_lock);
> >      return client;
> >  }
> >  
> > +void cpu_exec_init_all(void)
> > +{
> > +#if !defined(CONFIG_USER_ONLY)
> > +    qemu_mutex_init(&ram_list.mutex);
> > +    memory_map_init();
> > +    io_mem_init();
> > +#endif
> > +    qemu_mutex_init(&map_client_list_lock);
> > +}
> > +
> 
> You are moving cpu_exec_init_all within an #ifndef CONFIG_USER_ONLY.
> Does this patch compile for user-mode emulation?

No. Good catch!

> 
> The move itself is okay but only if you remove the two calls in
> *-user/main.c (and possibly move the prototype to include/exec/exec-all.h).

Sounds good, I'll split that patch in v4.

Fam

> 
> >  static void cpu_unregister_map_client(void *_client)
> >  {
> >      MapClient *client = (MapClient *)_client;
> > @@ -2519,11 +2523,13 @@ static void cpu_notify_map_clients(void)
> >  {
> >      MapClient *client;
> >  
> > +    qemu_mutex_lock(&map_client_list_lock);
> >      while (!QLIST_EMPTY(&map_client_list)) {
> >          client = QLIST_FIRST(&map_client_list);
> >          client->callback(client->opaque);
> 
> A good rule of thumb is never hold a lock while calling "unknown" code.
>  This will be fixed in patch 4, so it's okay.
> 
> Paolo
> 
> >          cpu_unregister_map_client(client);
> >      }
> > +    qemu_mutex_unlock(&map_client_list_lock);
> >  }
> >  
> >  bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
> > 

^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2015-03-16  7:55 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-03-16  5:31 [Qemu-devel] [PATCH v3 0/4] exec: Make bounce buffer thread safe Fam Zheng
2015-03-16  5:31 ` [Qemu-devel] [PATCH v3 1/4] exec: Atomic access to bounce buffer Fam Zheng
2015-03-16  7:30   ` Paolo Bonzini
2015-03-16  7:42     ` Fam Zheng
2015-03-16  5:31 ` [Qemu-devel] [PATCH v3 2/4] exec: Protect map_client_list with mutex Fam Zheng
2015-03-16  7:33   ` Paolo Bonzini
2015-03-16  7:55     ` Fam Zheng
2015-03-16  5:31 ` [Qemu-devel] [PATCH v3 3/4] exec: Notify cpu_register_map_client caller if the bounce buffer is available Fam Zheng
2015-03-16  7:34   ` Paolo Bonzini
2015-03-16  7:44     ` Fam Zheng
2015-03-16  5:31 ` [Qemu-devel] [PATCH v3 4/4] dma-helpers: Fix race condition of continue_after_map_failure and dma_aio_cancel Fam Zheng
2015-03-16  7:36   ` Paolo Bonzini

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.