All of lore.kernel.org
 help / color / mirror / Atom feed
* [Qemu-devel] [PATCH] block/vdi: Fix locking for parallel requests
@ 2015-02-17 21:33 Max Reitz
  2015-02-17 21:49 ` Paolo Bonzini
                   ` (3 more replies)
  0 siblings, 4 replies; 9+ messages in thread
From: Max Reitz @ 2015-02-17 21:33 UTC (permalink / raw)
  To: qemu-devel; +Cc: Kevin Wolf, Stefan Weil, Stefan Hajnoczi, Max Reitz

Concurrently modifying the bmap is not a good idea; this patch adds a
lock for it. See https://bugs.launchpad.net/qemu/+bug/1422307 for what
can go wrong without.

Signed-off-by: Max Reitz <mreitz@redhat.com>
---
 block/vdi.c | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/block/vdi.c b/block/vdi.c
index 74030c6..c5ff428 100644
--- a/block/vdi.c
+++ b/block/vdi.c
@@ -51,6 +51,7 @@
 
 #include "qemu-common.h"
 #include "block/block_int.h"
+#include "block/coroutine.h"
 #include "qemu/module.h"
 #include "migration/migration.h"
 
@@ -196,6 +197,8 @@ typedef struct {
     /* VDI header (converted to host endianness). */
     VdiHeader header;
 
+    CoMutex bmap_lock;
+
     Error *migration_blocker;
 } BDRVVdiState;
 
@@ -498,6 +501,8 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
         goto fail_free_bmap;
     }
 
+    qemu_co_mutex_init(&s->bmap_lock);
+
     /* Disable migration when vdi images are used */
     error_set(&s->migration_blocker,
               QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
@@ -619,6 +624,9 @@ static int vdi_co_write(BlockDriverState *bs,
                n_sectors, sector_num);
 
         /* prepare next AIO request */
+        if (!block) {
+            qemu_co_mutex_lock(&s->bmap_lock);
+        }
         bmap_entry = le32_to_cpu(s->bmap[block_index]);
         if (!VDI_IS_ALLOCATED(bmap_entry)) {
             /* Allocate new block and write to it. */
@@ -641,9 +649,13 @@ static int vdi_co_write(BlockDriverState *bs,
                    (s->block_sectors - n_sectors - sector_in_block) * SECTOR_SIZE);
             ret = bdrv_write(bs->file, offset, block, s->block_sectors);
         } else {
-            uint64_t offset = s->header.offset_data / SECTOR_SIZE +
-                              (uint64_t)bmap_entry * s->block_sectors +
-                              sector_in_block;
+            uint64_t offset;
+
+            qemu_co_mutex_unlock(&s->bmap_lock);
+
+            offset = s->header.offset_data / SECTOR_SIZE +
+                     (uint64_t)bmap_entry * s->block_sectors +
+                     sector_in_block;
             ret = bdrv_write(bs->file, offset, buf, n_sectors);
         }
 
@@ -656,6 +668,9 @@ static int vdi_co_write(BlockDriverState *bs,
 
     logout("finished data write\n");
     if (ret < 0) {
+        if (block) {
+            qemu_co_mutex_unlock(&s->bmap_lock);
+        }
         return ret;
     }
 
@@ -688,6 +703,8 @@ static int vdi_co_write(BlockDriverState *bs,
         logout("will write %u block map sectors starting from entry %u\n",
                n_sectors, bmap_first);
         ret = bdrv_write(bs->file, offset, base, n_sectors);
+
+        qemu_co_mutex_unlock(&s->bmap_lock);
     }
 
     return ret;
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* Re: [Qemu-devel] [PATCH] block/vdi: Fix locking for parallel requests
  2015-02-17 21:33 [Qemu-devel] [PATCH] block/vdi: Fix locking for parallel requests Max Reitz
@ 2015-02-17 21:49 ` Paolo Bonzini
  2015-02-17 21:49   ` Max Reitz
  2015-02-18  7:52 ` Paolo Bonzini
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 9+ messages in thread
From: Paolo Bonzini @ 2015-02-17 21:49 UTC (permalink / raw)
  To: Max Reitz, qemu-devel
  Cc: Kevin Wolf, Stefan Weil, qemu-stable, Stefan Hajnoczi

Cc: qemu-stable <qemu-stable@nongnu.org>

On 17/02/2015 22:33, Max Reitz wrote:
> Concurrently modifying the bmap is not a good idea; this patch adds a
> lock for it. See https://bugs.launchpad.net/qemu/+bug/1422307 for what
> can go wrong without.
> 
> Signed-off-by: Max Reitz <mreitz@redhat.com>
> ---
>  block/vdi.c | 23 ++++++++++++++++++++---
>  1 file changed, 20 insertions(+), 3 deletions(-)
> 
> diff --git a/block/vdi.c b/block/vdi.c
> index 74030c6..c5ff428 100644
> --- a/block/vdi.c
> +++ b/block/vdi.c
> @@ -51,6 +51,7 @@
>  
>  #include "qemu-common.h"
>  #include "block/block_int.h"
> +#include "block/coroutine.h"
>  #include "qemu/module.h"
>  #include "migration/migration.h"
>  
> @@ -196,6 +197,8 @@ typedef struct {
>      /* VDI header (converted to host endianness). */
>      VdiHeader header;
>  
> +    CoMutex bmap_lock;
> +
>      Error *migration_blocker;
>  } BDRVVdiState;
>  
> @@ -498,6 +501,8 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
>          goto fail_free_bmap;
>      }
>  
> +    qemu_co_mutex_init(&s->bmap_lock);
> +
>      /* Disable migration when vdi images are used */
>      error_set(&s->migration_blocker,
>                QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
> @@ -619,6 +624,9 @@ static int vdi_co_write(BlockDriverState *bs,
>                 n_sectors, sector_num);
>  
>          /* prepare next AIO request */
> +        if (!block) {
> +            qemu_co_mutex_lock(&s->bmap_lock);
> +        }
>          bmap_entry = le32_to_cpu(s->bmap[block_index]);
>          if (!VDI_IS_ALLOCATED(bmap_entry)) {
>              /* Allocate new block and write to it. */
> @@ -641,9 +649,13 @@ static int vdi_co_write(BlockDriverState *bs,
>                     (s->block_sectors - n_sectors - sector_in_block) * SECTOR_SIZE);
>              ret = bdrv_write(bs->file, offset, block, s->block_sectors);
>          } else {
> -            uint64_t offset = s->header.offset_data / SECTOR_SIZE +
> -                              (uint64_t)bmap_entry * s->block_sectors +
> -                              sector_in_block;
> +            uint64_t offset;
> +
> +            qemu_co_mutex_unlock(&s->bmap_lock);
> +
> +            offset = s->header.offset_data / SECTOR_SIZE +
> +                     (uint64_t)bmap_entry * s->block_sectors +
> +                     sector_in_block;
>              ret = bdrv_write(bs->file, offset, buf, n_sectors);
>          }
>  
> @@ -656,6 +668,9 @@ static int vdi_co_write(BlockDriverState *bs,
>  
>      logout("finished data write\n");
>      if (ret < 0) {
> +        if (block) {
> +            qemu_co_mutex_unlock(&s->bmap_lock);
> +        }
>          return ret;
>      }
>  
> @@ -688,6 +703,8 @@ static int vdi_co_write(BlockDriverState *bs,
>          logout("will write %u block map sectors starting from entry %u\n",
>                 n_sectors, bmap_first);
>          ret = bdrv_write(bs->file, offset, base, n_sectors);
> +
> +        qemu_co_mutex_unlock(&s->bmap_lock);
>      }
>  
>      return ret;
> 

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [Qemu-devel] [PATCH] block/vdi: Fix locking for parallel requests
  2015-02-17 21:49 ` Paolo Bonzini
@ 2015-02-17 21:49   ` Max Reitz
  0 siblings, 0 replies; 9+ messages in thread
From: Max Reitz @ 2015-02-17 21:49 UTC (permalink / raw)
  To: Paolo Bonzini, qemu-devel
  Cc: Kevin Wolf, Stefan Weil, qemu-stable, Stefan Hajnoczi

On 2015-02-17 at 16:49, Paolo Bonzini wrote:
> Cc: qemu-stable <qemu-stable@nongnu.org>

Right, I forgot that. Thanks!

Max

> On 17/02/2015 22:33, Max Reitz wrote:
>> Concurrently modifying the bmap is not a good idea; this patch adds a
>> lock for it. See https://bugs.launchpad.net/qemu/+bug/1422307 for what
>> can go wrong without.
>>
>> Signed-off-by: Max Reitz <mreitz@redhat.com>
>> ---
>>   block/vdi.c | 23 ++++++++++++++++++++---
>>   1 file changed, 20 insertions(+), 3 deletions(-)
>>
>> diff --git a/block/vdi.c b/block/vdi.c
>> index 74030c6..c5ff428 100644
>> --- a/block/vdi.c
>> +++ b/block/vdi.c
>> @@ -51,6 +51,7 @@
>>   
>>   #include "qemu-common.h"
>>   #include "block/block_int.h"
>> +#include "block/coroutine.h"
>>   #include "qemu/module.h"
>>   #include "migration/migration.h"
>>   
>> @@ -196,6 +197,8 @@ typedef struct {
>>       /* VDI header (converted to host endianness). */
>>       VdiHeader header;
>>   
>> +    CoMutex bmap_lock;
>> +
>>       Error *migration_blocker;
>>   } BDRVVdiState;
>>   
>> @@ -498,6 +501,8 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
>>           goto fail_free_bmap;
>>       }
>>   
>> +    qemu_co_mutex_init(&s->bmap_lock);
>> +
>>       /* Disable migration when vdi images are used */
>>       error_set(&s->migration_blocker,
>>                 QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
>> @@ -619,6 +624,9 @@ static int vdi_co_write(BlockDriverState *bs,
>>                  n_sectors, sector_num);
>>   
>>           /* prepare next AIO request */
>> +        if (!block) {
>> +            qemu_co_mutex_lock(&s->bmap_lock);
>> +        }
>>           bmap_entry = le32_to_cpu(s->bmap[block_index]);
>>           if (!VDI_IS_ALLOCATED(bmap_entry)) {
>>               /* Allocate new block and write to it. */
>> @@ -641,9 +649,13 @@ static int vdi_co_write(BlockDriverState *bs,
>>                      (s->block_sectors - n_sectors - sector_in_block) * SECTOR_SIZE);
>>               ret = bdrv_write(bs->file, offset, block, s->block_sectors);
>>           } else {
>> -            uint64_t offset = s->header.offset_data / SECTOR_SIZE +
>> -                              (uint64_t)bmap_entry * s->block_sectors +
>> -                              sector_in_block;
>> +            uint64_t offset;
>> +
>> +            qemu_co_mutex_unlock(&s->bmap_lock);
>> +
>> +            offset = s->header.offset_data / SECTOR_SIZE +
>> +                     (uint64_t)bmap_entry * s->block_sectors +
>> +                     sector_in_block;
>>               ret = bdrv_write(bs->file, offset, buf, n_sectors);
>>           }
>>   
>> @@ -656,6 +668,9 @@ static int vdi_co_write(BlockDriverState *bs,
>>   
>>       logout("finished data write\n");
>>       if (ret < 0) {
>> +        if (block) {
>> +            qemu_co_mutex_unlock(&s->bmap_lock);
>> +        }
>>           return ret;
>>       }
>>   
>> @@ -688,6 +703,8 @@ static int vdi_co_write(BlockDriverState *bs,
>>           logout("will write %u block map sectors starting from entry %u\n",
>>                  n_sectors, bmap_first);
>>           ret = bdrv_write(bs->file, offset, base, n_sectors);
>> +
>> +        qemu_co_mutex_unlock(&s->bmap_lock);
>>       }
>>   
>>       return ret;
>>

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [Qemu-devel] [PATCH] block/vdi: Fix locking for parallel requests
  2015-02-17 21:33 [Qemu-devel] [PATCH] block/vdi: Fix locking for parallel requests Max Reitz
  2015-02-17 21:49 ` Paolo Bonzini
@ 2015-02-18  7:52 ` Paolo Bonzini
  2015-02-18 15:10   ` Max Reitz
  2015-02-18 12:39 ` Kevin Wolf
  2015-02-27 11:25 ` Stefan Hajnoczi
  3 siblings, 1 reply; 9+ messages in thread
From: Paolo Bonzini @ 2015-02-18  7:52 UTC (permalink / raw)
  To: Max Reitz, qemu-devel; +Cc: Kevin Wolf, Stefan Weil, Stefan Hajnoczi



On 17/02/2015 22:33, Max Reitz wrote:
> Concurrently modifying the bmap is not a good idea; this patch adds a
> lock for it. See https://bugs.launchpad.net/qemu/+bug/1422307 for what
> can go wrong without.
> 
> Signed-off-by: Max Reitz <mreitz@redhat.com>
> ---
>  block/vdi.c | 23 ++++++++++++++++++++---
>  1 file changed, 20 insertions(+), 3 deletions(-)
> 
> diff --git a/block/vdi.c b/block/vdi.c
> index 74030c6..c5ff428 100644
> --- a/block/vdi.c
> +++ b/block/vdi.c
> @@ -51,6 +51,7 @@
>  
>  #include "qemu-common.h"
>  #include "block/block_int.h"
> +#include "block/coroutine.h"
>  #include "qemu/module.h"
>  #include "migration/migration.h"
>  
> @@ -196,6 +197,8 @@ typedef struct {
>      /* VDI header (converted to host endianness). */
>      VdiHeader header;
>  
> +    CoMutex bmap_lock;
> +
>      Error *migration_blocker;
>  } BDRVVdiState;
>  
> @@ -498,6 +501,8 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
>          goto fail_free_bmap;
>      }
>  
> +    qemu_co_mutex_init(&s->bmap_lock);
> +
>      /* Disable migration when vdi images are used */
>      error_set(&s->migration_blocker,
>                QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
> @@ -619,6 +624,9 @@ static int vdi_co_write(BlockDriverState *bs,
>                 n_sectors, sector_num);
>  
>          /* prepare next AIO request */
> +        if (!block) {
> +            qemu_co_mutex_lock(&s->bmap_lock);
> +        }
>          bmap_entry = le32_to_cpu(s->bmap[block_index]);
>          if (!VDI_IS_ALLOCATED(bmap_entry)) {
>              /* Allocate new block and write to it. */
> @@ -641,9 +649,13 @@ static int vdi_co_write(BlockDriverState *bs,
>                     (s->block_sectors - n_sectors - sector_in_block) * SECTOR_SIZE);
>              ret = bdrv_write(bs->file, offset, block, s->block_sectors);
>          } else {
> -            uint64_t offset = s->header.offset_data / SECTOR_SIZE +
> -                              (uint64_t)bmap_entry * s->block_sectors +
> -                              sector_in_block;
> +            uint64_t offset;
> +
> +            qemu_co_mutex_unlock(&s->bmap_lock);

This qemu_co_mutex_unlock should only be done "if (!block)", because you
can have two iterations of the loop, the first going down the "then" and
the second going down the "else".  In that case you must not give away
the lock, because the lock will be needed when you write the bitmap.

> +
> +            offset = s->header.offset_data / SECTOR_SIZE +
> +                     (uint64_t)bmap_entry * s->block_sectors +
> +                     sector_in_block;
>              ret = bdrv_write(bs->file, offset, buf, n_sectors);
>          }
>  
> @@ -656,6 +668,9 @@ static int vdi_co_write(BlockDriverState *bs,
>  
>      logout("finished data write\n");
>      if (ret < 0) {
> +        if (block) {
> +            qemu_co_mutex_unlock(&s->bmap_lock);
> +        }
>          return ret;
>      }
>  
> @@ -688,6 +703,8 @@ static int vdi_co_write(BlockDriverState *bs,
>          logout("will write %u block map sectors starting from entry %u\n",
>                 n_sectors, bmap_first);
>          ret = bdrv_write(bs->file, offset, base, n_sectors);
> +
> +        qemu_co_mutex_unlock(&s->bmap_lock);
>      }
>  
>      return ret;
> 

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [Qemu-devel] [PATCH] block/vdi: Fix locking for parallel requests
  2015-02-17 21:33 [Qemu-devel] [PATCH] block/vdi: Fix locking for parallel requests Max Reitz
  2015-02-17 21:49 ` Paolo Bonzini
  2015-02-18  7:52 ` Paolo Bonzini
@ 2015-02-18 12:39 ` Kevin Wolf
  2015-02-18 15:09   ` Max Reitz
  2015-02-27 11:25 ` Stefan Hajnoczi
  3 siblings, 1 reply; 9+ messages in thread
From: Kevin Wolf @ 2015-02-18 12:39 UTC (permalink / raw)
  To: Max Reitz; +Cc: Stefan Weil, qemu-devel, Stefan Hajnoczi

Am 17.02.2015 um 22:33 hat Max Reitz geschrieben:
> Concurrently modifying the bmap is not a good idea;

Why? I mean, the fact that this fixes something for you probably means
that there really is some piece of local state that is invalidated by
concurrent writes, but it's not obvious to me what it is.

What could obviously happen is that metadata is written before the data
is on the disk, but as we don't support backing files for VDI, this is
irrelevant. (And if it were relevant, it still wouldn't be fixed by your
patch because the driver never flushes.)

> this patch adds a
> lock for it. See https://bugs.launchpad.net/qemu/+bug/1422307 for what
> can go wrong without.
> 
> Signed-off-by: Max Reitz <mreitz@redhat.com>

Kevin

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [Qemu-devel] [PATCH] block/vdi: Fix locking for parallel requests
  2015-02-18 12:39 ` Kevin Wolf
@ 2015-02-18 15:09   ` Max Reitz
  0 siblings, 0 replies; 9+ messages in thread
From: Max Reitz @ 2015-02-18 15:09 UTC (permalink / raw)
  To: Kevin Wolf; +Cc: Stefan Weil, qemu-devel, Stefan Hajnoczi

On 2015-02-18 at 07:39, Kevin Wolf wrote:
> Am 17.02.2015 um 22:33 hat Max Reitz geschrieben:
>> Concurrently modifying the bmap is not a good idea;
> Why? I mean, the fact that this fixes something for you probably means
> that there really is some piece of local state that is invalidated by
> concurrent writes, but it's not obvious to me what it is.

One thing I can see is the following: The write operations for header 
and bmap are separate. Therefore, writing the header may succeed; then, 
when writing the bmap, something may yield and another instance of 
vdi_co_write() gets to run. That instance is still in the while loop, 
allocating a new block.

It modifies s->bmap[] and increases s->header.blocks_allocated; then 
tries to write. That yields and the first coroutine wakes up. It writes 
the bmap to the disk, which is now inconsistent with 
s->header.blocks_allocated, though. So that looks to me like it may 
break, although it in fact doesn't seem to be responsible for the 
problem at hand.

For that problem, always doing the unlock() after the bdrv_write() in 
the "if (!VDI_IS_ALLOCATED(bmap_entry))" path seems enough; although I 
cannot really explain how coroutines yielding from writing the data 
block interferes with other coroutines allocating new blocks.

However, in case you agree with my reasoning for the possible corruption 
(which I did not try to produce), this patch (which would fix that) 
would also happen to prevent the really appearing VDI problems, so...

Max

> What could obviously happen is that metadata is written before the data
> is on the disk, but as we don't support backing files for VDI, this is
> irrelevant. (And if it were relevant, it still wouldn't be fixed by your
> patch because the driver never flushes.)
>
>> this patch adds a
>> lock for it. See https://bugs.launchpad.net/qemu/+bug/1422307 for what
>> can go wrong without.
>>
>> Signed-off-by: Max Reitz <mreitz@redhat.com>
> Kevin

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [Qemu-devel] [PATCH] block/vdi: Fix locking for parallel requests
  2015-02-18  7:52 ` Paolo Bonzini
@ 2015-02-18 15:10   ` Max Reitz
  0 siblings, 0 replies; 9+ messages in thread
From: Max Reitz @ 2015-02-18 15:10 UTC (permalink / raw)
  To: Paolo Bonzini, qemu-devel; +Cc: Kevin Wolf, Stefan Weil, Stefan Hajnoczi

On 2015-02-18 at 02:52, Paolo Bonzini wrote:
>
> On 17/02/2015 22:33, Max Reitz wrote:
>> Concurrently modifying the bmap is not a good idea; this patch adds a
>> lock for it. See https://bugs.launchpad.net/qemu/+bug/1422307 for what
>> can go wrong without.
>>
>> Signed-off-by: Max Reitz <mreitz@redhat.com>
>> ---
>>   block/vdi.c | 23 ++++++++++++++++++++---
>>   1 file changed, 20 insertions(+), 3 deletions(-)
>>
>> diff --git a/block/vdi.c b/block/vdi.c
>> index 74030c6..c5ff428 100644
>> --- a/block/vdi.c
>> +++ b/block/vdi.c
>> @@ -51,6 +51,7 @@
>>   
>>   #include "qemu-common.h"
>>   #include "block/block_int.h"
>> +#include "block/coroutine.h"
>>   #include "qemu/module.h"
>>   #include "migration/migration.h"
>>   
>> @@ -196,6 +197,8 @@ typedef struct {
>>       /* VDI header (converted to host endianness). */
>>       VdiHeader header;
>>   
>> +    CoMutex bmap_lock;
>> +
>>       Error *migration_blocker;
>>   } BDRVVdiState;
>>   
>> @@ -498,6 +501,8 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
>>           goto fail_free_bmap;
>>       }
>>   
>> +    qemu_co_mutex_init(&s->bmap_lock);
>> +
>>       /* Disable migration when vdi images are used */
>>       error_set(&s->migration_blocker,
>>                 QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
>> @@ -619,6 +624,9 @@ static int vdi_co_write(BlockDriverState *bs,
>>                  n_sectors, sector_num);
>>   
>>           /* prepare next AIO request */
>> +        if (!block) {
>> +            qemu_co_mutex_lock(&s->bmap_lock);
>> +        }
>>           bmap_entry = le32_to_cpu(s->bmap[block_index]);
>>           if (!VDI_IS_ALLOCATED(bmap_entry)) {
>>               /* Allocate new block and write to it. */
>> @@ -641,9 +649,13 @@ static int vdi_co_write(BlockDriverState *bs,
>>                      (s->block_sectors - n_sectors - sector_in_block) * SECTOR_SIZE);
>>               ret = bdrv_write(bs->file, offset, block, s->block_sectors);
>>           } else {
>> -            uint64_t offset = s->header.offset_data / SECTOR_SIZE +
>> -                              (uint64_t)bmap_entry * s->block_sectors +
>> -                              sector_in_block;
>> +            uint64_t offset;
>> +
>> +            qemu_co_mutex_unlock(&s->bmap_lock);
> This qemu_co_mutex_unlock should only be done "if (!block)", because you
> can have two iterations of the loop, the first going down the "then" and
> the second going down the "else".  In that case you must not give away
> the lock, because the lock will be needed when you write the bitmap.

Oh, right, and also there's an if (ret < 0) { return ret; } between the 
last two hunks of this patch which needs an additional unlock().

Max

>
>> +
>> +            offset = s->header.offset_data / SECTOR_SIZE +
>> +                     (uint64_t)bmap_entry * s->block_sectors +
>> +                     sector_in_block;
>>               ret = bdrv_write(bs->file, offset, buf, n_sectors);
>>           }
>>   
>> @@ -656,6 +668,9 @@ static int vdi_co_write(BlockDriverState *bs,
>>   
>>       logout("finished data write\n");
>>       if (ret < 0) {
>> +        if (block) {
>> +            qemu_co_mutex_unlock(&s->bmap_lock);
>> +        }
>>           return ret;
>>       }
>>   
>> @@ -688,6 +703,8 @@ static int vdi_co_write(BlockDriverState *bs,
>>           logout("will write %u block map sectors starting from entry %u\n",
>>                  n_sectors, bmap_first);
>>           ret = bdrv_write(bs->file, offset, base, n_sectors);
>> +
>> +        qemu_co_mutex_unlock(&s->bmap_lock);
>>       }
>>   
>>       return ret;
>>

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [Qemu-devel] [PATCH] block/vdi: Fix locking for parallel requests
  2015-02-17 21:33 [Qemu-devel] [PATCH] block/vdi: Fix locking for parallel requests Max Reitz
                   ` (2 preceding siblings ...)
  2015-02-18 12:39 ` Kevin Wolf
@ 2015-02-27 11:25 ` Stefan Hajnoczi
  2015-02-27 11:43   ` Kevin Wolf
  3 siblings, 1 reply; 9+ messages in thread
From: Stefan Hajnoczi @ 2015-02-27 11:25 UTC (permalink / raw)
  To: Max Reitz; +Cc: Kevin Wolf, Stefan Weil, qemu-devel, Stefan Hajnoczi

[-- Attachment #1: Type: text/plain, Size: 592 bytes --]

On Tue, Feb 17, 2015 at 04:33:39PM -0500, Max Reitz wrote:
> Concurrently modifying the bmap is not a good idea; this patch adds a
> lock for it. See https://bugs.launchpad.net/qemu/+bug/1422307 for what
> can go wrong without.
> 
> Signed-off-by: Max Reitz <mreitz@redhat.com>
> ---
>  block/vdi.c | 23 ++++++++++++++++++++---
>  1 file changed, 20 insertions(+), 3 deletions(-)

Max: Ping?

Would be nice to merge a new revision with Paolo's comment addressed.
Let's get it in for hard freeze (March 3rd) so that it gets testing
during the release candidate phase.

Stefan

[-- Attachment #2: Type: application/pgp-signature, Size: 473 bytes --]

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [Qemu-devel] [PATCH] block/vdi: Fix locking for parallel requests
  2015-02-27 11:25 ` Stefan Hajnoczi
@ 2015-02-27 11:43   ` Kevin Wolf
  0 siblings, 0 replies; 9+ messages in thread
From: Kevin Wolf @ 2015-02-27 11:43 UTC (permalink / raw)
  To: Stefan Hajnoczi; +Cc: Stefan Weil, qemu-devel, Stefan Hajnoczi, Max Reitz

[-- Attachment #1: Type: text/plain, Size: 1015 bytes --]

Am 27.02.2015 um 12:25 hat Stefan Hajnoczi geschrieben:
> On Tue, Feb 17, 2015 at 04:33:39PM -0500, Max Reitz wrote:
> > Concurrently modifying the bmap is not a good idea; this patch adds a
> > lock for it. See https://bugs.launchpad.net/qemu/+bug/1422307 for what
> > can go wrong without.
> > 
> > Signed-off-by: Max Reitz <mreitz@redhat.com>
> > ---
> >  block/vdi.c | 23 ++++++++++++++++++++---
> >  1 file changed, 20 insertions(+), 3 deletions(-)
> 
> Max: Ping?
> 
> Would be nice to merge a new revision with Paolo's comment addressed.
> Let's get it in for hard freeze (March 3rd) so that it gets testing
> during the release candidate phase.

And we still don't know _why_ this fixes anything.

If we're locking just because that's the obvious thing to try and it
appears to fix something, I would be much more comfortable with one
lock at the very start of the function, and one unlock at its very end,
plus a TODO comment that someone should figure out why it's needed.

Kevin

[-- Attachment #2: Type: application/pgp-signature, Size: 836 bytes --]

^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2015-02-27 11:43 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-02-17 21:33 [Qemu-devel] [PATCH] block/vdi: Fix locking for parallel requests Max Reitz
2015-02-17 21:49 ` Paolo Bonzini
2015-02-17 21:49   ` Max Reitz
2015-02-18  7:52 ` Paolo Bonzini
2015-02-18 15:10   ` Max Reitz
2015-02-18 12:39 ` Kevin Wolf
2015-02-18 15:09   ` Max Reitz
2015-02-27 11:25 ` Stefan Hajnoczi
2015-02-27 11:43   ` Kevin Wolf

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.