* [Qemu-devel] [PATCH v2] XBZRLE: Fix qemu crash when resize the xbzrle cache
@ 2014-02-21 2:14 Gonglei (Arei)
2014-02-21 11:03 ` Dr. David Alan Gilbert
0 siblings, 1 reply; 5+ messages in thread
From: Gonglei (Arei) @ 2014-02-21 2:14 UTC (permalink / raw)
To: qemu-devel; +Cc: chenliang (T), owasserm, Dr. David Alan Gilbert, Juan Quintela
Resizing the xbzrle cache during migration causes a qemu crash,
because the main thread and the migration thread modify the xbzrle
cache size concurrently without lock protection.
Signed-off-by: ChenLiang <chenliang88@huawei.com>
Signed-off-by: Gonglei <arei.gonglei@huawei.com>
---
Changes against the previous version:
*Remove function cache_max_num_items and cache_page_size
*Protect migration_end and ram_save_setup by XBZRLE.lock
arch_init.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++++++----
1 files changed, 56 insertions(+), 5 deletions(-)
diff --git a/arch_init.c b/arch_init.c
index 80574a0..d295237 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -164,26 +164,69 @@ static struct {
uint8_t *encoded_buf;
/* buffer for storing page content */
uint8_t *current_buf;
- /* Cache for XBZRLE */
+ /* Cache for XBZRLE, Protected by lock. */
PageCache *cache;
+ QemuMutex lock;
} XBZRLE = {
.encoded_buf = NULL,
.current_buf = NULL,
.cache = NULL,
+ /* use the lock carefully */
+ .lock = {PTHREAD_MUTEX_INITIALIZER},
};
/* buffer used for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;
+static void XBZRLE_cache_lock(void)
+{
+ qemu_mutex_lock(&XBZRLE.lock);
+}
+
+static void XBZRLE_cache_unlock(void)
+{
+ qemu_mutex_unlock(&XBZRLE.lock);
+}
+
int64_t xbzrle_cache_resize(int64_t new_size)
{
+ PageCache *new_cache, *old_cache, *cache;
+
if (new_size < TARGET_PAGE_SIZE) {
return -1;
}
- if (XBZRLE.cache != NULL) {
- return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
- TARGET_PAGE_SIZE;
+ XBZRLE_cache_lock();
+ /* check XBZRLE.cache again later */
+ cache = XBZRLE.cache;
+ XBZRLE_cache_unlock();
+
+ if (cache != NULL) {
+ if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
+ goto ret;
+ }
+ new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
+ TARGET_PAGE_SIZE);
+ if (!new_cache) {
+ DPRINTF("Error creating cache\n");
+ return -1;
+ }
+
+ XBZRLE_cache_lock();
+ /* the XBZRLE.cache may have be destroyed, check it again */
+ if (XBZRLE.cache != NULL) {
+ old_cache = XBZRLE.cache;
+ XBZRLE.cache = new_cache;
+ new_cache = NULL;
+ }
+ XBZRLE_cache_unlock();
+
+ if (NULL == new_cache) {
+ cache_fini(old_cache);
+ } else {
+ cache_fini(new_cache);
+ }
}
+ret:
return pow2floor(new_size);
}
@@ -522,6 +565,8 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
ret = ram_control_save_page(f, block->offset,
offset, TARGET_PAGE_SIZE, &bytes_sent);
+ XBZRLE_cache_lock();
+
if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
if (ret != RAM_SAVE_CONTROL_DELAYED) {
if (bytes_sent > 0) {
@@ -553,6 +598,8 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
acct_info.norm_pages++;
}
+ XBZRLE_cache_unlock();
+
/* if page is unmodified, continue to the next */
if (bytes_sent > 0) {
last_sent_block = block;
@@ -620,6 +667,7 @@ static void migration_end(void)
migration_bitmap = NULL;
}
+ XBZRLE_cache_lock();
if (XBZRLE.cache) {
cache_fini(XBZRLE.cache);
g_free(XBZRLE.cache);
@@ -629,6 +677,7 @@ static void migration_end(void)
XBZRLE.encoded_buf = NULL;
XBZRLE.current_buf = NULL;
}
+ XBZRLE_cache_unlock();
}
static void ram_migration_cancel(void *opaque)
@@ -659,13 +708,16 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
dirty_rate_high_cnt = 0;
if (migrate_use_xbzrle()) {
+ XBZRLE_cache_lock();
XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
TARGET_PAGE_SIZE,
TARGET_PAGE_SIZE);
if (!XBZRLE.cache) {
+ XBZRLE_cache_unlock();
DPRINTF("Error creating cache\n");
return -1;
}
+ XBZRLE_cache_unlock();
/* We prefer not to abort if there is no memory */
XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
@@ -681,7 +733,6 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
XBZRLE.encoded_buf = NULL;
return -1;
}
-
acct_clear();
}
--
1.6.0.2
Best regards,
-Gonglei
^ permalink raw reply related [flat|nested] 5+ messages in thread
* Re: [Qemu-devel] [PATCH v2] XBZRLE: Fix qemu crash when resize the xbzrle cache
2014-02-21 2:14 [Qemu-devel] [PATCH v2] XBZRLE: Fix qemu crash when resize the xbzrle cache Gonglei (Arei)
@ 2014-02-21 11:03 ` Dr. David Alan Gilbert
2014-02-21 11:59 ` Gonglei (Arei)
0 siblings, 1 reply; 5+ messages in thread
From: Dr. David Alan Gilbert @ 2014-02-21 11:03 UTC (permalink / raw)
To: Gonglei (Arei); +Cc: chenliang (T), owasserm, qemu-devel, Juan Quintela
* Gonglei (Arei) (arei.gonglei@huawei.com) wrote:
> Resizing the xbzrle cache during migration causes qemu-crash,
> because the main-thread and migration-thread modify the xbzrle
> cache size concurrently without lock-protection.
>
> Signed-off-by: ChenLiang <chenliang88@huawei.com>
> Signed-off-by: Gonglei <arei.gonglei@huawei.com>
> ---
> Changes against the previous version:
> *Remove function cache_max_num_items and cache_page_size
> *Protect migration_end and ram_save_setup by XBZRLE.lock
>
> arch_init.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++++++----
> 1 files changed, 56 insertions(+), 5 deletions(-)
>
> diff --git a/arch_init.c b/arch_init.c
> index 80574a0..d295237 100644
> --- a/arch_init.c
> +++ b/arch_init.c
> @@ -164,26 +164,69 @@ static struct {
> uint8_t *encoded_buf;
> /* buffer for storing page content */
> uint8_t *current_buf;
> - /* Cache for XBZRLE */
> + /* Cache for XBZRLE, Protected by lock. */
> PageCache *cache;
> + QemuMutex lock;
> } XBZRLE = {
> .encoded_buf = NULL,
> .current_buf = NULL,
> .cache = NULL,
> + /* use the lock carefully */
> + .lock = {PTHREAD_MUTEX_INITIALIZER},
> };
> /* buffer used for XBZRLE decoding */
> static uint8_t *xbzrle_decoded_buf;
>
> +static void XBZRLE_cache_lock(void)
> +{
> + qemu_mutex_lock(&XBZRLE.lock);
> +}
> +
> +static void XBZRLE_cache_unlock(void)
> +{
> + qemu_mutex_unlock(&XBZRLE.lock);
> +}
> +
You might want to make these only bother with the lock if xbzrle is enabled
- however actually, I think it's probably just best to keep them as is,
and simple.
> int64_t xbzrle_cache_resize(int64_t new_size)
> {
> + PageCache *new_cache, *old_cache, *cache;
> +
> if (new_size < TARGET_PAGE_SIZE) {
> return -1;
> }
>
> - if (XBZRLE.cache != NULL) {
> - return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
> - TARGET_PAGE_SIZE;
> + XBZRLE_cache_lock();
> + /* check XBZRLE.cache again later */
> + cache = XBZRLE.cache;
> + XBZRLE_cache_unlock();
That's a bit of an odd way of atomically reading a pointer;
I think an
smp_rmb();
cache = atomic_read(&XBZRLE.cache);
would have done the trick.
Personally I would have just kept the lock at the top and not released
it until the end of the following section.
> + if (cache != NULL) {
> + if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
> + goto ret;
Hmm goto not really needed there.
> + }
> + new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
> + TARGET_PAGE_SIZE);
> + if (!new_cache) {
> + DPRINTF("Error creating cache\n");
> + return -1;
> + }
> +
> + XBZRLE_cache_lock();
> + /* the XBZRLE.cache may have be destroyed, check it again */
> + if (XBZRLE.cache != NULL) {
> + old_cache = XBZRLE.cache;
> + XBZRLE.cache = new_cache;
> + new_cache = NULL;
> + }
> + XBZRLE_cache_unlock();
> +
> + if (NULL == new_cache) {
> + cache_fini(old_cache);
> + } else {
> + cache_fini(new_cache);
> + }
Yes, it took me a few readings to understand that; i.e. if new_cache=NULL then
it's actually now in use in XBZRLE.cache so only the old_cache needs
freeing.
> }
> +ret:
> return pow2floor(new_size);
> }
>
> @@ -522,6 +565,8 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
> ret = ram_control_save_page(f, block->offset,
> offset, TARGET_PAGE_SIZE, &bytes_sent);
>
> + XBZRLE_cache_lock();
> +
> if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
> if (ret != RAM_SAVE_CONTROL_DELAYED) {
> if (bytes_sent > 0) {
> @@ -553,6 +598,8 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
> acct_info.norm_pages++;
> }
>
> + XBZRLE_cache_unlock();
> +
> /* if page is unmodified, continue to the next */
> if (bytes_sent > 0) {
> last_sent_block = block;
Note that's not actually safe until my other xbzrle patch lands, because
the cache data is currently passed into put_buffer_async that will read it
later (possibly after you've reallocated) - but that's ok, that should
get fixed in the mean time.
> @@ -620,6 +667,7 @@ static void migration_end(void)
> migration_bitmap = NULL;
> }
>
> + XBZRLE_cache_lock();
> if (XBZRLE.cache) {
> cache_fini(XBZRLE.cache);
> g_free(XBZRLE.cache);
> @@ -629,6 +677,7 @@ static void migration_end(void)
> XBZRLE.encoded_buf = NULL;
> XBZRLE.current_buf = NULL;
> }
> + XBZRLE_cache_unlock();
> }
>
> static void ram_migration_cancel(void *opaque)
> @@ -659,13 +708,16 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
> dirty_rate_high_cnt = 0;
>
> if (migrate_use_xbzrle()) {
> + XBZRLE_cache_lock();
> XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
> TARGET_PAGE_SIZE,
> TARGET_PAGE_SIZE);
> if (!XBZRLE.cache) {
> + XBZRLE_cache_unlock();
> DPRINTF("Error creating cache\n");
> return -1;
> }
> + XBZRLE_cache_unlock();
>
> /* We prefer not to abort if there is no memory */
> XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
> @@ -681,7 +733,6 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
> XBZRLE.encoded_buf = NULL;
> return -1;
> }
> -
> acct_clear();
> }
>
> --
> 1.6.0.2
>
> Best regards,
> -Gonglei
Thanks,
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [Qemu-devel] [PATCH v2] XBZRLE: Fix qemu crash when resize the xbzrle cache
2014-02-21 11:03 ` Dr. David Alan Gilbert
@ 2014-02-21 11:59 ` Gonglei (Arei)
2014-02-21 12:10 ` Dr. David Alan Gilbert
0 siblings, 1 reply; 5+ messages in thread
From: Gonglei (Arei) @ 2014-02-21 11:59 UTC (permalink / raw)
To: Dr. David Alan Gilbert; +Cc: chenliang (T), owasserm, qemu-devel, Juan Quintela
Hi,
> -----Original Message-----
> From: Dr. David Alan Gilbert [mailto:dgilbert@redhat.com]
> Sent: Friday, February 21, 2014 7:04 PM
> To: Gonglei (Arei)
> Cc: qemu-devel@nongnu.org; Juan Quintela; owasserm@redhat.com;
> chenliang (T)
> Subject: Re: [PATCH v2] XBZRLE: Fix qemu crash when resize the xbzrle cache
>
> * Gonglei (Arei) (arei.gonglei@huawei.com) wrote:
> > Resizing the xbzrle cache during migration causes qemu-crash,
> > because the main-thread and migration-thread modify the xbzrle
> > cache size concurrently without lock-protection.
> >
> > Signed-off-by: ChenLiang <chenliang88@huawei.com>
> > Signed-off-by: Gonglei <arei.gonglei@huawei.com>
> > ---
> > Changes against the previous version:
> > *Remove function cache_max_num_items and cache_page_size
> > *Protect migration_end and ram_save_setup by XBZRLE.lock
> >
> > arch_init.c | 61
> ++++++++++++++++++++++++++++++++++++++++++++++++++++++----
> > 1 files changed, 56 insertions(+), 5 deletions(-)
> >
> > diff --git a/arch_init.c b/arch_init.c
> > index 80574a0..d295237 100644
> > --- a/arch_init.c
> > +++ b/arch_init.c
> > @@ -164,26 +164,69 @@ static struct {
> > uint8_t *encoded_buf;
> > /* buffer for storing page content */
> > uint8_t *current_buf;
> > - /* Cache for XBZRLE */
> > + /* Cache for XBZRLE, Protected by lock. */
> > PageCache *cache;
> > + QemuMutex lock;
> > } XBZRLE = {
> > .encoded_buf = NULL,
> > .current_buf = NULL,
> > .cache = NULL,
> > + /* use the lock carefully */
> > + .lock = {PTHREAD_MUTEX_INITIALIZER},
> > };
> > /* buffer used for XBZRLE decoding */
> > static uint8_t *xbzrle_decoded_buf;
> >
> > +static void XBZRLE_cache_lock(void)
> > +{
> > + qemu_mutex_lock(&XBZRLE.lock);
> > +}
> > +
> > +static void XBZRLE_cache_unlock(void)
> > +{
> > + qemu_mutex_unlock(&XBZRLE.lock);
> > +}
> > +
>
> You might want to make these only bother with the lock if xbzrle is enabled
> - however actually, I think it's probably just best to keep them as is,
> and simple.
To be honest, we can't quite follow your meaning. Could you explain it in more detail?
>
> > int64_t xbzrle_cache_resize(int64_t new_size)
> > {
> > + PageCache *new_cache, *old_cache, *cache;
> > +
> > if (new_size < TARGET_PAGE_SIZE) {
> > return -1;
> > }
> >
> > - if (XBZRLE.cache != NULL) {
> > - return cache_resize(XBZRLE.cache, new_size /
> TARGET_PAGE_SIZE) *
> > - TARGET_PAGE_SIZE;
> > + XBZRLE_cache_lock();
> > + /* check XBZRLE.cache again later */
> > + cache = XBZRLE.cache;
> > + XBZRLE_cache_unlock();
>
> That's a bit of an odd way of atomically reading a pointer;
> I think an
> smp_rmb();
> cache = atomic_read(&XBZRLE.cache);
>
> would have done the trick.
We will rework the function so that it does not need the temporary variable "cache".
>
> Personally I would have just kept the lock at the top and not released
> it until the end of the following section.
>
Yes, you are right. But the cache_init function may be time-consuming, so we think it is better to unlock
the XBZRLE cache before calling cache_init and re-lock it afterwards.
> > + if (cache != NULL) {
> > + if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
> > + goto ret;
>
> Hmm goto not really needed there.
Check.
>
> > + }
> > + new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
> > + TARGET_PAGE_SIZE);
> > + if (!new_cache) {
> > + DPRINTF("Error creating cache\n");
> > + return -1;
> > + }
> > +
> > + XBZRLE_cache_lock();
> > + /* the XBZRLE.cache may have be destroyed, check it again */
> > + if (XBZRLE.cache != NULL) {
> > + old_cache = XBZRLE.cache;
> > + XBZRLE.cache = new_cache;
> > + new_cache = NULL;
> > + }
> > + XBZRLE_cache_unlock();
> > +
>
>
> > + if (NULL == new_cache) {
> > + cache_fini(old_cache);
> > + } else {
> > + cache_fini(new_cache);
> > + }
>
> Yes, it took me a few readings to understand that; i.e. if new_cache=NULL then
> it's actually now in use in XBZRLE.cache so only the old_cache needs
> freeing.
>
> > }
> > +ret:
> > return pow2floor(new_size);
> > }
> >
> > @@ -522,6 +565,8 @@ static int ram_save_block(QEMUFile *f, bool
> last_stage)
> > ret = ram_control_save_page(f, block->offset,
> > offset, TARGET_PAGE_SIZE,
> &bytes_sent);
> >
> > + XBZRLE_cache_lock();
> > +
> > if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
> > if (ret != RAM_SAVE_CONTROL_DELAYED) {
> > if (bytes_sent > 0) {
> > @@ -553,6 +598,8 @@ static int ram_save_block(QEMUFile *f, bool
> last_stage)
> > acct_info.norm_pages++;
> > }
> >
> > + XBZRLE_cache_unlock();
> > +
> > /* if page is unmodified, continue to the next */
> > if (bytes_sent > 0) {
> > last_sent_block = block;
>
> Note that's not actually safe until my other xbzrle patch lands, because
> the cache data is currently passed into put_buffer_async that will read it
> later (possibly after you've reallocated) - but that's ok, that should
> get fixed in the mean time.
>
> > @@ -620,6 +667,7 @@ static void migration_end(void)
> > migration_bitmap = NULL;
> > }
> >
> > + XBZRLE_cache_lock();
> > if (XBZRLE.cache) {
> > cache_fini(XBZRLE.cache);
> > g_free(XBZRLE.cache);
> > @@ -629,6 +677,7 @@ static void migration_end(void)
> > XBZRLE.encoded_buf = NULL;
> > XBZRLE.current_buf = NULL;
> > }
> > + XBZRLE_cache_unlock();
> > }
> >
> > static void ram_migration_cancel(void *opaque)
> > @@ -659,13 +708,16 @@ static int ram_save_setup(QEMUFile *f, void
> *opaque)
> > dirty_rate_high_cnt = 0;
> >
> > if (migrate_use_xbzrle()) {
> > + XBZRLE_cache_lock();
> > XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
> > TARGET_PAGE_SIZE,
> > TARGET_PAGE_SIZE);
> > if (!XBZRLE.cache) {
> > + XBZRLE_cache_unlock();
> > DPRINTF("Error creating cache\n");
> > return -1;
> > }
> > + XBZRLE_cache_unlock();
> >
> > /* We prefer not to abort if there is no memory */
> > XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
> > @@ -681,7 +733,6 @@ static int ram_save_setup(QEMUFile *f, void
> *opaque)
> > XBZRLE.encoded_buf = NULL;
> > return -1;
> > }
> > -
> > acct_clear();
> > }
> >
> > --
> > 1.6.0.2
> >
> > Best regards,
> > -Gonglei
>
> Thanks,
>
> Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
>
> --
> Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
Best regards,
-Gonglei
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [Qemu-devel] [PATCH v2] XBZRLE: Fix qemu crash when resize the xbzrle cache
2014-02-21 11:59 ` Gonglei (Arei)
@ 2014-02-21 12:10 ` Dr. David Alan Gilbert
2014-02-21 13:32 ` Gonglei (Arei)
0 siblings, 1 reply; 5+ messages in thread
From: Dr. David Alan Gilbert @ 2014-02-21 12:10 UTC (permalink / raw)
To: Gonglei (Arei); +Cc: chenliang (T), owasserm, qemu-devel, Juan Quintela
* Gonglei (Arei) (arei.gonglei@huawei.com) wrote:
> Hi,
<snip>
> > > +static void XBZRLE_cache_lock(void)
> > > +{
> > > + qemu_mutex_lock(&XBZRLE.lock);
> > > +}
> > > +
> > > +static void XBZRLE_cache_unlock(void)
> > > +{
> > > + qemu_mutex_unlock(&XBZRLE.lock);
> > > +}
> > > +
> >
> > You might want to make these only bother with the lock if xbzrle is enabled
> > - however actually, I think it's probably just best to keep them as is,
> > and simple.
> To be honest, we can't follow your meaning. Can you explain it in detail.
These two functions are called from a few places, including ram_save_block
even if xbzrle isn't enabled; I was just suggesting you might not want
to check the lock if xbzrle is disabled; however, I think it's simpler
to leave it as is, and doubt the overhead is worth the complexity.
Dave
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [Qemu-devel] [PATCH v2] XBZRLE: Fix qemu crash when resize the xbzrle cache
2014-02-21 12:10 ` Dr. David Alan Gilbert
@ 2014-02-21 13:32 ` Gonglei (Arei)
0 siblings, 0 replies; 5+ messages in thread
From: Gonglei (Arei) @ 2014-02-21 13:32 UTC (permalink / raw)
To: Dr. David Alan Gilbert; +Cc: chenliang (T), owasserm, qemu-devel, Juan Quintela
> -----Original Message-----
> From: Dr. David Alan Gilbert [mailto:dgilbert@redhat.com]
> Sent: Friday, February 21, 2014 8:10 PM
> To: Gonglei (Arei)
> Cc: qemu-devel@nongnu.org; Juan Quintela; owasserm@redhat.com;
> chenliang (T)
> Subject: Re: [PATCH v2] XBZRLE: Fix qemu crash when resize the xbzrle cache
>
> * Gonglei (Arei) (arei.gonglei@huawei.com) wrote:
> > Hi,
>
> <snip>
>
> > > > +static void XBZRLE_cache_lock(void)
> > > > +{
> > > > + qemu_mutex_lock(&XBZRLE.lock);
> > > > +}
> > > > +
> > > > +static void XBZRLE_cache_unlock(void)
> > > > +{
> > > > + qemu_mutex_unlock(&XBZRLE.lock);
> > > > +}
> > > > +
> > >
> > > You might want to make these only bother with the lock if xbzrle is enabled
> > > - however actually, I think it's probably just best to keep them as is,
> > > and simple.
> > To be honest, we can't follow your meaning. Can you explain it in detail.
>
> These two functions are called from a few places, including ram_save_block
> even if xbzrle isn't enabled; I was just suggesting you might not want
> to check the lock if xbzrle is disabled; however, I think it's simpler
> to leave it as is, and doubt the overhead is worth the complexity.
I see, thank you so much, Dave.
>
> Dave
> --
> Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
Best regards,
-Gonglei
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2014-02-21 13:35 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-02-21 2:14 [Qemu-devel] [PATCH v2] XBZRLE: Fix qemu crash when resize the xbzrle cache Gonglei (Arei)
2014-02-21 11:03 ` Dr. David Alan Gilbert
2014-02-21 11:59 ` Gonglei (Arei)
2014-02-21 12:10 ` Dr. David Alan Gilbert
2014-02-21 13:32 ` Gonglei (Arei)
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.