* [Qemu-devel] [PATCH v2 1/5] migration: Rename save_live_setup() to save_setup()
2017-06-13 9:53 [Qemu-devel] [PATCH v2 0/5] Create setup/cleanup methods for migration incoming side Juan Quintela
@ 2017-06-13 9:53 ` Juan Quintela
2017-06-16 17:30 ` Dr. David Alan Gilbert
2017-06-13 9:53 ` [Qemu-devel] [PATCH v2 2/5] migration: Rename cleanup() to save_cleanup() Juan Quintela
` (3 subsequent siblings)
4 siblings, 1 reply; 12+ messages in thread
From: Juan Quintela @ 2017-06-13 9:53 UTC (permalink / raw)
To: qemu-devel; +Cc: dgilbert, lvivier, peterx, kwolf, stefanha
We are going to use it now for more than save live regions.
Once there rename qemu_savevm_state_begin() to qemu_savevm_state_setup().
Signed-off-by: Juan Quintela <quintela@redhat.com>
---
hw/ppc/spapr.c | 2 +-
include/migration/register.h | 2 +-
migration/block.c | 2 +-
migration/colo.c | 2 +-
migration/migration.c | 2 +-
migration/ram.c | 2 +-
migration/savevm.c | 12 ++++++------
migration/savevm.h | 2 +-
migration/trace-events | 2 +-
9 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index dcd44a1..7db5396 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1868,7 +1868,7 @@ static void htab_cleanup(void *opaque)
}
static SaveVMHandlers savevm_htab_handlers = {
- .save_live_setup = htab_save_setup,
+ .save_setup = htab_save_setup,
.save_live_iterate = htab_save_iterate,
.save_live_complete_precopy = htab_save_complete,
.cleanup = htab_cleanup,
diff --git a/include/migration/register.h b/include/migration/register.h
index 717c617..333e3df 100644
--- a/include/migration/register.h
+++ b/include/migration/register.h
@@ -36,7 +36,7 @@ typedef struct SaveVMHandlers {
int (*save_live_iterate)(QEMUFile *f, void *opaque);
/* This runs outside the iothread lock! */
- int (*save_live_setup)(QEMUFile *f, void *opaque);
+ int (*save_setup)(QEMUFile *f, void *opaque);
void (*save_live_pending)(QEMUFile *f, void *opaque,
uint64_t threshold_size,
uint64_t *non_postcopiable_pending,
diff --git a/migration/block.c b/migration/block.c
index 3aae5a3..c160d41 100644
--- a/migration/block.c
+++ b/migration/block.c
@@ -1007,7 +1007,7 @@ static bool block_is_active(void *opaque)
}
static SaveVMHandlers savevm_block_handlers = {
- .save_live_setup = block_save_setup,
+ .save_setup = block_save_setup,
.save_live_iterate = block_save_iterate,
.save_live_complete_precopy = block_save_complete,
.save_live_pending = block_save_pending,
diff --git a/migration/colo.c b/migration/colo.c
index c436d63..8bc1690 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -350,7 +350,7 @@ static int colo_do_checkpoint_transaction(MigrationState *s,
/* Disable block migration */
migrate_set_block_enabled(false, &local_err);
qemu_savevm_state_header(fb);
- qemu_savevm_state_begin(fb);
+ qemu_savevm_state_setup(fb);
qemu_mutex_lock_iothread();
qemu_savevm_state_complete_precopy(fb, false);
qemu_mutex_unlock_iothread();
diff --git a/migration/migration.c b/migration/migration.c
index 52dac9d..0799424 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1825,7 +1825,7 @@ static void *migration_thread(void *opaque)
qemu_savevm_send_postcopy_advise(s->to_dst_file);
}
- qemu_savevm_state_begin(s->to_dst_file);
+ qemu_savevm_state_setup(s->to_dst_file);
s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
diff --git a/migration/ram.c b/migration/ram.c
index 7f687f5..3454bf7 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2605,7 +2605,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
}
static SaveVMHandlers savevm_ram_handlers = {
- .save_live_setup = ram_save_setup,
+ .save_setup = ram_save_setup,
.save_live_iterate = ram_save_iterate,
.save_live_complete_postcopy = ram_save_complete,
.save_live_complete_precopy = ram_save_complete,
diff --git a/migration/savevm.c b/migration/savevm.c
index f32a82d..7f2a5bd 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -606,7 +606,7 @@ int register_savevm_live(DeviceState *dev,
se->opaque = opaque;
se->vmsd = NULL;
/* if this is a live_savem then set is_ram */
- if (ops->save_live_setup != NULL) {
+ if (ops->save_setup != NULL) {
se->is_ram = 1;
}
@@ -977,14 +977,14 @@ void qemu_savevm_state_header(QEMUFile *f)
}
-void qemu_savevm_state_begin(QEMUFile *f)
+void qemu_savevm_state_setup(QEMUFile *f)
{
SaveStateEntry *se;
int ret;
- trace_savevm_state_begin();
+ trace_savevm_state_setup();
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
- if (!se->ops || !se->ops->save_live_setup) {
+ if (!se->ops || !se->ops->save_setup) {
continue;
}
if (se->ops && se->ops->is_active) {
@@ -994,7 +994,7 @@ void qemu_savevm_state_begin(QEMUFile *f)
}
save_section_header(f, se, QEMU_VM_SECTION_START);
- ret = se->ops->save_live_setup(f, se->opaque);
+ ret = se->ops->save_setup(f, se->opaque);
save_section_footer(f, se);
if (ret < 0) {
qemu_file_set_error(f, ret);
@@ -1252,7 +1252,7 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp)
qemu_mutex_unlock_iothread();
qemu_savevm_state_header(f);
- qemu_savevm_state_begin(f);
+ qemu_savevm_state_setup(f);
qemu_mutex_lock_iothread();
while (qemu_file_get_error(f) == 0) {
diff --git a/migration/savevm.h b/migration/savevm.h
index 45b59c1..54ce8f6 100644
--- a/migration/savevm.h
+++ b/migration/savevm.h
@@ -30,7 +30,7 @@
#define QEMU_VM_SECTION_FOOTER 0x7e
bool qemu_savevm_state_blocked(Error **errp);
-void qemu_savevm_state_begin(QEMUFile *f);
+void qemu_savevm_state_setup(QEMUFile *f);
void qemu_savevm_state_header(QEMUFile *f);
int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy);
void qemu_savevm_state_cleanup(void);
diff --git a/migration/trace-events b/migration/trace-events
index 38345be..9669e94 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -32,7 +32,7 @@ savevm_send_open_return_path(void) ""
savevm_send_ping(uint32_t val) "%x"
savevm_send_postcopy_listen(void) ""
savevm_send_postcopy_run(void) ""
-savevm_state_begin(void) ""
+savevm_state_setup(void) ""
savevm_state_header(void) ""
savevm_state_iterate(void) ""
savevm_state_cleanup(void) ""
--
2.9.4
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [Qemu-devel] [PATCH v2 1/5] migration: Rename save_live_setup() to save_setup()
2017-06-13 9:53 ` [Qemu-devel] [PATCH v2 1/5] migration: Rename save_live_setup() to save_setup() Juan Quintela
@ 2017-06-16 17:30 ` Dr. David Alan Gilbert
0 siblings, 0 replies; 12+ messages in thread
From: Dr. David Alan Gilbert @ 2017-06-16 17:30 UTC (permalink / raw)
To: Juan Quintela; +Cc: qemu-devel, lvivier, peterx, kwolf, stefanha
* Juan Quintela (quintela@redhat.com) wrote:
> We are going to use it now for more than save live regions.
> Once there rename qemu_savevm_state_begin() to qemu_savevm_state_setup().
>
> Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
> ---
> hw/ppc/spapr.c | 2 +-
> include/migration/register.h | 2 +-
> migration/block.c | 2 +-
> migration/colo.c | 2 +-
> migration/migration.c | 2 +-
> migration/ram.c | 2 +-
> migration/savevm.c | 12 ++++++------
> migration/savevm.h | 2 +-
> migration/trace-events | 2 +-
> 9 files changed, 14 insertions(+), 14 deletions(-)
>
> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
> index dcd44a1..7db5396 100644
> --- a/hw/ppc/spapr.c
> +++ b/hw/ppc/spapr.c
> @@ -1868,7 +1868,7 @@ static void htab_cleanup(void *opaque)
> }
>
> static SaveVMHandlers savevm_htab_handlers = {
> - .save_live_setup = htab_save_setup,
> + .save_setup = htab_save_setup,
> .save_live_iterate = htab_save_iterate,
> .save_live_complete_precopy = htab_save_complete,
> .cleanup = htab_cleanup,
> diff --git a/include/migration/register.h b/include/migration/register.h
> index 717c617..333e3df 100644
> --- a/include/migration/register.h
> +++ b/include/migration/register.h
> @@ -36,7 +36,7 @@ typedef struct SaveVMHandlers {
> int (*save_live_iterate)(QEMUFile *f, void *opaque);
>
> /* This runs outside the iothread lock! */
> - int (*save_live_setup)(QEMUFile *f, void *opaque);
> + int (*save_setup)(QEMUFile *f, void *opaque);
> void (*save_live_pending)(QEMUFile *f, void *opaque,
> uint64_t threshold_size,
> uint64_t *non_postcopiable_pending,
> diff --git a/migration/block.c b/migration/block.c
> index 3aae5a3..c160d41 100644
> --- a/migration/block.c
> +++ b/migration/block.c
> @@ -1007,7 +1007,7 @@ static bool block_is_active(void *opaque)
> }
>
> static SaveVMHandlers savevm_block_handlers = {
> - .save_live_setup = block_save_setup,
> + .save_setup = block_save_setup,
> .save_live_iterate = block_save_iterate,
> .save_live_complete_precopy = block_save_complete,
> .save_live_pending = block_save_pending,
> diff --git a/migration/colo.c b/migration/colo.c
> index c436d63..8bc1690 100644
> --- a/migration/colo.c
> +++ b/migration/colo.c
> @@ -350,7 +350,7 @@ static int colo_do_checkpoint_transaction(MigrationState *s,
> /* Disable block migration */
> migrate_set_block_enabled(false, &local_err);
> qemu_savevm_state_header(fb);
> - qemu_savevm_state_begin(fb);
> + qemu_savevm_state_setup(fb);
> qemu_mutex_lock_iothread();
> qemu_savevm_state_complete_precopy(fb, false);
> qemu_mutex_unlock_iothread();
> diff --git a/migration/migration.c b/migration/migration.c
> index 52dac9d..0799424 100644
> --- a/migration/migration.c
> +++ b/migration/migration.c
> @@ -1825,7 +1825,7 @@ static void *migration_thread(void *opaque)
> qemu_savevm_send_postcopy_advise(s->to_dst_file);
> }
>
> - qemu_savevm_state_begin(s->to_dst_file);
> + qemu_savevm_state_setup(s->to_dst_file);
>
> s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
> migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
> diff --git a/migration/ram.c b/migration/ram.c
> index 7f687f5..3454bf7 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -2605,7 +2605,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
> }
>
> static SaveVMHandlers savevm_ram_handlers = {
> - .save_live_setup = ram_save_setup,
> + .save_setup = ram_save_setup,
> .save_live_iterate = ram_save_iterate,
> .save_live_complete_postcopy = ram_save_complete,
> .save_live_complete_precopy = ram_save_complete,
> diff --git a/migration/savevm.c b/migration/savevm.c
> index f32a82d..7f2a5bd 100644
> --- a/migration/savevm.c
> +++ b/migration/savevm.c
> @@ -606,7 +606,7 @@ int register_savevm_live(DeviceState *dev,
> se->opaque = opaque;
> se->vmsd = NULL;
> /* if this is a live_savem then set is_ram */
> - if (ops->save_live_setup != NULL) {
> + if (ops->save_setup != NULL) {
> se->is_ram = 1;
> }
>
> @@ -977,14 +977,14 @@ void qemu_savevm_state_header(QEMUFile *f)
>
> }
>
> -void qemu_savevm_state_begin(QEMUFile *f)
> +void qemu_savevm_state_setup(QEMUFile *f)
> {
> SaveStateEntry *se;
> int ret;
>
> - trace_savevm_state_begin();
> + trace_savevm_state_setup();
> QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> - if (!se->ops || !se->ops->save_live_setup) {
> + if (!se->ops || !se->ops->save_setup) {
> continue;
> }
> if (se->ops && se->ops->is_active) {
> @@ -994,7 +994,7 @@ void qemu_savevm_state_begin(QEMUFile *f)
> }
> save_section_header(f, se, QEMU_VM_SECTION_START);
>
> - ret = se->ops->save_live_setup(f, se->opaque);
> + ret = se->ops->save_setup(f, se->opaque);
> save_section_footer(f, se);
> if (ret < 0) {
> qemu_file_set_error(f, ret);
> @@ -1252,7 +1252,7 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp)
>
> qemu_mutex_unlock_iothread();
> qemu_savevm_state_header(f);
> - qemu_savevm_state_begin(f);
> + qemu_savevm_state_setup(f);
> qemu_mutex_lock_iothread();
>
> while (qemu_file_get_error(f) == 0) {
> diff --git a/migration/savevm.h b/migration/savevm.h
> index 45b59c1..54ce8f6 100644
> --- a/migration/savevm.h
> +++ b/migration/savevm.h
> @@ -30,7 +30,7 @@
> #define QEMU_VM_SECTION_FOOTER 0x7e
>
> bool qemu_savevm_state_blocked(Error **errp);
> -void qemu_savevm_state_begin(QEMUFile *f);
> +void qemu_savevm_state_setup(QEMUFile *f);
> void qemu_savevm_state_header(QEMUFile *f);
> int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy);
> void qemu_savevm_state_cleanup(void);
> diff --git a/migration/trace-events b/migration/trace-events
> index 38345be..9669e94 100644
> --- a/migration/trace-events
> +++ b/migration/trace-events
> @@ -32,7 +32,7 @@ savevm_send_open_return_path(void) ""
> savevm_send_ping(uint32_t val) "%x"
> savevm_send_postcopy_listen(void) ""
> savevm_send_postcopy_run(void) ""
> -savevm_state_begin(void) ""
> +savevm_state_setup(void) ""
> savevm_state_header(void) ""
> savevm_state_iterate(void) ""
> savevm_state_cleanup(void) ""
> --
> 2.9.4
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
^ permalink raw reply [flat|nested] 12+ messages in thread
* [Qemu-devel] [PATCH v2 2/5] migration: Rename cleanup() to save_cleanup()
2017-06-13 9:53 [Qemu-devel] [PATCH v2 0/5] Create setup/cleanup methods for migration incoming side Juan Quintela
2017-06-13 9:53 ` [Qemu-devel] [PATCH v2 1/5] migration: Rename save_live_setup() to save_setup() Juan Quintela
@ 2017-06-13 9:53 ` Juan Quintela
2017-06-16 17:33 ` Dr. David Alan Gilbert
2017-06-13 9:53 ` [Qemu-devel] [PATCH v2 3/5] migration: Create load_setup()/cleanup() methods Juan Quintela
` (2 subsequent siblings)
4 siblings, 1 reply; 12+ messages in thread
From: Juan Quintela @ 2017-06-13 9:53 UTC (permalink / raw)
To: qemu-devel; +Cc: dgilbert, lvivier, peterx, kwolf, stefanha
We need a cleanup for loads, so we rename here to be consistent.
Signed-off-by: Juan Quintela <quintela@redhat.com>
---
hw/ppc/spapr.c | 2 +-
include/migration/register.h | 2 +-
migration/block.c | 2 +-
migration/ram.c | 2 +-
migration/savevm.c | 4 ++--
5 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 7db5396..dabf822 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1871,7 +1871,7 @@ static SaveVMHandlers savevm_htab_handlers = {
.save_setup = htab_save_setup,
.save_live_iterate = htab_save_iterate,
.save_live_complete_precopy = htab_save_complete,
- .cleanup = htab_cleanup,
+ .save_cleanup = htab_cleanup,
.load_state = htab_load,
};
diff --git a/include/migration/register.h b/include/migration/register.h
index 333e3df..9ad1e4c 100644
--- a/include/migration/register.h
+++ b/include/migration/register.h
@@ -21,7 +21,7 @@ typedef struct SaveVMHandlers {
/* This runs inside the iothread lock. */
SaveStateHandler *save_state;
- void (*cleanup)(void *opaque);
+ void (*save_cleanup)(void *opaque);
int (*save_live_complete_postcopy)(QEMUFile *f, void *opaque);
int (*save_live_complete_precopy)(QEMUFile *f, void *opaque);
diff --git a/migration/block.c b/migration/block.c
index c160d41..ad36afb 100644
--- a/migration/block.c
+++ b/migration/block.c
@@ -1012,7 +1012,7 @@ static SaveVMHandlers savevm_block_handlers = {
.save_live_complete_precopy = block_save_complete,
.save_live_pending = block_save_pending,
.load_state = block_load,
- .cleanup = block_migration_cleanup,
+ .save_cleanup = block_migration_cleanup,
.is_active = block_is_active,
};
diff --git a/migration/ram.c b/migration/ram.c
index 3454bf7..be78e42 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2611,7 +2611,7 @@ static SaveVMHandlers savevm_ram_handlers = {
.save_live_complete_precopy = ram_save_complete,
.save_live_pending = ram_save_pending,
.load_state = ram_load,
- .cleanup = ram_migration_cleanup,
+ .save_cleanup = ram_migration_cleanup,
};
void ram_mig_init(void)
diff --git a/migration/savevm.c b/migration/savevm.c
index 7f2a5bd..fb93931 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1226,8 +1226,8 @@ void qemu_savevm_state_cleanup(void)
trace_savevm_state_cleanup();
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
- if (se->ops && se->ops->cleanup) {
- se->ops->cleanup(se->opaque);
+ if (se->ops && se->ops->save_cleanup) {
+ se->ops->save_cleanup(se->opaque);
}
}
}
--
2.9.4
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [Qemu-devel] [PATCH v2 2/5] migration: Rename cleanup() to save_cleanup()
2017-06-13 9:53 ` [Qemu-devel] [PATCH v2 2/5] migration: Rename cleanup() to save_cleanup() Juan Quintela
@ 2017-06-16 17:33 ` Dr. David Alan Gilbert
0 siblings, 0 replies; 12+ messages in thread
From: Dr. David Alan Gilbert @ 2017-06-16 17:33 UTC (permalink / raw)
To: Juan Quintela; +Cc: qemu-devel, lvivier, peterx, kwolf, stefanha
* Juan Quintela (quintela@redhat.com) wrote:
> We need a cleanup for loads, so we rename here to be consistent.
>
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
> hw/ppc/spapr.c | 2 +-
> include/migration/register.h | 2 +-
> migration/block.c | 2 +-
> migration/ram.c | 2 +-
> migration/savevm.c | 4 ++--
> 5 files changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
> index 7db5396..dabf822 100644
> --- a/hw/ppc/spapr.c
> +++ b/hw/ppc/spapr.c
> @@ -1871,7 +1871,7 @@ static SaveVMHandlers savevm_htab_handlers = {
> .save_setup = htab_save_setup,
> .save_live_iterate = htab_save_iterate,
> .save_live_complete_precopy = htab_save_complete,
> - .cleanup = htab_cleanup,
> + .save_cleanup = htab_cleanup,
> .load_state = htab_load,
It might make sense to follow the rename through so that those
become htab_save_cleanup.
However, only minor.
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
> };
>
> diff --git a/include/migration/register.h b/include/migration/register.h
> index 333e3df..9ad1e4c 100644
> --- a/include/migration/register.h
> +++ b/include/migration/register.h
> @@ -21,7 +21,7 @@ typedef struct SaveVMHandlers {
> /* This runs inside the iothread lock. */
> SaveStateHandler *save_state;
>
> - void (*cleanup)(void *opaque);
> + void (*save_cleanup)(void *opaque);
> int (*save_live_complete_postcopy)(QEMUFile *f, void *opaque);
> int (*save_live_complete_precopy)(QEMUFile *f, void *opaque);
>
> diff --git a/migration/block.c b/migration/block.c
> index c160d41..ad36afb 100644
> --- a/migration/block.c
> +++ b/migration/block.c
> @@ -1012,7 +1012,7 @@ static SaveVMHandlers savevm_block_handlers = {
> .save_live_complete_precopy = block_save_complete,
> .save_live_pending = block_save_pending,
> .load_state = block_load,
> - .cleanup = block_migration_cleanup,
> + .save_cleanup = block_migration_cleanup,
> .is_active = block_is_active,
> };
>
> diff --git a/migration/ram.c b/migration/ram.c
> index 3454bf7..be78e42 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -2611,7 +2611,7 @@ static SaveVMHandlers savevm_ram_handlers = {
> .save_live_complete_precopy = ram_save_complete,
> .save_live_pending = ram_save_pending,
> .load_state = ram_load,
> - .cleanup = ram_migration_cleanup,
> + .save_cleanup = ram_migration_cleanup,
> };
>
> void ram_mig_init(void)
> diff --git a/migration/savevm.c b/migration/savevm.c
> index 7f2a5bd..fb93931 100644
> --- a/migration/savevm.c
> +++ b/migration/savevm.c
> @@ -1226,8 +1226,8 @@ void qemu_savevm_state_cleanup(void)
>
> trace_savevm_state_cleanup();
> QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> - if (se->ops && se->ops->cleanup) {
> - se->ops->cleanup(se->opaque);
> + if (se->ops && se->ops->save_cleanup) {
> + se->ops->save_cleanup(se->opaque);
> }
> }
> }
> --
> 2.9.4
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
^ permalink raw reply [flat|nested] 12+ messages in thread
* [Qemu-devel] [PATCH v2 3/5] migration: Create load_setup()/cleanup() methods
2017-06-13 9:53 [Qemu-devel] [PATCH v2 0/5] Create setup/cleanup methods for migration incoming side Juan Quintela
2017-06-13 9:53 ` [Qemu-devel] [PATCH v2 1/5] migration: Rename save_live_setup() to save_setup() Juan Quintela
2017-06-13 9:53 ` [Qemu-devel] [PATCH v2 2/5] migration: Rename cleanup() to save_cleanup() Juan Quintela
@ 2017-06-13 9:53 ` Juan Quintela
2017-06-16 17:54 ` Dr. David Alan Gilbert
2017-06-13 9:53 ` [Qemu-devel] [PATCH v2 4/5] migration: Convert ram to use new load_setup()/load_cleanup() Juan Quintela
2017-06-13 9:53 ` [Qemu-devel] [PATCH v2 5/5] migration: Make compression_threads use save/load_setup/cleanup() Juan Quintela
4 siblings, 1 reply; 12+ messages in thread
From: Juan Quintela @ 2017-06-13 9:53 UTC (permalink / raw)
To: qemu-devel; +Cc: dgilbert, lvivier, peterx, kwolf, stefanha
We need to do things at load time and at cleanup time.
Signed-off-by: Juan Quintela <quintela@redhat.com>
---
include/migration/register.h | 2 ++
migration/savevm.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
migration/trace-events | 2 ++
3 files changed, 48 insertions(+)
diff --git a/include/migration/register.h b/include/migration/register.h
index 9ad1e4c..4b5c0e0 100644
--- a/include/migration/register.h
+++ b/include/migration/register.h
@@ -42,6 +42,8 @@ typedef struct SaveVMHandlers {
uint64_t *non_postcopiable_pending,
uint64_t *postcopiable_pending);
LoadStateHandler *load_state;
+ int (*load_setup)(QEMUFile *f, void *opaque);
+ int (*load_cleanup)(void *opaque);
} SaveVMHandlers;
int register_savevm_live(DeviceState *dev,
diff --git a/migration/savevm.c b/migration/savevm.c
index fb93931..ed36f35 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1912,6 +1912,43 @@ qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis)
return 0;
}
+static int qemu_loadvm_state_setup(QEMUFile *f)
+{
+ SaveStateEntry *se;
+ int ret;
+
+ trace_loadvm_state_setup();
+ QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+ if (!se->ops || !se->ops->load_setup) {
+ continue;
+ }
+ if (se->ops && se->ops->is_active) {
+ if (!se->ops->is_active(se->opaque)) {
+ continue;
+ }
+ }
+
+ ret = se->ops->load_setup(f, se->opaque);
+ if (ret < 0) {
+ qemu_file_set_error(f, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void qemu_loadvm_state_cleanup(void)
+{
+ SaveStateEntry *se;
+
+ trace_loadvm_state_cleanup();
+ QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+ if (se->ops && se->ops->load_cleanup) {
+ se->ops->load_cleanup(se->opaque);
+ }
+ }
+}
+
static int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis)
{
uint8_t section_type;
@@ -1984,6 +2021,12 @@ int qemu_loadvm_state(QEMUFile *f)
return -ENOTSUP;
}
+
+ if (qemu_loadvm_state_setup(f) != 0) {
+ error_report("Load state of one device failed");
+ return -EINVAL;
+ }
+
if (!savevm_state.skip_configuration || enforce_config_section()) {
if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
error_report("Configuration section missing");
@@ -2047,6 +2090,7 @@ int qemu_loadvm_state(QEMUFile *f)
}
}
+ qemu_loadvm_state_cleanup();
cpu_synchronize_all_post_init();
return ret;
diff --git a/migration/trace-events b/migration/trace-events
index 9669e94..cb2c4b5 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -7,6 +7,8 @@ qemu_loadvm_state_section_partend(uint32_t section_id) "%u"
qemu_loadvm_state_post_main(int ret) "%d"
qemu_loadvm_state_section_startfull(uint32_t section_id, const char *idstr, uint32_t instance_id, uint32_t version_id) "%u(%s) %u %u"
qemu_savevm_send_packaged(void) ""
+loadvm_state_setup(void) ""
+loadvm_state_cleanup(void) ""
loadvm_handle_cmd_packaged(unsigned int length) "%u"
loadvm_handle_cmd_packaged_main(int ret) "%d"
loadvm_handle_cmd_packaged_received(int ret) "%d"
--
2.9.4
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [Qemu-devel] [PATCH v2 3/5] migration: Create load_setup()/cleanup() methods
2017-06-13 9:53 ` [Qemu-devel] [PATCH v2 3/5] migration: Create load_setup()/cleanup() methods Juan Quintela
@ 2017-06-16 17:54 ` Dr. David Alan Gilbert
0 siblings, 0 replies; 12+ messages in thread
From: Dr. David Alan Gilbert @ 2017-06-16 17:54 UTC (permalink / raw)
To: Juan Quintela; +Cc: qemu-devel, lvivier, peterx, kwolf, stefanha
* Juan Quintela (quintela@redhat.com) wrote:
> We need to do things at load time and at cleanup time.
>
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
> include/migration/register.h | 2 ++
> migration/savevm.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
> migration/trace-events | 2 ++
> 3 files changed, 48 insertions(+)
>
> diff --git a/include/migration/register.h b/include/migration/register.h
> index 9ad1e4c..4b5c0e0 100644
> --- a/include/migration/register.h
> +++ b/include/migration/register.h
> @@ -42,6 +42,8 @@ typedef struct SaveVMHandlers {
> uint64_t *non_postcopiable_pending,
> uint64_t *postcopiable_pending);
> LoadStateHandler *load_state;
> + int (*load_setup)(QEMUFile *f, void *opaque);
> + int (*load_cleanup)(void *opaque);
> } SaveVMHandlers;
>
> int register_savevm_live(DeviceState *dev,
> diff --git a/migration/savevm.c b/migration/savevm.c
> index fb93931..ed36f35 100644
> --- a/migration/savevm.c
> +++ b/migration/savevm.c
> @@ -1912,6 +1912,43 @@ qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis)
> return 0;
> }
>
> +static int qemu_loadvm_state_setup(QEMUFile *f)
> +{
> + SaveStateEntry *se;
> + int ret;
> +
> + trace_loadvm_state_setup();
> + QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> + if (!se->ops || !se->ops->load_setup) {
> + continue;
> + }
> + if (se->ops && se->ops->is_active) {
> + if (!se->ops->is_active(se->opaque)) {
> + continue;
> + }
> + }
> +
> + ret = se->ops->load_setup(f, se->opaque);
> + if (ret < 0) {
> + qemu_file_set_error(f, ret);
> + return ret;
> + }
> + }
> + return 0;
> +}
> +
> +static void qemu_loadvm_state_cleanup(void)
> +{
> + SaveStateEntry *se;
> +
> + trace_loadvm_state_cleanup();
> + QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> + if (se->ops && se->ops->load_cleanup) {
> + se->ops->load_cleanup(se->opaque);
> + }
> + }
> +}
> +
> static int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis)
> {
> uint8_t section_type;
> @@ -1984,6 +2021,12 @@ int qemu_loadvm_state(QEMUFile *f)
> return -ENOTSUP;
> }
>
> +
> + if (qemu_loadvm_state_setup(f) != 0) {
> + error_report("Load state of one device failed");
> + return -EINVAL;
I'd prefer if that was an error report in qemu_loadvm_state_setup,
because then it would be easy to print the device name rather than
'one'.
Dave
> + }
> +
> if (!savevm_state.skip_configuration || enforce_config_section()) {
> if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
> error_report("Configuration section missing");
> @@ -2047,6 +2090,7 @@ int qemu_loadvm_state(QEMUFile *f)
> }
> }
>
> + qemu_loadvm_state_cleanup();
> cpu_synchronize_all_post_init();
>
> return ret;
> diff --git a/migration/trace-events b/migration/trace-events
> index 9669e94..cb2c4b5 100644
> --- a/migration/trace-events
> +++ b/migration/trace-events
> @@ -7,6 +7,8 @@ qemu_loadvm_state_section_partend(uint32_t section_id) "%u"
> qemu_loadvm_state_post_main(int ret) "%d"
> qemu_loadvm_state_section_startfull(uint32_t section_id, const char *idstr, uint32_t instance_id, uint32_t version_id) "%u(%s) %u %u"
> qemu_savevm_send_packaged(void) ""
> +loadvm_state_setup(void) ""
> +loadvm_state_cleanup(void) ""
> loadvm_handle_cmd_packaged(unsigned int length) "%u"
> loadvm_handle_cmd_packaged_main(int ret) "%d"
> loadvm_handle_cmd_packaged_received(int ret) "%d"
> --
> 2.9.4
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
^ permalink raw reply [flat|nested] 12+ messages in thread
* [Qemu-devel] [PATCH v2 4/5] migration: Convert ram to use new load_setup()/load_cleanup()
2017-06-13 9:53 [Qemu-devel] [PATCH v2 0/5] Create setup/cleanup methods for migration incoming side Juan Quintela
` (2 preceding siblings ...)
2017-06-13 9:53 ` [Qemu-devel] [PATCH v2 3/5] migration: Create load_setup()/cleanup() methods Juan Quintela
@ 2017-06-13 9:53 ` Juan Quintela
2017-06-16 18:00 ` Dr. David Alan Gilbert
2017-06-13 9:53 ` [Qemu-devel] [PATCH v2 5/5] migration: Make compression_threads use save/load_setup/cleanup() Juan Quintela
4 siblings, 1 reply; 12+ messages in thread
From: Juan Quintela @ 2017-06-13 9:53 UTC (permalink / raw)
To: qemu-devel; +Cc: dgilbert, lvivier, peterx, kwolf, stefanha
Once there, I rename ram_migration_cleanup() to ram_save_cleanup().
Notice that this is the first pass, and I only passed XBZRLE to the
new scheme. Moved decoded_buf to inside XBZRLE struct.
As a bonus, I don't have to export xbzrle functions from ram.c.
Signed-off-by: Juan Quintela <quintela@redhat.com>
---
migration/migration.c | 3 ---
migration/ram.c | 52 +++++++++++++++++++++++++++++++++++----------------
migration/ram.h | 1 -
3 files changed, 36 insertions(+), 20 deletions(-)
diff --git a/migration/migration.c b/migration/migration.c
index 0799424..98f2ee1 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -350,9 +350,6 @@ static void process_incoming_migration_co(void *opaque)
migrate_decompress_threads_join();
exit(EXIT_FAILURE);
}
-
- free_xbzrle_decoded_buf();
-
mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
qemu_bh_schedule(mis->bh);
}
diff --git a/migration/ram.c b/migration/ram.c
index be78e42..7040809 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -85,11 +85,10 @@ static struct {
QemuMutex lock;
/* it will store a page full of zeros */
uint8_t *zero_target_page;
+ /* buffer used for XBZRLE decoding */
+ uint8_t *decoded_buf;
} XBZRLE;
-/* buffer used for XBZRLE decoding */
-static uint8_t *xbzrle_decoded_buf;
-
static void XBZRLE_cache_lock(void)
{
if (migrate_use_xbzrle())
@@ -1350,13 +1349,18 @@ uint64_t ram_bytes_total(void)
return total;
}
-void free_xbzrle_decoded_buf(void)
+static void xbzrle_load_setup(void)
{
- g_free(xbzrle_decoded_buf);
- xbzrle_decoded_buf = NULL;
+ XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
}
-static void ram_migration_cleanup(void *opaque)
+static void xbzrle_load_cleanup(void)
+{
+ g_free(XBZRLE.decoded_buf);
+ XBZRLE.decoded_buf = NULL;
+}
+
+static void ram_save_cleanup(void *opaque)
{
RAMState **rsp = opaque;
RAMBlock *block;
@@ -2076,12 +2080,6 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
unsigned int xh_len;
int xh_flags;
- uint8_t *loaded_data;
-
- if (!xbzrle_decoded_buf) {
- xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
- }
- loaded_data = xbzrle_decoded_buf;
/* extract RLE header */
xh_flags = qemu_get_byte(f);
@@ -2097,10 +2095,10 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
return -1;
}
/* load data and decode */
- qemu_get_buffer_in_place(f, &loaded_data, xh_len);
+ qemu_get_buffer_in_place(f, &XBZRLE.decoded_buf, xh_len);
/* decode RLE */
- if (xbzrle_decode_buffer(loaded_data, xh_len, host,
+ if (xbzrle_decode_buffer(XBZRLE.decoded_buf, xh_len, host,
TARGET_PAGE_SIZE) == -1) {
error_report("Failed to load XBZRLE page - decode error!");
return -1;
@@ -2304,6 +2302,26 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
}
/**
+ * ram_load_setup: Setup RAM for migration incoming side
+ *
+ * Returns zero to indicate success and negative for error
+ *
+ * @f: QEMUFile where to receive the data
+ * @opaque: RAMState pointer
+ */
+static int ram_load_setup(QEMUFile *f, void *opaque)
+{
+ xbzrle_load_setup();
+ return 0;
+}
+
+static int ram_load_cleanup(void *opaque)
+{
+ xbzrle_load_cleanup();
+ return 0;
+}
+
+/**
* ram_postcopy_incoming_init: allocate postcopy data structures
*
* Returns 0 for success and negative if there was one error
@@ -2611,7 +2629,9 @@ static SaveVMHandlers savevm_ram_handlers = {
.save_live_complete_precopy = ram_save_complete,
.save_live_pending = ram_save_pending,
.load_state = ram_load,
- .save_cleanup = ram_migration_cleanup,
+ .save_cleanup = ram_save_cleanup,
+ .load_setup = ram_load_setup,
+ .load_cleanup = ram_load_cleanup,
};
void ram_mig_init(void)
diff --git a/migration/ram.h b/migration/ram.h
index 6272eb0..a8b79a4 100644
--- a/migration/ram.h
+++ b/migration/ram.h
@@ -47,7 +47,6 @@ void migrate_decompress_threads_join(void);
uint64_t ram_pagesize_summary(void);
int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len);
void acct_update_position(QEMUFile *f, size_t size, bool zero);
-void free_xbzrle_decoded_buf(void);
void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
unsigned long pages);
void ram_postcopy_migrated_memory_release(MigrationState *ms);
--
2.9.4
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [Qemu-devel] [PATCH v2 4/5] migration: Convert ram to use new load_setup()/load_cleanup()
2017-06-13 9:53 ` [Qemu-devel] [PATCH v2 4/5] migration: Convert ram to use new load_setup()/load_cleanup() Juan Quintela
@ 2017-06-16 18:00 ` Dr. David Alan Gilbert
2017-06-21 11:33 ` Juan Quintela
0 siblings, 1 reply; 12+ messages in thread
From: Dr. David Alan Gilbert @ 2017-06-16 18:00 UTC (permalink / raw)
To: Juan Quintela; +Cc: qemu-devel, lvivier, peterx, kwolf, stefanha
* Juan Quintela (quintela@redhat.com) wrote:
> Once there, I rename ram_migration_cleanup() to ram_save_cleanup().
> Notice that this is the first pass, and I only passed XBZRLE to the
> new scheme. Moved decoded_buf to inside XBZRLE struct.
> As a bonus, I don't have to export xbzrle functions from ram.c.
>
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
> migration/migration.c | 3 ---
> migration/ram.c | 52 +++++++++++++++++++++++++++++++++++----------------
> migration/ram.h | 1 -
> 3 files changed, 36 insertions(+), 20 deletions(-)
>
> diff --git a/migration/migration.c b/migration/migration.c
> index 0799424..98f2ee1 100644
> --- a/migration/migration.c
> +++ b/migration/migration.c
> @@ -350,9 +350,6 @@ static void process_incoming_migration_co(void *opaque)
> migrate_decompress_threads_join();
> exit(EXIT_FAILURE);
> }
> -
> - free_xbzrle_decoded_buf();
> -
> mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
> qemu_bh_schedule(mis->bh);
> }
> diff --git a/migration/ram.c b/migration/ram.c
> index be78e42..7040809 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -85,11 +85,10 @@ static struct {
> QemuMutex lock;
> /* it will store a page full of zeros */
> uint8_t *zero_target_page;
> + /* buffer used for XBZRLE decoding */
> + uint8_t *decoded_buf;
> } XBZRLE;
>
> -/* buffer used for XBZRLE decoding */
> -static uint8_t *xbzrle_decoded_buf;
> -
> static void XBZRLE_cache_lock(void)
> {
> if (migrate_use_xbzrle())
> @@ -1350,13 +1349,18 @@ uint64_t ram_bytes_total(void)
> return total;
> }
>
> -void free_xbzrle_decoded_buf(void)
> +static void xbzrle_load_setup(void)
> {
> - g_free(xbzrle_decoded_buf);
> - xbzrle_decoded_buf = NULL;
> + XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
> }
>
> -static void ram_migration_cleanup(void *opaque)
> +static void xbzrle_load_cleanup(void)
> +{
> + g_free(XBZRLE.decoded_buf);
> + XBZRLE.decoded_buf = NULL;
> +}
> +
> +static void ram_save_cleanup(void *opaque)
> {
> RAMState **rsp = opaque;
> RAMBlock *block;
> @@ -2076,12 +2080,6 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
> {
> unsigned int xh_len;
> int xh_flags;
> - uint8_t *loaded_data;
> -
> - if (!xbzrle_decoded_buf) {
> - xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
> - }
> - loaded_data = xbzrle_decoded_buf;
>
> /* extract RLE header */
> xh_flags = qemu_get_byte(f);
> @@ -2097,10 +2095,10 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
> return -1;
> }
> /* load data and decode */
> - qemu_get_buffer_in_place(f, &loaded_data, xh_len);
> + qemu_get_buffer_in_place(f, &XBZRLE.decoded_buf, xh_len);
No ! Note the & - loaded_data can get changed at that point to
point to an internal buffer rather than using that temporary.
So you still need the loaded_data and use that in the rest of
this function.
Dave
> /* decode RLE */
> - if (xbzrle_decode_buffer(loaded_data, xh_len, host,
> + if (xbzrle_decode_buffer(XBZRLE.decoded_buf, xh_len, host,
> TARGET_PAGE_SIZE) == -1) {
> error_report("Failed to load XBZRLE page - decode error!");
> return -1;
> @@ -2304,6 +2302,26 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
> }
>
> /**
> + * ram_load_setup: Setup RAM for migration incoming side
> + *
> + * Returns zero to indicate success and negative for error
> + *
> + * @f: QEMUFile where to receive the data
> + * @opaque: RAMState pointer
> + */
> +static int ram_load_setup(QEMUFile *f, void *opaque)
> +{
> + xbzrle_load_setup();
> + return 0;
> +}
> +
> +static int ram_load_cleanup(void *opaque)
> +{
> + xbzrle_load_cleanup();
> + return 0;
> +}
> +
> +/**
> * ram_postcopy_incoming_init: allocate postcopy data structures
> *
> * Returns 0 for success and negative if there was one error
> @@ -2611,7 +2629,9 @@ static SaveVMHandlers savevm_ram_handlers = {
> .save_live_complete_precopy = ram_save_complete,
> .save_live_pending = ram_save_pending,
> .load_state = ram_load,
> - .save_cleanup = ram_migration_cleanup,
> + .save_cleanup = ram_save_cleanup,
> + .load_setup = ram_load_setup,
> + .load_cleanup = ram_load_cleanup,
> };
>
> void ram_mig_init(void)
> diff --git a/migration/ram.h b/migration/ram.h
> index 6272eb0..a8b79a4 100644
> --- a/migration/ram.h
> +++ b/migration/ram.h
> @@ -47,7 +47,6 @@ void migrate_decompress_threads_join(void);
> uint64_t ram_pagesize_summary(void);
> int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len);
> void acct_update_position(QEMUFile *f, size_t size, bool zero);
> -void free_xbzrle_decoded_buf(void);
> void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
> unsigned long pages);
> void ram_postcopy_migrated_memory_release(MigrationState *ms);
> --
> 2.9.4
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [Qemu-devel] [PATCH v2 4/5] migration: Convert ram to use new load_setup()/load_cleanup()
2017-06-16 18:00 ` Dr. David Alan Gilbert
@ 2017-06-21 11:33 ` Juan Quintela
0 siblings, 0 replies; 12+ messages in thread
From: Juan Quintela @ 2017-06-21 11:33 UTC (permalink / raw)
To: Dr. David Alan Gilbert; +Cc: qemu-devel, lvivier, peterx, kwolf, stefanha
"Dr. David Alan Gilbert" <dgilbert@redhat.com> wrote:
> * Juan Quintela (quintela@redhat.com) wrote:
>> Once there, I rename ram_migration_cleanup() to ram_save_cleanup().
>> Notice that this is the first pass, and I only passed XBZRLE to the
>> new scheme. Moved decoded_buf to inside XBZRLE struct.
>> As a bonus, I don't have to export xbzrle functions from ram.c.
>>
>> Signed-off-by: Juan Quintela <quintela@redhat.com>
>> }
>> /* load data and decode */
>> - qemu_get_buffer_in_place(f, &loaded_data, xh_len);
>> + qemu_get_buffer_in_place(f, &XBZRLE.decoded_buf, xh_len);
>
> No ! Note the & - loaded_data can get changed at that point to
> point to an internal buffer rather than using that temporary.
>
> So you still need the loaded_data and use that in the rest of
> this function.
You are right.
I hate these nuances.
Thanks for the review.
Later, Juan.
^ permalink raw reply [flat|nested] 12+ messages in thread
* [Qemu-devel] [PATCH v2 5/5] migration: Make compression_threads use save/load_setup/cleanup()
2017-06-13 9:53 [Qemu-devel] [PATCH v2 0/5] Create setup/cleanup methods for migration incoming side Juan Quintela
` (3 preceding siblings ...)
2017-06-13 9:53 ` [Qemu-devel] [PATCH v2 4/5] migration: Convert ram to use new load_setup()/load_cleanup() Juan Quintela
@ 2017-06-13 9:53 ` Juan Quintela
2017-06-16 18:04 ` Dr. David Alan Gilbert
4 siblings, 1 reply; 12+ messages in thread
From: Juan Quintela @ 2017-06-13 9:53 UTC (permalink / raw)
To: qemu-devel; +Cc: dgilbert, lvivier, peterx, kwolf, stefanha
Once there, be consistent and use
compress_thread_{save,load}_{setup,cleanup}.
Signed-off-by: Juan Quintela <quintela@redhat.com>
---
migration/migration.c | 5 -----
migration/ram.c | 12 ++++++++----
migration/ram.h | 5 -----
3 files changed, 8 insertions(+), 14 deletions(-)
diff --git a/migration/migration.c b/migration/migration.c
index 98f2ee1..dc96021 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -284,7 +284,6 @@ static void process_incoming_migration_bh(void *opaque)
} else {
runstate_set(global_state_get_runstate());
}
- migrate_decompress_threads_join();
/*
* This must happen after any state changes since as soon as an external
* observer sees this event they might start to prod at the VM assuming
@@ -347,7 +346,6 @@ static void process_incoming_migration_co(void *opaque)
migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
MIGRATION_STATUS_FAILED);
error_report("load of migration failed: %s", strerror(-ret));
- migrate_decompress_threads_join();
exit(EXIT_FAILURE);
}
mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
@@ -358,7 +356,6 @@ void migration_fd_process_incoming(QEMUFile *f)
{
Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, f);
- migrate_decompress_threads_create();
qemu_file_set_blocking(f, false);
qemu_coroutine_enter(co);
}
@@ -825,7 +822,6 @@ static void migrate_fd_cleanup(void *opaque)
}
qemu_mutex_lock_iothread();
- migrate_compress_threads_join();
qemu_fclose(s->to_dst_file);
s->to_dst_file = NULL;
}
@@ -1979,7 +1975,6 @@ void migrate_fd_connect(MigrationState *s)
}
}
- migrate_compress_threads_create();
qemu_thread_create(&s->thread, "live_migration", migration_thread, s,
QEMU_THREAD_JOINABLE);
s->migration_thread_running = true;
diff --git a/migration/ram.c b/migration/ram.c
index 7040809..f35d65a 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -306,7 +306,7 @@ static inline void terminate_compression_threads(void)
}
}
-void migrate_compress_threads_join(void)
+static void compress_threads_save_cleanup(void)
{
int i, thread_count;
@@ -329,7 +329,7 @@ void migrate_compress_threads_join(void)
comp_param = NULL;
}
-void migrate_compress_threads_create(void)
+static void compress_threads_save_setup(void)
{
int i, thread_count;
@@ -1390,6 +1390,7 @@ static void ram_save_cleanup(void *opaque)
}
XBZRLE_cache_unlock();
migration_page_queue_free(*rsp);
+ compress_threads_save_cleanup();
g_free(*rsp);
*rsp = NULL;
}
@@ -1923,6 +1924,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
}
rcu_read_unlock();
+ compress_threads_save_setup();
ram_control_before_iterate(f, RAM_CONTROL_SETUP);
ram_control_after_iterate(f, RAM_CONTROL_SETUP);
@@ -2228,7 +2230,7 @@ static void wait_for_decompress_done(void)
qemu_mutex_unlock(&decomp_done_lock);
}
-void migrate_decompress_threads_create(void)
+static void compress_threads_load_setup(void)
{
int i, thread_count;
@@ -2249,7 +2251,7 @@ void migrate_decompress_threads_create(void)
}
}
-void migrate_decompress_threads_join(void)
+static void compress_threads_load_cleanup(void)
{
int i, thread_count;
@@ -2312,12 +2314,14 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
static int ram_load_setup(QEMUFile *f, void *opaque)
{
xbzrle_load_setup();
+ compress_threads_load_setup();
return 0;
}
static int ram_load_cleanup(void *opaque)
{
xbzrle_load_cleanup();
+ compress_threads_load_cleanup();
return 0;
}
diff --git a/migration/ram.h b/migration/ram.h
index a8b79a4..c081fde 100644
--- a/migration/ram.h
+++ b/migration/ram.h
@@ -39,11 +39,6 @@ int64_t xbzrle_cache_resize(int64_t new_size);
uint64_t ram_bytes_remaining(void);
uint64_t ram_bytes_total(void);
-void migrate_compress_threads_create(void);
-void migrate_compress_threads_join(void);
-void migrate_decompress_threads_create(void);
-void migrate_decompress_threads_join(void);
-
uint64_t ram_pagesize_summary(void);
int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len);
void acct_update_position(QEMUFile *f, size_t size, bool zero);
--
2.9.4
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [Qemu-devel] [PATCH v2 5/5] migration: Make compression_threads use save/load_setup/cleanup()
2017-06-13 9:53 ` [Qemu-devel] [PATCH v2 5/5] migration: Make compression_threads use save/load_setup/cleanup() Juan Quintela
@ 2017-06-16 18:04 ` Dr. David Alan Gilbert
0 siblings, 0 replies; 12+ messages in thread
From: Dr. David Alan Gilbert @ 2017-06-16 18:04 UTC (permalink / raw)
To: Juan Quintela; +Cc: qemu-devel, lvivier, peterx, kwolf, stefanha
* Juan Quintela (quintela@redhat.com) wrote:
> Once there, be consistent and use
> compress_thread_{save,load}_{setup,cleanup}.
>
> Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
> ---
> migration/migration.c | 5 -----
> migration/ram.c | 12 ++++++++----
> migration/ram.h | 5 -----
> 3 files changed, 8 insertions(+), 14 deletions(-)
>
> diff --git a/migration/migration.c b/migration/migration.c
> index 98f2ee1..dc96021 100644
> --- a/migration/migration.c
> +++ b/migration/migration.c
> @@ -284,7 +284,6 @@ static void process_incoming_migration_bh(void *opaque)
> } else {
> runstate_set(global_state_get_runstate());
> }
> - migrate_decompress_threads_join();
> /*
> * This must happen after any state changes since as soon as an external
> * observer sees this event they might start to prod at the VM assuming
> @@ -347,7 +346,6 @@ static void process_incoming_migration_co(void *opaque)
> migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
> MIGRATION_STATUS_FAILED);
> error_report("load of migration failed: %s", strerror(-ret));
> - migrate_decompress_threads_join();
> exit(EXIT_FAILURE);
> }
> mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
> @@ -358,7 +356,6 @@ void migration_fd_process_incoming(QEMUFile *f)
> {
> Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, f);
>
> - migrate_decompress_threads_create();
> qemu_file_set_blocking(f, false);
> qemu_coroutine_enter(co);
> }
> @@ -825,7 +822,6 @@ static void migrate_fd_cleanup(void *opaque)
> }
> qemu_mutex_lock_iothread();
>
> - migrate_compress_threads_join();
> qemu_fclose(s->to_dst_file);
> s->to_dst_file = NULL;
> }
> @@ -1979,7 +1975,6 @@ void migrate_fd_connect(MigrationState *s)
> }
> }
>
> - migrate_compress_threads_create();
> qemu_thread_create(&s->thread, "live_migration", migration_thread, s,
> QEMU_THREAD_JOINABLE);
> s->migration_thread_running = true;
> diff --git a/migration/ram.c b/migration/ram.c
> index 7040809..f35d65a 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -306,7 +306,7 @@ static inline void terminate_compression_threads(void)
> }
> }
>
> -void migrate_compress_threads_join(void)
> +static void compress_threads_save_cleanup(void)
> {
> int i, thread_count;
>
> @@ -329,7 +329,7 @@ void migrate_compress_threads_join(void)
> comp_param = NULL;
> }
>
> -void migrate_compress_threads_create(void)
> +static void compress_threads_save_setup(void)
> {
> int i, thread_count;
>
> @@ -1390,6 +1390,7 @@ static void ram_save_cleanup(void *opaque)
> }
> XBZRLE_cache_unlock();
> migration_page_queue_free(*rsp);
> + compress_threads_save_cleanup();
> g_free(*rsp);
> *rsp = NULL;
> }
> @@ -1923,6 +1924,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
> }
>
> rcu_read_unlock();
> + compress_threads_save_setup();
>
> ram_control_before_iterate(f, RAM_CONTROL_SETUP);
> ram_control_after_iterate(f, RAM_CONTROL_SETUP);
> @@ -2228,7 +2230,7 @@ static void wait_for_decompress_done(void)
> qemu_mutex_unlock(&decomp_done_lock);
> }
>
> -void migrate_decompress_threads_create(void)
> +static void compress_threads_load_setup(void)
> {
> int i, thread_count;
>
> @@ -2249,7 +2251,7 @@ void migrate_decompress_threads_create(void)
> }
> }
>
> -void migrate_decompress_threads_join(void)
> +static void compress_threads_load_cleanup(void)
> {
> int i, thread_count;
>
> @@ -2312,12 +2314,14 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
> static int ram_load_setup(QEMUFile *f, void *opaque)
> {
> xbzrle_load_setup();
> + compress_threads_load_setup();
> return 0;
> }
>
> static int ram_load_cleanup(void *opaque)
> {
> xbzrle_load_cleanup();
> + compress_threads_load_cleanup();
> return 0;
> }
>
> diff --git a/migration/ram.h b/migration/ram.h
> index a8b79a4..c081fde 100644
> --- a/migration/ram.h
> +++ b/migration/ram.h
> @@ -39,11 +39,6 @@ int64_t xbzrle_cache_resize(int64_t new_size);
> uint64_t ram_bytes_remaining(void);
> uint64_t ram_bytes_total(void);
>
> -void migrate_compress_threads_create(void);
> -void migrate_compress_threads_join(void);
> -void migrate_decompress_threads_create(void);
> -void migrate_decompress_threads_join(void);
> -
> uint64_t ram_pagesize_summary(void);
> int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len);
> void acct_update_position(QEMUFile *f, size_t size, bool zero);
> --
> 2.9.4
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
^ permalink raw reply [flat|nested] 12+ messages in thread