* [Qemu-devel] [PATCH v4 1/5] migration: Rename save_live_setup() to save_setup()
2017-06-28 9:52 [Qemu-devel] [PATCH v4 0/5] Create setup/cleanup methods for migration incoming side Juan Quintela
@ 2017-06-28 9:52 ` Juan Quintela
2017-06-28 9:52 ` [Qemu-devel] [PATCH v4 2/5] migration: Rename cleanup() to save_cleanup() Juan Quintela
` (4 subsequent siblings)
5 siblings, 0 replies; 12+ messages in thread
From: Juan Quintela @ 2017-06-28 9:52 UTC (permalink / raw)
To: qemu-devel; +Cc: dgilbert, lvivier, peterx, kwolf
We are going to use it now for more than save live regions.
Once there rename qemu_savevm_state_begin() to qemu_savevm_state_setup().
Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
---
hw/ppc/spapr.c | 2 +-
include/migration/register.h | 2 +-
migration/block.c | 2 +-
migration/colo.c | 2 +-
migration/migration.c | 2 +-
migration/ram.c | 2 +-
migration/savevm.c | 12 ++++++------
migration/savevm.h | 2 +-
migration/trace-events | 2 +-
9 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index ea44358..ffb889d 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1869,7 +1869,7 @@ static void htab_cleanup(void *opaque)
}
static SaveVMHandlers savevm_htab_handlers = {
- .save_live_setup = htab_save_setup,
+ .save_setup = htab_save_setup,
.save_live_iterate = htab_save_iterate,
.save_live_complete_precopy = htab_save_complete,
.cleanup = htab_cleanup,
diff --git a/include/migration/register.h b/include/migration/register.h
index d9498d9..f607769 100644
--- a/include/migration/register.h
+++ b/include/migration/register.h
@@ -33,7 +33,7 @@ typedef struct SaveVMHandlers {
int (*save_live_iterate)(QEMUFile *f, void *opaque);
/* This runs outside the iothread lock! */
- int (*save_live_setup)(QEMUFile *f, void *opaque);
+ int (*save_setup)(QEMUFile *f, void *opaque);
void (*save_live_pending)(QEMUFile *f, void *opaque,
uint64_t threshold_size,
uint64_t *non_postcopiable_pending,
diff --git a/migration/block.c b/migration/block.c
index 7674ae1..7cb77e5 100644
--- a/migration/block.c
+++ b/migration/block.c
@@ -1004,7 +1004,7 @@ static bool block_is_active(void *opaque)
}
static SaveVMHandlers savevm_block_handlers = {
- .save_live_setup = block_save_setup,
+ .save_setup = block_save_setup,
.save_live_iterate = block_save_iterate,
.save_live_complete_precopy = block_save_complete,
.save_live_pending = block_save_pending,
diff --git a/migration/colo.c b/migration/colo.c
index c4ba4c3..ef35f00 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -350,7 +350,7 @@ static int colo_do_checkpoint_transaction(MigrationState *s,
/* Disable block migration */
migrate_set_block_enabled(false, &local_err);
qemu_savevm_state_header(fb);
- qemu_savevm_state_begin(fb);
+ qemu_savevm_state_setup(fb);
qemu_mutex_lock_iothread();
qemu_savevm_state_complete_precopy(fb, false, false);
qemu_mutex_unlock_iothread();
diff --git a/migration/migration.c b/migration/migration.c
index 51ccd1a..8ea35f1 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1840,7 +1840,7 @@ static void *migration_thread(void *opaque)
qemu_savevm_send_postcopy_advise(s->to_dst_file);
}
- qemu_savevm_state_begin(s->to_dst_file);
+ qemu_savevm_state_setup(s->to_dst_file);
s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
diff --git a/migration/ram.c b/migration/ram.c
index 0baa1e0..480248a 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2623,7 +2623,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
}
static SaveVMHandlers savevm_ram_handlers = {
- .save_live_setup = ram_save_setup,
+ .save_setup = ram_save_setup,
.save_live_iterate = ram_save_iterate,
.save_live_complete_postcopy = ram_save_complete,
.save_live_complete_precopy = ram_save_complete,
diff --git a/migration/savevm.c b/migration/savevm.c
index be3f885..66f8160 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -596,7 +596,7 @@ int register_savevm_live(DeviceState *dev,
se->opaque = opaque;
se->vmsd = NULL;
/* if this is a live_savem then set is_ram */
- if (ops->save_live_setup != NULL) {
+ if (ops->save_setup != NULL) {
se->is_ram = 1;
}
@@ -955,14 +955,14 @@ void qemu_savevm_state_header(QEMUFile *f)
}
}
-void qemu_savevm_state_begin(QEMUFile *f)
+void qemu_savevm_state_setup(QEMUFile *f)
{
SaveStateEntry *se;
int ret;
- trace_savevm_state_begin();
+ trace_savevm_state_setup();
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
- if (!se->ops || !se->ops->save_live_setup) {
+ if (!se->ops || !se->ops->save_setup) {
continue;
}
if (se->ops && se->ops->is_active) {
@@ -972,7 +972,7 @@ void qemu_savevm_state_begin(QEMUFile *f)
}
save_section_header(f, se, QEMU_VM_SECTION_START);
- ret = se->ops->save_live_setup(f, se->opaque);
+ ret = se->ops->save_setup(f, se->opaque);
save_section_footer(f, se);
if (ret < 0) {
qemu_file_set_error(f, ret);
@@ -1241,7 +1241,7 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp)
qemu_mutex_unlock_iothread();
qemu_savevm_state_header(f);
- qemu_savevm_state_begin(f);
+ qemu_savevm_state_setup(f);
qemu_mutex_lock_iothread();
while (qemu_file_get_error(f) == 0) {
diff --git a/migration/savevm.h b/migration/savevm.h
index 5a2ed11..6babc62 100644
--- a/migration/savevm.h
+++ b/migration/savevm.h
@@ -30,7 +30,7 @@
#define QEMU_VM_SECTION_FOOTER 0x7e
bool qemu_savevm_state_blocked(Error **errp);
-void qemu_savevm_state_begin(QEMUFile *f);
+void qemu_savevm_state_setup(QEMUFile *f);
void qemu_savevm_state_header(QEMUFile *f);
int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy);
void qemu_savevm_state_cleanup(void);
diff --git a/migration/trace-events b/migration/trace-events
index 38345be..9669e94 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -32,7 +32,7 @@ savevm_send_open_return_path(void) ""
savevm_send_ping(uint32_t val) "%x"
savevm_send_postcopy_listen(void) ""
savevm_send_postcopy_run(void) ""
-savevm_state_begin(void) ""
+savevm_state_setup(void) ""
savevm_state_header(void) ""
savevm_state_iterate(void) ""
savevm_state_cleanup(void) ""
--
2.9.4
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [Qemu-devel] [PATCH v4 2/5] migration: Rename cleanup() to save_cleanup()
2017-06-28 9:52 [Qemu-devel] [PATCH v4 0/5] Create setup/cleanup methods for migration incoming side Juan Quintela
2017-06-28 9:52 ` [Qemu-devel] [PATCH v4 1/5] migration: Rename save_live_setup() to save_setup() Juan Quintela
@ 2017-06-28 9:52 ` Juan Quintela
2017-06-28 9:52 ` [Qemu-devel] [PATCH v4 3/5] migration: Create load_setup()/cleanup() methods Juan Quintela
` (3 subsequent siblings)
5 siblings, 0 replies; 12+ messages in thread
From: Juan Quintela @ 2017-06-28 9:52 UTC (permalink / raw)
To: qemu-devel; +Cc: dgilbert, lvivier, peterx, kwolf
We need a cleanup for loads, so we rename here to be consistent.
Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
--
Rename htab_cleanup to htab_save_cleanup, as per Dave's suggestion
---
hw/ppc/spapr.c | 4 ++--
include/migration/register.h | 2 +-
migration/block.c | 2 +-
migration/ram.c | 2 +-
migration/savevm.c | 4 ++--
5 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index ffb889d..9460b2e 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1861,7 +1861,7 @@ static int htab_load(QEMUFile *f, void *opaque, int version_id)
return 0;
}
-static void htab_cleanup(void *opaque)
+static void htab_save_cleanup(void *opaque)
{
sPAPRMachineState *spapr = opaque;
@@ -1872,7 +1872,7 @@ static SaveVMHandlers savevm_htab_handlers = {
.save_setup = htab_save_setup,
.save_live_iterate = htab_save_iterate,
.save_live_complete_precopy = htab_save_complete,
- .cleanup = htab_cleanup,
+ .save_cleanup = htab_save_cleanup,
.load_state = htab_load,
};
diff --git a/include/migration/register.h b/include/migration/register.h
index f607769..938ea2b 100644
--- a/include/migration/register.h
+++ b/include/migration/register.h
@@ -18,7 +18,7 @@ typedef struct SaveVMHandlers {
/* This runs inside the iothread lock. */
SaveStateHandler *save_state;
- void (*cleanup)(void *opaque);
+ void (*save_cleanup)(void *opaque);
int (*save_live_complete_postcopy)(QEMUFile *f, void *opaque);
int (*save_live_complete_precopy)(QEMUFile *f, void *opaque);
diff --git a/migration/block.c b/migration/block.c
index 7cb77e5..0c24a7f 100644
--- a/migration/block.c
+++ b/migration/block.c
@@ -1009,7 +1009,7 @@ static SaveVMHandlers savevm_block_handlers = {
.save_live_complete_precopy = block_save_complete,
.save_live_pending = block_save_pending,
.load_state = block_load,
- .cleanup = block_migration_cleanup,
+ .save_cleanup = block_migration_cleanup,
.is_active = block_is_active,
};
diff --git a/migration/ram.c b/migration/ram.c
index 480248a..649f76c 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2629,7 +2629,7 @@ static SaveVMHandlers savevm_ram_handlers = {
.save_live_complete_precopy = ram_save_complete,
.save_live_pending = ram_save_pending,
.load_state = ram_load,
- .cleanup = ram_migration_cleanup,
+ .save_cleanup = ram_migration_cleanup,
};
void ram_mig_init(void)
diff --git a/migration/savevm.c b/migration/savevm.c
index 66f8160..fee11c5 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1215,8 +1215,8 @@ void qemu_savevm_state_cleanup(void)
trace_savevm_state_cleanup();
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
- if (se->ops && se->ops->cleanup) {
- se->ops->cleanup(se->opaque);
+ if (se->ops && se->ops->save_cleanup) {
+ se->ops->save_cleanup(se->opaque);
}
}
}
--
2.9.4
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [Qemu-devel] [PATCH v4 3/5] migration: Create load_setup()/cleanup() methods
2017-06-28 9:52 [Qemu-devel] [PATCH v4 0/5] Create setup/cleanup methods for migration incoming side Juan Quintela
2017-06-28 9:52 ` [Qemu-devel] [PATCH v4 1/5] migration: Rename save_live_setup() to save_setup() Juan Quintela
2017-06-28 9:52 ` [Qemu-devel] [PATCH v4 2/5] migration: Rename cleanup() to save_cleanup() Juan Quintela
@ 2017-06-28 9:52 ` Juan Quintela
2017-06-28 11:15 ` Dr. David Alan Gilbert
2017-06-28 9:52 ` [Qemu-devel] [PATCH v4 4/5] migration: Convert ram to use new load_setup()/load_cleanup() Juan Quintela
` (2 subsequent siblings)
5 siblings, 1 reply; 12+ messages in thread
From: Juan Quintela @ 2017-06-28 9:52 UTC (permalink / raw)
To: qemu-devel; +Cc: dgilbert, lvivier, peterx, kwolf
We need to do things at load time and at cleanup time.
Signed-off-by: Juan Quintela <quintela@redhat.com>
--
Move the printing of the error message so we can print the device
giving the error.
Add call to postcopy stuff
---
include/migration/register.h | 2 ++
migration/savevm.c | 45 +++++++++++++++++++++++++++++++++++++++++++-
migration/savevm.h | 1 +
migration/trace-events | 2 ++
4 files changed, 49 insertions(+), 1 deletion(-)
diff --git a/include/migration/register.h b/include/migration/register.h
index 938ea2b..a0f1edd 100644
--- a/include/migration/register.h
+++ b/include/migration/register.h
@@ -39,6 +39,8 @@ typedef struct SaveVMHandlers {
uint64_t *non_postcopiable_pending,
uint64_t *postcopiable_pending);
LoadStateHandler *load_state;
+ int (*load_setup)(QEMUFile *f, void *opaque);
+ int (*load_cleanup)(void *opaque);
} SaveVMHandlers;
int register_savevm_live(DeviceState *dev,
diff --git a/migration/savevm.c b/migration/savevm.c
index fee11c5..fdd15fa 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1541,7 +1541,7 @@ static void *postcopy_ram_listen_thread(void *opaque)
* got a bad migration state).
*/
migration_incoming_state_destroy();
-
+ qemu_loadvm_state_cleanup();
return NULL;
}
@@ -1901,6 +1901,44 @@ qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis)
return 0;
}
+static int qemu_loadvm_state_setup(QEMUFile *f)
+{
+ SaveStateEntry *se;
+ int ret;
+
+ trace_loadvm_state_setup();
+ QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+ if (!se->ops || !se->ops->load_setup) {
+ continue;
+ }
+ if (se->ops && se->ops->is_active) {
+ if (!se->ops->is_active(se->opaque)) {
+ continue;
+ }
+ }
+
+ ret = se->ops->load_setup(f, se->opaque);
+ if (ret < 0) {
+ qemu_file_set_error(f, ret);
+ error_report("Load state of device %s failed", se->idstr);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+void qemu_loadvm_state_cleanup(void)
+{
+ SaveStateEntry *se;
+
+ trace_loadvm_state_cleanup();
+ QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+ if (se->ops && se->ops->load_cleanup) {
+ se->ops->load_cleanup(se->opaque);
+ }
+ }
+}
+
static int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis)
{
uint8_t section_type;
@@ -1973,6 +2011,10 @@ int qemu_loadvm_state(QEMUFile *f)
return -ENOTSUP;
}
+ if (qemu_loadvm_state_setup(f) != 0) {
+ return -EINVAL;
+ }
+
if (migrate_get_current()->send_configuration) {
if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
error_report("Configuration section missing");
@@ -2036,6 +2078,7 @@ int qemu_loadvm_state(QEMUFile *f)
}
}
+ qemu_loadvm_state_cleanup();
cpu_synchronize_all_post_init();
return ret;
diff --git a/migration/savevm.h b/migration/savevm.h
index 6babc62..295c4a1 100644
--- a/migration/savevm.h
+++ b/migration/savevm.h
@@ -53,5 +53,6 @@ void qemu_savevm_send_postcopy_ram_discard(QEMUFile *f, const char *name,
uint64_t *length_list);
int qemu_loadvm_state(QEMUFile *f);
+void qemu_loadvm_state_cleanup(void);
#endif
diff --git a/migration/trace-events b/migration/trace-events
index 9669e94..cb2c4b5 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -7,6 +7,8 @@ qemu_loadvm_state_section_partend(uint32_t section_id) "%u"
qemu_loadvm_state_post_main(int ret) "%d"
qemu_loadvm_state_section_startfull(uint32_t section_id, const char *idstr, uint32_t instance_id, uint32_t version_id) "%u(%s) %u %u"
qemu_savevm_send_packaged(void) ""
+loadvm_state_setup(void) ""
+loadvm_state_cleanup(void) ""
loadvm_handle_cmd_packaged(unsigned int length) "%u"
loadvm_handle_cmd_packaged_main(int ret) "%d"
loadvm_handle_cmd_packaged_received(int ret) "%d"
--
2.9.4
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [Qemu-devel] [PATCH v4 3/5] migration: Create load_setup()/cleanup() methods
2017-06-28 9:52 ` [Qemu-devel] [PATCH v4 3/5] migration: Create load_setup()/cleanup() methods Juan Quintela
@ 2017-06-28 11:15 ` Dr. David Alan Gilbert
2017-06-28 12:01 ` Juan Quintela
0 siblings, 1 reply; 12+ messages in thread
From: Dr. David Alan Gilbert @ 2017-06-28 11:15 UTC (permalink / raw)
To: Juan Quintela; +Cc: qemu-devel, lvivier, peterx, kwolf
* Juan Quintela (quintela@redhat.com) wrote:
> We need to do things at load time and at cleanup time.
>
> Signed-off-by: Juan Quintela <quintela@redhat.com>
>
> --
>
> Move the printing of the error message so we can print the device
> giving the error.
> Add call to postcopy stuff
> ---
> include/migration/register.h | 2 ++
> migration/savevm.c | 45 +++++++++++++++++++++++++++++++++++++++++++-
> migration/savevm.h | 1 +
> migration/trace-events | 2 ++
> 4 files changed, 49 insertions(+), 1 deletion(-)
>
> diff --git a/include/migration/register.h b/include/migration/register.h
> index 938ea2b..a0f1edd 100644
> --- a/include/migration/register.h
> +++ b/include/migration/register.h
> @@ -39,6 +39,8 @@ typedef struct SaveVMHandlers {
> uint64_t *non_postcopiable_pending,
> uint64_t *postcopiable_pending);
> LoadStateHandler *load_state;
> + int (*load_setup)(QEMUFile *f, void *opaque);
> + int (*load_cleanup)(void *opaque);
> } SaveVMHandlers;
>
> int register_savevm_live(DeviceState *dev,
> diff --git a/migration/savevm.c b/migration/savevm.c
> index fee11c5..fdd15fa 100644
> --- a/migration/savevm.c
> +++ b/migration/savevm.c
> @@ -1541,7 +1541,7 @@ static void *postcopy_ram_listen_thread(void *opaque)
> * got a bad migration state).
> */
> migration_incoming_state_destroy();
> -
> + qemu_loadvm_state_cleanup();
Is that order right? It seems wrong to call the cleanup
code after MIS is destroyed.
(The precopy path seems to call mis_destroy at the end of
process_incoming_migration_bh which is much later).
Dave
> return NULL;
> }
> @@ -1901,6 +1901,44 @@ qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis)
> return 0;
> }
>
> +static int qemu_loadvm_state_setup(QEMUFile *f)
> +{
> + SaveStateEntry *se;
> + int ret;
> +
> + trace_loadvm_state_setup();
> + QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> + if (!se->ops || !se->ops->load_setup) {
> + continue;
> + }
> + if (se->ops && se->ops->is_active) {
> + if (!se->ops->is_active(se->opaque)) {
> + continue;
> + }
> + }
> +
> + ret = se->ops->load_setup(f, se->opaque);
> + if (ret < 0) {
> + qemu_file_set_error(f, ret);
> + error_report("Load state of device %s failed", se->idstr);
> + return ret;
> + }
> + }
> + return 0;
> +}
> +
> +void qemu_loadvm_state_cleanup(void)
> +{
> + SaveStateEntry *se;
> +
> + trace_loadvm_state_cleanup();
> + QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> + if (se->ops && se->ops->load_cleanup) {
> + se->ops->load_cleanup(se->opaque);
> + }
> + }
> +}
> +
> static int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis)
> {
> uint8_t section_type;
> @@ -1973,6 +2011,10 @@ int qemu_loadvm_state(QEMUFile *f)
> return -ENOTSUP;
> }
>
> + if (qemu_loadvm_state_setup(f) != 0) {
> + return -EINVAL;
> + }
> +
> if (migrate_get_current()->send_configuration) {
> if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
> error_report("Configuration section missing");
> @@ -2036,6 +2078,7 @@ int qemu_loadvm_state(QEMUFile *f)
> }
> }
>
> + qemu_loadvm_state_cleanup();
> cpu_synchronize_all_post_init();
>
> return ret;
> diff --git a/migration/savevm.h b/migration/savevm.h
> index 6babc62..295c4a1 100644
> --- a/migration/savevm.h
> +++ b/migration/savevm.h
> @@ -53,5 +53,6 @@ void qemu_savevm_send_postcopy_ram_discard(QEMUFile *f, const char *name,
> uint64_t *length_list);
>
> int qemu_loadvm_state(QEMUFile *f);
> +void qemu_loadvm_state_cleanup(void);
>
> #endif
> diff --git a/migration/trace-events b/migration/trace-events
> index 9669e94..cb2c4b5 100644
> --- a/migration/trace-events
> +++ b/migration/trace-events
> @@ -7,6 +7,8 @@ qemu_loadvm_state_section_partend(uint32_t section_id) "%u"
> qemu_loadvm_state_post_main(int ret) "%d"
> qemu_loadvm_state_section_startfull(uint32_t section_id, const char *idstr, uint32_t instance_id, uint32_t version_id) "%u(%s) %u %u"
> qemu_savevm_send_packaged(void) ""
> +loadvm_state_setup(void) ""
> +loadvm_state_cleanup(void) ""
> loadvm_handle_cmd_packaged(unsigned int length) "%u"
> loadvm_handle_cmd_packaged_main(int ret) "%d"
> loadvm_handle_cmd_packaged_received(int ret) "%d"
> --
> 2.9.4
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [Qemu-devel] [PATCH v4 3/5] migration: Create load_setup()/cleanup() methods
2017-06-28 11:15 ` Dr. David Alan Gilbert
@ 2017-06-28 12:01 ` Juan Quintela
2017-06-29 10:38 ` Dr. David Alan Gilbert
2017-07-10 13:28 ` Dr. David Alan Gilbert
0 siblings, 2 replies; 12+ messages in thread
From: Juan Quintela @ 2017-06-28 12:01 UTC (permalink / raw)
To: Dr. David Alan Gilbert; +Cc: qemu-devel, lvivier, peterx, kwolf
"Dr. David Alan Gilbert" <dgilbert@redhat.com> wrote:
> * Juan Quintela (quintela@redhat.com) wrote:
>> We need to do things at load time and at cleanup time.
>>
>> Signed-off-by: Juan Quintela <quintela@redhat.com>
>>
>> --
>>
>> Move the printing of the error message so we can print the device
>> giving the error.
>> Add call to postcopy stuff
>> ---
>> include/migration/register.h | 2 ++
>> migration/savevm.c | 45 +++++++++++++++++++++++++++++++++++++++++++-
>> migration/savevm.h | 1 +
>> migration/trace-events | 2 ++
>> 4 files changed, 49 insertions(+), 1 deletion(-)
>>
>> diff --git a/include/migration/register.h b/include/migration/register.h
>> index 938ea2b..a0f1edd 100644
>> --- a/include/migration/register.h
>> +++ b/include/migration/register.h
>> @@ -39,6 +39,8 @@ typedef struct SaveVMHandlers {
>> uint64_t *non_postcopiable_pending,
>> uint64_t *postcopiable_pending);
>> LoadStateHandler *load_state;
>> + int (*load_setup)(QEMUFile *f, void *opaque);
>> + int (*load_cleanup)(void *opaque);
>> } SaveVMHandlers;
>>
>> int register_savevm_live(DeviceState *dev,
>> diff --git a/migration/savevm.c b/migration/savevm.c
>> index fee11c5..fdd15fa 100644
>> --- a/migration/savevm.c
>> +++ b/migration/savevm.c
>> @@ -1541,7 +1541,7 @@ static void *postcopy_ram_listen_thread(void *opaque)
>> * got a bad migration state).
>> */
>> migration_incoming_state_destroy();
>> -
>> + qemu_loadvm_state_cleanup();
>
> Is that order right? It seems wrong to call the cleanup
> code after MIS is destroyed.
> (The precopy path seems to call mis_destroy at the end of
> process_incoming_migration_bh which is much later).
we can do it either way; for now it doesn't matter.
Once there, it got me thinking that we are doing things in a very
"interesting" way on the incoming side:
(postcopy)
postcopy_ram_incoming_cleanup()
migration_incoming_state_destroy()
qemu_loadvm_state_cleanup()
(Ok, probably it is better to exchange the last two).
But I *think* that we should move the postcopy_ram_incoming_cleanup()
inside ram_load_cleanup(), no?
And we don't have a postcopy_ram_incoming_setup() We could put there the
mmap of mis->postcopy_tmp_zero_page and mis->largest_page_size, no?
I am trying to understand if the postcopy_ram_incoming_init() can be
moved soon, but I think no.
Later, Juan.
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [Qemu-devel] [PATCH v4 3/5] migration: Create load_setup()/cleanup() methods
2017-06-28 12:01 ` Juan Quintela
@ 2017-06-29 10:38 ` Dr. David Alan Gilbert
2017-06-29 11:37 ` Dr. David Alan Gilbert
2017-07-10 13:28 ` Dr. David Alan Gilbert
1 sibling, 1 reply; 12+ messages in thread
From: Dr. David Alan Gilbert @ 2017-06-29 10:38 UTC (permalink / raw)
To: Juan Quintela; +Cc: qemu-devel, lvivier, peterx, kwolf
* Juan Quintela (quintela@redhat.com) wrote:
> "Dr. David Alan Gilbert" <dgilbert@redhat.com> wrote:
> > * Juan Quintela (quintela@redhat.com) wrote:
> >> We need to do things at load time and at cleanup time.
> >>
> >> Signed-off-by: Juan Quintela <quintela@redhat.com>
> >>
> >> --
> >>
> >> Move the printing of the error message so we can print the device
> >> giving the error.
> >> Add call to postcopy stuff
> >> ---
> >> include/migration/register.h | 2 ++
> >> migration/savevm.c | 45 +++++++++++++++++++++++++++++++++++++++++++-
> >> migration/savevm.h | 1 +
> >> migration/trace-events | 2 ++
> >> 4 files changed, 49 insertions(+), 1 deletion(-)
> >>
> >> diff --git a/include/migration/register.h b/include/migration/register.h
> >> index 938ea2b..a0f1edd 100644
> >> --- a/include/migration/register.h
> >> +++ b/include/migration/register.h
> >> @@ -39,6 +39,8 @@ typedef struct SaveVMHandlers {
> >> uint64_t *non_postcopiable_pending,
> >> uint64_t *postcopiable_pending);
> >> LoadStateHandler *load_state;
> >> + int (*load_setup)(QEMUFile *f, void *opaque);
> >> + int (*load_cleanup)(void *opaque);
> >> } SaveVMHandlers;
> >>
> >> int register_savevm_live(DeviceState *dev,
> >> diff --git a/migration/savevm.c b/migration/savevm.c
> >> index fee11c5..fdd15fa 100644
> >> --- a/migration/savevm.c
> >> +++ b/migration/savevm.c
> >> @@ -1541,7 +1541,7 @@ static void *postcopy_ram_listen_thread(void *opaque)
> >> * got a bad migration state).
> >> */
> >> migration_incoming_state_destroy();
> >> -
> >> + qemu_loadvm_state_cleanup();
> >
> > Is that order right? It seems wrong to call the cleanup
> > code after MIS is destroyed.
> > (The precopy path seems to call mis_destroy at the end of
> > process_incoming_migration_bh which is much later).
>
> we can do either way, for now it don't matters.
>
> Once there, it got me thinking that we are doing things in a very
> "interesting" way on the incoming side:
>
> (postcopy)
>
> postcopy_ram_incoming_cleanup()
> migration_incoming_state_destroy()
> qemu_loadvm_state_cleanup()
>
> (Ok, probably it is better to exchange the last two).
>
> But I *think* that we should move the postcopy_ram_incoming_cleanup()
> inside ram_load_cleanup(), no?
postcopy_ram_incoming_cleanup shuts down a thread that's shared across
all RAMBlock's, so I don't think it can all be merged into
ram_load_cleanup. You might be able to do the equivalent of the
cleanup_range function.
> And we don't have a postcopy_ram_incoming_setup() We could put there the
> mmap of mis->postcopy_tmp_zero_page and mis->largest_page_size, no?
Again that's a single shared zero page, not per RAMBlock.
> I am trying to understand if the postcopy_ram_incoming_init() can be
> moved soon, but I think no.
Dave
>
> Later, Juan.
>
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [Qemu-devel] [PATCH v4 3/5] migration: Create load_setup()/cleanup() methods
2017-06-29 10:38 ` Dr. David Alan Gilbert
@ 2017-06-29 11:37 ` Dr. David Alan Gilbert
0 siblings, 0 replies; 12+ messages in thread
From: Dr. David Alan Gilbert @ 2017-06-29 11:37 UTC (permalink / raw)
To: Juan Quintela; +Cc: qemu-devel, lvivier, peterx, kwolf
* Dr. David Alan Gilbert (dgilbert@redhat.com) wrote:
> * Juan Quintela (quintela@redhat.com) wrote:
> > "Dr. David Alan Gilbert" <dgilbert@redhat.com> wrote:
> > > * Juan Quintela (quintela@redhat.com) wrote:
> > >> We need to do things at load time and at cleanup time.
> > >>
> > >> Signed-off-by: Juan Quintela <quintela@redhat.com>
> > >>
> > >> --
> > >>
> > >> Move the printing of the error message so we can print the device
> > >> giving the error.
> > >> Add call to postcopy stuff
> > >> ---
> > >> include/migration/register.h | 2 ++
> > >> migration/savevm.c | 45 +++++++++++++++++++++++++++++++++++++++++++-
> > >> migration/savevm.h | 1 +
> > >> migration/trace-events | 2 ++
> > >> 4 files changed, 49 insertions(+), 1 deletion(-)
> > >>
> > >> diff --git a/include/migration/register.h b/include/migration/register.h
> > >> index 938ea2b..a0f1edd 100644
> > >> --- a/include/migration/register.h
> > >> +++ b/include/migration/register.h
> > >> @@ -39,6 +39,8 @@ typedef struct SaveVMHandlers {
> > >> uint64_t *non_postcopiable_pending,
> > >> uint64_t *postcopiable_pending);
> > >> LoadStateHandler *load_state;
> > >> + int (*load_setup)(QEMUFile *f, void *opaque);
> > >> + int (*load_cleanup)(void *opaque);
> > >> } SaveVMHandlers;
> > >>
> > >> int register_savevm_live(DeviceState *dev,
> > >> diff --git a/migration/savevm.c b/migration/savevm.c
> > >> index fee11c5..fdd15fa 100644
> > >> --- a/migration/savevm.c
> > >> +++ b/migration/savevm.c
> > >> @@ -1541,7 +1541,7 @@ static void *postcopy_ram_listen_thread(void *opaque)
> > >> * got a bad migration state).
> > >> */
> > >> migration_incoming_state_destroy();
> > >> -
> > >> + qemu_loadvm_state_cleanup();
> > >
> > > Is that order right? It seems wrong to call the cleanup
> > > code after MIS is destroyed.
> > > (The precopy path seems to call mis_destroy at the end of
> > > process_incoming_migration_bh which is much later).
> >
> > we can do either way, for now it don't matters.
> >
> > Once there, it got me thinking that we are doing things in a very
> > "interesting" way on the incoming side:
> >
> > (postcopy)
> >
> > postcopy_ram_incoming_cleanup()
> > migration_incoming_state_destroy()
> > qemu_loadvm_state_cleanup()
> >
> > (Ok, probably it is better to exchange the last two).
> >
> > But I *think* that we should move the postcopy_ram_incoming_cleanup()
> > inside ram_load_cleanup(), no?
>
> postcopy_ram_incoming_cleanup shuts down a thread that's shared across
> all RAMBlock's, so I don't think it can all be merged into
> ram_load_cleanup. You might be able to do the equivalent of the
> cleanup_range function.
Actually that's wrong, we only call ram_load_cleanup once - because
RAM is special and is only register_savevm_live once, not per device.
So yes you probably can do that.
Dave
> > And we don't have a postcopy_ram_incoming_setup() We could put there the
> > mmap of mis->postcopy_tmp_zero_page and mis->largest_page_size, no?
>
> Again that's a single shared zero page, not per RAMBlock.
>
> > I am trying to understand if the postcopy_ram_incoming_init() can be
> > moved soon, but I think no.
>
> Dave
>
> >
> > Later, Juan.
> >
> >
> --
> Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [Qemu-devel] [PATCH v4 3/5] migration: Create load_setup()/cleanup() methods
2017-06-28 12:01 ` Juan Quintela
2017-06-29 10:38 ` Dr. David Alan Gilbert
@ 2017-07-10 13:28 ` Dr. David Alan Gilbert
1 sibling, 0 replies; 12+ messages in thread
From: Dr. David Alan Gilbert @ 2017-07-10 13:28 UTC (permalink / raw)
To: Juan Quintela; +Cc: qemu-devel, lvivier, peterx, kwolf
* Juan Quintela (quintela@redhat.com) wrote:
> "Dr. David Alan Gilbert" <dgilbert@redhat.com> wrote:
> > * Juan Quintela (quintela@redhat.com) wrote:
> >> We need to do things at load time and at cleanup time.
> >>
> >> Signed-off-by: Juan Quintela <quintela@redhat.com>
> >>
> >> --
> >>
> >> Move the printing of the error message so we can print the device
> >> giving the error.
> >> Add call to postcopy stuff
> >> ---
> >> include/migration/register.h | 2 ++
> >> migration/savevm.c | 45 +++++++++++++++++++++++++++++++++++++++++++-
> >> migration/savevm.h | 1 +
> >> migration/trace-events | 2 ++
> >> 4 files changed, 49 insertions(+), 1 deletion(-)
> >>
> >> diff --git a/include/migration/register.h b/include/migration/register.h
> >> index 938ea2b..a0f1edd 100644
> >> --- a/include/migration/register.h
> >> +++ b/include/migration/register.h
> >> @@ -39,6 +39,8 @@ typedef struct SaveVMHandlers {
> >> uint64_t *non_postcopiable_pending,
> >> uint64_t *postcopiable_pending);
> >> LoadStateHandler *load_state;
> >> + int (*load_setup)(QEMUFile *f, void *opaque);
> >> + int (*load_cleanup)(void *opaque);
> >> } SaveVMHandlers;
> >>
> >> int register_savevm_live(DeviceState *dev,
> >> diff --git a/migration/savevm.c b/migration/savevm.c
> >> index fee11c5..fdd15fa 100644
> >> --- a/migration/savevm.c
> >> +++ b/migration/savevm.c
> >> @@ -1541,7 +1541,7 @@ static void *postcopy_ram_listen_thread(void *opaque)
> >> * got a bad migration state).
> >> */
> >> migration_incoming_state_destroy();
> >> -
> >> + qemu_loadvm_state_cleanup();
> >
> > Is that order right? It seems wrong to call the cleanup
> > code after MIS is destroyed.
> > (The precopy path seems to call mis_destroy at the end of
> > process_incoming_migration_bh which is much later).
>
> we can do either way, for now it don't matters.
OK, yes, for now it doesn't matter, so:
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
> Once there, it got me thinking that we are doing things in a very
> "interesting" way on the incoming side:
>
> (postcopy)
>
> postcopy_ram_incoming_cleanup()
> migration_incoming_state_destroy()
> qemu_loadvm_state_cleanup()
>
> (Ok, probably it is better to exchange the last two).
>
> But I *think* that we should move the postcopy_ram_incoming_cleanup()
> inside ram_load_cleanup(), no?
>
> And we don't have a postcopy_ram_incoming_setup() We could put there the
> mmap of mis->postcopy_tmp_zero_page and mis->largest_page_size, no?
>
> I am trying to understand if the postcopy_ram_incoming_init() can be
> moved soon, but I think no.
>
> Later, Juan.
>
>
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
^ permalink raw reply [flat|nested] 12+ messages in thread
* [Qemu-devel] [PATCH v4 4/5] migration: Convert ram to use new load_setup()/load_cleanup()
2017-06-28 9:52 [Qemu-devel] [PATCH v4 0/5] Create setup/cleanup methods for migration incoming side Juan Quintela
` (2 preceding siblings ...)
2017-06-28 9:52 ` [Qemu-devel] [PATCH v4 3/5] migration: Create load_setup()/cleanup() methods Juan Quintela
@ 2017-06-28 9:52 ` Juan Quintela
2017-06-28 9:52 ` [Qemu-devel] [PATCH v4 5/5] migration: Make compression_threads use save/load_setup/cleanup() Juan Quintela
2017-07-10 13:32 ` [Qemu-devel] [PATCH v4 0/5] Create setup/cleanup methods for migration incoming side Dr. David Alan Gilbert
5 siblings, 0 replies; 12+ messages in thread
From: Juan Quintela @ 2017-06-28 9:52 UTC (permalink / raw)
To: qemu-devel; +Cc: dgilbert, lvivier, peterx, kwolf
Once there, I rename ram_migration_cleanup() to ram_save_cleanup().
Notice that this is the first pass, and I only passed XBZRLE to the
new scheme. Moved decoded_buf to inside XBZRLE struct.
As a bonus, I don't have to export xbzrle functions from ram.c.
Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
--
loaded_data pointer was needed because the callee can change it (dave)
spell loaded correctly in comment (dave)
---
migration/migration.c | 3 ---
migration/ram.c | 49 ++++++++++++++++++++++++++++++++++++-------------
migration/ram.h | 1 -
3 files changed, 36 insertions(+), 17 deletions(-)
diff --git a/migration/migration.c b/migration/migration.c
index 8ea35f1..e244876 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -357,9 +357,6 @@ static void process_incoming_migration_co(void *opaque)
migrate_decompress_threads_join();
exit(EXIT_FAILURE);
}
-
- free_xbzrle_decoded_buf();
-
mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
qemu_bh_schedule(mis->bh);
}
diff --git a/migration/ram.c b/migration/ram.c
index 649f76c..73aec34 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -85,11 +85,10 @@ static struct {
QemuMutex lock;
/* it will store a page full of zeros */
uint8_t *zero_target_page;
+ /* buffer used for XBZRLE decoding */
+ uint8_t *decoded_buf;
} XBZRLE;
-/* buffer used for XBZRLE decoding */
-static uint8_t *xbzrle_decoded_buf;
-
static void XBZRLE_cache_lock(void)
{
if (migrate_use_xbzrle())
@@ -1350,13 +1349,18 @@ uint64_t ram_bytes_total(void)
return total;
}
-void free_xbzrle_decoded_buf(void)
+static void xbzrle_load_setup(void)
{
- g_free(xbzrle_decoded_buf);
- xbzrle_decoded_buf = NULL;
+ XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
}
-static void ram_migration_cleanup(void *opaque)
+static void xbzrle_load_cleanup(void)
+{
+ g_free(XBZRLE.decoded_buf);
+ XBZRLE.decoded_buf = NULL;
+}
+
+static void ram_save_cleanup(void *opaque)
{
RAMState **rsp = opaque;
RAMBlock *block;
@@ -2078,11 +2082,6 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
int xh_flags;
uint8_t *loaded_data;
- if (!xbzrle_decoded_buf) {
- xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
- }
- loaded_data = xbzrle_decoded_buf;
-
/* extract RLE header */
xh_flags = qemu_get_byte(f);
xh_len = qemu_get_be16(f);
@@ -2096,7 +2095,9 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
error_report("Failed to load XBZRLE page - len overflow!");
return -1;
}
+ loaded_data = XBZRLE.decoded_buf;
/* load data and decode */
+ /* it can change loaded_data to point to an internal buffer */
qemu_get_buffer_in_place(f, &loaded_data, xh_len);
/* decode RLE */
@@ -2310,6 +2311,26 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
}
/**
+ * ram_load_setup: Setup RAM for migration incoming side
+ *
+ * Returns zero to indicate success and negative for error
+ *
+ * @f: QEMUFile where to receive the data
+ * @opaque: RAMState pointer
+ */
+static int ram_load_setup(QEMUFile *f, void *opaque)
+{
+ xbzrle_load_setup();
+ return 0;
+}
+
+static int ram_load_cleanup(void *opaque)
+{
+ xbzrle_load_cleanup();
+ return 0;
+}
+
+/**
* ram_postcopy_incoming_init: allocate postcopy data structures
*
* Returns 0 for success and negative if there was one error
@@ -2629,7 +2650,9 @@ static SaveVMHandlers savevm_ram_handlers = {
.save_live_complete_precopy = ram_save_complete,
.save_live_pending = ram_save_pending,
.load_state = ram_load,
- .save_cleanup = ram_migration_cleanup,
+ .save_cleanup = ram_save_cleanup,
+ .load_setup = ram_load_setup,
+ .load_cleanup = ram_load_cleanup,
};
void ram_mig_init(void)
diff --git a/migration/ram.h b/migration/ram.h
index 6272eb0..a8b79a4 100644
--- a/migration/ram.h
+++ b/migration/ram.h
@@ -47,7 +47,6 @@ void migrate_decompress_threads_join(void);
uint64_t ram_pagesize_summary(void);
int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len);
void acct_update_position(QEMUFile *f, size_t size, bool zero);
-void free_xbzrle_decoded_buf(void);
void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
unsigned long pages);
void ram_postcopy_migrated_memory_release(MigrationState *ms);
--
2.9.4
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [Qemu-devel] [PATCH v4 5/5] migration: Make compression_threads use save/load_setup/cleanup()
2017-06-28 9:52 [Qemu-devel] [PATCH v4 0/5] Create setup/cleanup methods for migration incoming side Juan Quintela
` (3 preceding siblings ...)
2017-06-28 9:52 ` [Qemu-devel] [PATCH v4 4/5] migration: Convert ram to use new load_setup()/load_cleanup() Juan Quintela
@ 2017-06-28 9:52 ` Juan Quintela
2017-07-10 13:32 ` [Qemu-devel] [PATCH v4 0/5] Create setup/cleanup methods for migration incoming side Dr. David Alan Gilbert
5 siblings, 0 replies; 12+ messages in thread
From: Juan Quintela @ 2017-06-28 9:52 UTC (permalink / raw)
To: qemu-devel; +Cc: dgilbert, lvivier, peterx, kwolf
Once there, be consistent and use
compress_thread_{save,load}_{setup,cleanup}.
Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
---
migration/migration.c | 5 -----
migration/ram.c | 12 ++++++++----
migration/ram.h | 5 -----
3 files changed, 8 insertions(+), 14 deletions(-)
diff --git a/migration/migration.c b/migration/migration.c
index e244876..9623dc1 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -291,7 +291,6 @@ static void process_incoming_migration_bh(void *opaque)
} else {
runstate_set(global_state_get_runstate());
}
- migrate_decompress_threads_join();
/*
* This must happen after any state changes since as soon as an external
* observer sees this event they might start to prod at the VM assuming
@@ -354,7 +353,6 @@ static void process_incoming_migration_co(void *opaque)
migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
MIGRATION_STATUS_FAILED);
error_report("load of migration failed: %s", strerror(-ret));
- migrate_decompress_threads_join();
exit(EXIT_FAILURE);
}
mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
@@ -365,7 +363,6 @@ void migration_fd_process_incoming(QEMUFile *f)
{
Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, f);
- migrate_decompress_threads_create();
qemu_file_set_blocking(f, false);
qemu_coroutine_enter(co);
}
@@ -832,7 +829,6 @@ static void migrate_fd_cleanup(void *opaque)
}
qemu_mutex_lock_iothread();
- migrate_compress_threads_join();
qemu_fclose(s->to_dst_file);
s->to_dst_file = NULL;
}
@@ -1995,7 +1991,6 @@ void migrate_fd_connect(MigrationState *s)
}
}
- migrate_compress_threads_create();
qemu_thread_create(&s->thread, "live_migration", migration_thread, s,
QEMU_THREAD_JOINABLE);
s->migration_thread_running = true;
diff --git a/migration/ram.c b/migration/ram.c
index 73aec34..1b08296 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -306,7 +306,7 @@ static inline void terminate_compression_threads(void)
}
}
-void migrate_compress_threads_join(void)
+static void compress_threads_save_cleanup(void)
{
int i, thread_count;
@@ -329,7 +329,7 @@ void migrate_compress_threads_join(void)
comp_param = NULL;
}
-void migrate_compress_threads_create(void)
+static void compress_threads_save_setup(void)
{
int i, thread_count;
@@ -1390,6 +1390,7 @@ static void ram_save_cleanup(void *opaque)
}
XBZRLE_cache_unlock();
migration_page_queue_free(*rsp);
+ compress_threads_save_cleanup();
g_free(*rsp);
*rsp = NULL;
}
@@ -1923,6 +1924,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
}
rcu_read_unlock();
+ compress_threads_save_setup();
ram_control_before_iterate(f, RAM_CONTROL_SETUP);
ram_control_after_iterate(f, RAM_CONTROL_SETUP);
@@ -2231,7 +2233,7 @@ static void wait_for_decompress_done(void)
qemu_mutex_unlock(&decomp_done_lock);
}
-void migrate_decompress_threads_create(void)
+static void compress_threads_load_setup(void)
{
int i, thread_count;
@@ -2255,7 +2257,7 @@ void migrate_decompress_threads_create(void)
}
}
-void migrate_decompress_threads_join(void)
+static void compress_threads_load_cleanup(void)
{
int i, thread_count;
@@ -2321,12 +2323,14 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
static int ram_load_setup(QEMUFile *f, void *opaque)
{
xbzrle_load_setup();
+ compress_threads_load_setup();
return 0;
}
static int ram_load_cleanup(void *opaque)
{
xbzrle_load_cleanup();
+ compress_threads_load_cleanup();
return 0;
}
diff --git a/migration/ram.h b/migration/ram.h
index a8b79a4..c081fde 100644
--- a/migration/ram.h
+++ b/migration/ram.h
@@ -39,11 +39,6 @@ int64_t xbzrle_cache_resize(int64_t new_size);
uint64_t ram_bytes_remaining(void);
uint64_t ram_bytes_total(void);
-void migrate_compress_threads_create(void);
-void migrate_compress_threads_join(void);
-void migrate_decompress_threads_create(void);
-void migrate_decompress_threads_join(void);
-
uint64_t ram_pagesize_summary(void);
int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len);
void acct_update_position(QEMUFile *f, size_t size, bool zero);
--
2.9.4
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [Qemu-devel] [PATCH v4 0/5] Create setup/cleanup methods for migration incoming side
2017-06-28 9:52 [Qemu-devel] [PATCH v4 0/5] Create setup/cleanup methods for migration incoming side Juan Quintela
` (4 preceding siblings ...)
2017-06-28 9:52 ` [Qemu-devel] [PATCH v4 5/5] migration: Make compression_threads use save/load_setup/cleanup() Juan Quintela
@ 2017-07-10 13:32 ` Dr. David Alan Gilbert
5 siblings, 0 replies; 12+ messages in thread
From: Dr. David Alan Gilbert @ 2017-07-10 13:32 UTC (permalink / raw)
To: Juan Quintela; +Cc: qemu-devel, lvivier, peterx, kwolf
* Juan Quintela (quintela@redhat.com) wrote:
> Hi
Queued.
> Changes from v4:
> - make sure that we call save_cleanup() in postcopy (dave).
>
> Please, review.
>
> [v3]
> - rename htab_cleanup htab_save_cleanup (dave)
> - loaded_data was needed because caller can need it (dave)
> - improve error message (dave)
>
> Please, review.
>
> [v2]
> - Kevin detected that I didn't called load_cleanup(), fix it.
> - Be consistent and change the naming of the functions so they are all:
> qemu_savevm_state_* or qemu_loadvm_state*
> - the traces still used the all names of _begin, instead of _setup,
> fix that.
>
> Please, review.
>
> Later, Juan.
>
> PD: Yes, now that the includes are internal, we could rename
> "qemu_savevm_state_" to something shorter, like "savevm_". The
> same for the loadvm counterparts. But I am not doing any such
> changes soon, too much churn for so little gain.
>
> [v1]
> This series make:
> - use of cleanup/save methods generic, not only for save_live methods
> - create the equivalent methods for the load side (load_setup/cleanup)
> - Make ram use these methods to see how/when they are used.
>
> Stefan, Kevin, these were the methods that you asked for the block.c
> migration, right? Please, comment if they are enough for you.
>
> Juan Quintela (5):
> migration: Rename save_live_setup() to save_setup()
> migration: Rename cleanup() to save_cleanup()
> migration: Create load_setup()/cleanup() methods
> migration: Convert ram to use new load_setup()/load_cleanup()
> migration: Make compression_threads use save/load_setup/cleanup()
>
> hw/ppc/spapr.c | 6 ++---
> include/migration/register.h | 6 +++--
> migration/block.c | 4 +--
> migration/colo.c | 2 +-
> migration/migration.c | 10 +------
> migration/ram.c | 63 +++++++++++++++++++++++++++++++-------------
> migration/ram.h | 6 -----
> migration/savevm.c | 61 +++++++++++++++++++++++++++++++++++-------
> migration/savevm.h | 3 ++-
> migration/trace-events | 4 ++-
> 10 files changed, 113 insertions(+), 52 deletions(-)
>
> --
> 2.9.4
>
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
^ permalink raw reply [flat|nested] 12+ messages in thread