From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
To: Juan Quintela <quintela@redhat.com>
Cc: qemu-devel@nongnu.org, lvivier@redhat.com, peterx@redhat.com,
	berrange@redhat.com
Subject: Re: [Qemu-devel] [PATCH v5 07/17] migration: Create multifd migration threads
Date: Wed, 19 Jul 2017 17:49:45 +0100
Message-ID: <20170719164945.GH3500@work-vm>
In-Reply-To: <20170717134238.1966-8-quintela@redhat.com>

* Juan Quintela (quintela@redhat.com) wrote:
> Creation of the threads, nothing inside yet.
> 
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> 
> --
> 
> Use pointers instead of long array names
> Move to using semaphores instead of condition variables, as Paolo suggested
> 
> Put all the state inside one struct.
> Use a counter for the number of threads created.  Needed during cancellation.
> 
> Add error return to thread creation
> 
> Add id field
> 
> Rename functions to multifd_save/load_setup/cleanup
> ---
>  migration/migration.c |  14 ++++
>  migration/ram.c       | 192 ++++++++++++++++++++++++++++++++++++++++++++++++++
>  migration/ram.h       |   5 ++
>  3 files changed, 211 insertions(+)
> 
> diff --git a/migration/migration.c b/migration/migration.c
> index ff3fc9d..5a82c1c 100644
> --- a/migration/migration.c
> +++ b/migration/migration.c
> @@ -288,6 +288,7 @@ static void process_incoming_migration_bh(void *opaque)
>      } else {
>          runstate_set(global_state_get_runstate());
>      }
> +    multifd_load_cleanup();
>      /*
>       * This must happen after any state changes since as soon as an external
>       * observer sees this event they might start to prod at the VM assuming
> @@ -348,6 +349,7 @@ static void process_incoming_migration_co(void *opaque)
>          migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
>                            MIGRATION_STATUS_FAILED);
>          error_report("load of migration failed: %s", strerror(-ret));
> +        multifd_load_cleanup();
>          exit(EXIT_FAILURE);
>      }
>      mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
> @@ -358,6 +360,11 @@ void migration_fd_process_incoming(QEMUFile *f)
>  {
>      Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, f);
>  
> +    if (multifd_load_setup() != 0) {
> +        /* We haven't been able to create multifd threads;
> +           nothing better to do */
> +        exit(EXIT_FAILURE);
> +    }
>      qemu_file_set_blocking(f, false);
>      qemu_coroutine_enter(co);
>  }
> @@ -860,6 +867,7 @@ static void migrate_fd_cleanup(void *opaque)
>          }
>          qemu_mutex_lock_iothread();
>  
> +        multifd_save_cleanup();
>          qemu_fclose(s->to_dst_file);
>          s->to_dst_file = NULL;
>      }
> @@ -2049,6 +2057,12 @@ void migrate_fd_connect(MigrationState *s)
>          }
>      }
>  
> +    if (multifd_save_setup() != 0) {
> +        migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
> +                          MIGRATION_STATUS_FAILED);
> +        migrate_fd_cleanup(s);
> +        return;
> +    }
>      qemu_thread_create(&s->thread, "live_migration", migration_thread, s,
>                         QEMU_THREAD_JOINABLE);
>      s->migration_thread_running = true;
> diff --git a/migration/ram.c b/migration/ram.c
> index 1b08296..8e87533 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -356,6 +356,198 @@ static void compress_threads_save_setup(void)
>      }
>  }
>  
> +/* Multiple fds */
> +
> +struct MultiFDSendParams {
> +    uint8_t id;
> +    QemuThread thread;
> +    QemuSemaphore sem;
> +    QemuMutex mutex;
> +    bool quit;
> +};
> +typedef struct MultiFDSendParams MultiFDSendParams;
> +
> +struct {
> +    MultiFDSendParams *params;
> +    /* number of created threads */
> +    int count;
> +} *multifd_send_state;
> +
> +static void terminate_multifd_send_threads(void)
> +{
> +    int i;
> +
> +    for (i = 0; i < multifd_send_state->count; i++) {
> +        MultiFDSendParams *p = &multifd_send_state->params[i];
> +
> +        qemu_mutex_lock(&p->mutex);
> +        p->quit = true;
> +        qemu_sem_post(&p->sem);
> +        qemu_mutex_unlock(&p->mutex);

I don't think you need that lock/unlock pair - as long as no one
else can be going around setting them back to false; so as long as
you know you're safely past initialisation and no one is trying to
start a new migration at the moment, I think it's safe.

> +    }
> +}
> +
> +void multifd_save_cleanup(void)
> +{
> +    int i;
> +
> +    if (!migrate_use_multifd()) {
> +        return;
> +    }
> +    terminate_multifd_send_threads();
> +    for (i = 0; i < multifd_send_state->count; i++) {
> +        MultiFDSendParams *p = &multifd_send_state->params[i];
> +
> +        qemu_thread_join(&p->thread);
> +        qemu_mutex_destroy(&p->mutex);
> +        qemu_sem_destroy(&p->sem);
> +    }
> +    g_free(multifd_send_state->params);
> +    multifd_send_state->params = NULL;
> +    g_free(multifd_send_state);
> +    multifd_send_state = NULL;

I'd be tempted to add a few traces around here, and also some
protection against it being called twice.  Maybe it shouldn't
happen, but it would be nice to debug it when it does.
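
e.g. something like this (untested, and the trace point is made up -
it would need a matching entry adding to migration/trace-events):

    void multifd_save_cleanup(void)
    {
        int i;

        if (!migrate_use_multifd() || !multifd_send_state) {
            /* Called twice, or setup never ran - nothing to do */
            return;
        }
        trace_multifd_save_cleanup();
        terminate_multifd_send_threads();
        ...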

> +}
> +
> +static void *multifd_send_thread(void *opaque)
> +{
> +    MultiFDSendParams *p = opaque;
> +
> +    while (true) {
> +        qemu_mutex_lock(&p->mutex);
> +        if (p->quit) {
> +            qemu_mutex_unlock(&p->mutex);
> +            break;
> +        }
> +        qemu_mutex_unlock(&p->mutex);
> +        qemu_sem_wait(&p->sem);

Similar to above, I don't think you need those
locks around the quit check.
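
i.e. the loop could just be (untested):

    while (true) {
        /* Reading quit without the lock should be fine here; it
         * only ever goes false->true after setup, and the sem_post
         * in terminate provides the wakeup */
        if (p->quit) {
            break;
        }
        qemu_sem_wait(&p->sem);
    }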

> +    }
> +
> +    return NULL;
> +}
> +
> +int multifd_save_setup(void)
> +{
> +    int thread_count;
> +    uint8_t i;
> +
> +    if (!migrate_use_multifd()) {
> +        return 0;
> +    }
> +    thread_count = migrate_multifd_threads();
> +    multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
> +    multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
> +    multifd_send_state->count = 0;
> +    for (i = 0; i < thread_count; i++) {
> +        char thread_name[16];
> +        MultiFDSendParams *p = &multifd_send_state->params[i];
> +
> +        qemu_mutex_init(&p->mutex);
> +        qemu_sem_init(&p->sem, 0);
> +        p->quit = false;
> +        p->id = i;
> +        snprintf(thread_name, sizeof(thread_name), "multifdsend_%d", i);
> +        qemu_thread_create(&p->thread, thread_name, multifd_send_thread, p,
> +                           QEMU_THREAD_JOINABLE);
> +        multifd_send_state->count++;
> +    }
> +    return 0;
> +}
> +
> +struct MultiFDRecvParams {
> +    uint8_t id;
> +    QemuThread thread;
> +    QemuSemaphore sem;
> +    QemuMutex mutex;
> +    bool quit;
> +};
> +typedef struct MultiFDRecvParams MultiFDRecvParams;
> +
> +struct {
> +    MultiFDRecvParams *params;
> +    /* number of created threads */
> +    int count;
> +} *multifd_recv_state;
> +
> +static void terminate_multifd_recv_threads(void)
> +{
> +    int i;
> +
> +    for (i = 0; i < multifd_recv_state->count; i++) {
> +        MultiFDRecvParams *p = &multifd_recv_state->params[i];
> +
> +        qemu_mutex_lock(&p->mutex);
> +        p->quit = true;
> +        qemu_sem_post(&p->sem);
> +        qemu_mutex_unlock(&p->mutex);
> +    }
> +}
> +
> +void multifd_load_cleanup(void)
> +{
> +    int i;
> +
> +    if (!migrate_use_multifd()) {
> +        return;
> +    }
> +    terminate_multifd_recv_threads();
> +    for (i = 0; i < multifd_recv_state->count; i++) {
> +        MultiFDRecvParams *p = &multifd_recv_state->params[i];
> +
> +        qemu_thread_join(&p->thread);
> +        qemu_mutex_destroy(&p->mutex);
> +        qemu_sem_destroy(&p->sem);
> +    }
> +    g_free(multifd_recv_state->params);
> +    multifd_recv_state->params = NULL;
> +    g_free(multifd_recv_state);
> +    multifd_recv_state = NULL;
> +}
> +
> +static void *multifd_recv_thread(void *opaque)
> +{
> +    MultiFDRecvParams *p = opaque;
> +
> +    while (true) {
> +        qemu_mutex_lock(&p->mutex);
> +        if (p->quit) {
> +            qemu_mutex_unlock(&p->mutex);
> +            break;
> +        }
> +        qemu_mutex_unlock(&p->mutex);
> +        qemu_sem_wait(&p->sem);
> +    }
> +
> +    return NULL;
> +}
> +
> +int multifd_load_setup(void)
> +{
> +    int thread_count;
> +    uint8_t i;
> +
> +    if (!migrate_use_multifd()) {
> +        return 0;
> +    }
> +    thread_count = migrate_multifd_threads();
> +    multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
> +    multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
> +    multifd_recv_state->count = 0;
> +    for (i = 0; i < thread_count; i++) {
> +        char thread_name[16];
> +        MultiFDRecvParams *p = &multifd_recv_state->params[i];
> +
> +        qemu_mutex_init(&p->mutex);
> +        qemu_sem_init(&p->sem, 0);
> +        p->quit = false;
> +        p->id = i;
> +        snprintf(thread_name, sizeof(thread_name), "multifdrecv_%d", i);
> +        qemu_thread_create(&p->thread, thread_name, multifd_recv_thread, p,
> +                           QEMU_THREAD_JOINABLE);
> +        multifd_recv_state->count++;
> +    }
> +    return 0;
> +}
> +

(It's a shame there's no way to wrap this boilerplate up and share
it between the send/receive threads).
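
e.g. the common fields could go in a shared struct with one
terminate helper (sketch only, names made up):

    typedef struct {
        uint8_t id;
        QemuThread thread;
        QemuSemaphore sem;
        QemuMutex mutex;
        bool quit;
    } MultiFDThreadParams;

    static void multifd_terminate_threads(MultiFDThreadParams *params,
                                          int count)
    {
        int i;

        for (i = 0; i < count; i++) {
            MultiFDThreadParams *p = &params[i];

            p->quit = true;
            qemu_sem_post(&p->sem);
        }
    }

MultiFDSendParams and MultiFDRecvParams could then embed (or just
be) that struct, with any send/recv specific state hanging off the
side.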

However, all the above is minor, so:


Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

>  /**
>   * save_page_header: write page header to wire
>   *
> diff --git a/migration/ram.h b/migration/ram.h
> index c081fde..93c2bb4 100644
> --- a/migration/ram.h
> +++ b/migration/ram.h
> @@ -39,6 +39,11 @@ int64_t xbzrle_cache_resize(int64_t new_size);
>  uint64_t ram_bytes_remaining(void);
>  uint64_t ram_bytes_total(void);
>  
> +int multifd_save_setup(void);
> +void multifd_save_cleanup(void);
> +int multifd_load_setup(void);
> +void multifd_load_cleanup(void);
> +
>  uint64_t ram_pagesize_summary(void);
>  int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len);
>  void acct_update_position(QEMUFile *f, size_t size, bool zero);
> -- 
> 2.9.4
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
