From: Juan Quintela <quintela@redhat.com>
To: qemu-devel@nongnu.org
Cc: dgilbert@redhat.com, lvivier@redhat.com, peterx@redhat.com
Subject: [Qemu-devel] [PATCH v13 02/12] migration: Create multifd packet
Date: Wed, 23 May 2018 13:18:07 +0200
Message-ID: <20180523111817.1463-3-quintela@redhat.com>
In-Reply-To: <20180523111817.1463-1-quintela@redhat.com>

Create the multifd packet header and the helpers that fill it on the
send side and validate it on the receive side.  We still don't send
anything through the channels.

Signed-off-by: Juan Quintela <quintela@redhat.com>

--
fix magic (dave)
check offset/ramblock (dave)
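
For reference, a minimal standalone sketch of the layout described by the
MultiFDPacket_t added below, assuming a 64-bit ram_addr_t; the per-field
comments and the 16-pages-per-packet figure are illustrative only.  All
header fields go on the wire in big-endian byte order.

#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint32_t magic;          /* MULTIFD_MAGIC, sanity check on the stream   */
    uint32_t version;        /* MULTIFD_VERSION                              */
    uint32_t flags;          /* per-packet multifd flags                     */
    uint32_t size;           /* capacity, migrate_multifd_page_count()       */
    uint32_t used;           /* pages actually carried in this packet        */
    uint32_t seq;            /* global packet sequence number                */
    char     ramblock[256];  /* idstr of the RAMBlock, NUL padded            */
    uint64_t offset[];       /* 'used' page offsets inside that RAMBlock     */
} __attribute__((packed)) MultiFDPacket_t;

int main(void)
{
    /* header is 6 * 4 + 256 = 280 bytes; a packet carrying 16 pages needs
     * 280 + 16 * 8 = 408 bytes, which is what the packet_len computed in
     * multifd_save_setup()/multifd_load_setup() gives for a 16-page count */
    printf("header %zu bytes, 16-page packet %zu bytes\n",
           sizeof(MultiFDPacket_t),
           sizeof(MultiFDPacket_t) + 16 * sizeof(uint64_t));
    return 0;
}
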
---
 migration/ram.c | 145 +++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 144 insertions(+), 1 deletion(-)

diff --git a/migration/ram.c b/migration/ram.c
index 23cc5625eb..54350db8b0 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -510,6 +510,17 @@ typedef struct {
     uint8_t id;
 } __attribute__((packed)) MultiFDInit_t;
 
+typedef struct {
+    uint32_t magic;
+    uint32_t version;
+    uint32_t flags;
+    uint32_t size;
+    uint32_t used;
+    uint32_t seq;
+    char ramblock[256];
+    uint64_t offset[];
+} __attribute__((packed)) MultiFDPacket_t;
+
 typedef struct {
     /* number of used pages */
     uint32_t used;
@@ -544,6 +555,14 @@ typedef struct {
     bool quit;
     /* array of pages to sent */
     MultiFDPages_t *pages;
+    /* packet allocated len */
+    uint32_t packet_len;
+    /* pointer to the packet */
+    MultiFDPacket_t *packet;
+    /* multifd flags for each packet */
+    uint32_t flags;
+    /* global number of generated multifd packets */
+    uint32_t seq;
 }  MultiFDSendParams;
 
 typedef struct {
@@ -566,6 +585,14 @@ typedef struct {
     bool quit;
     /* array of pages to receive */
     MultiFDPages_t *pages;
+    /* packet allocated len */
+    uint32_t packet_len;
+    /* pointer to the packet */
+    MultiFDPacket_t *packet;
+    /* multifd flags for each packet */
+    uint32_t flags;
+    /* global number of generated multifd packets */
+    uint32_t seq;
 } MultiFDRecvParams;
 
 static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
@@ -654,6 +681,99 @@ static void multifd_pages_clear(MultiFDPages_t *pages)
     g_free(pages);
 }
 
+static void multifd_send_fill_packet(MultiFDSendParams *p)
+{
+    MultiFDPacket_t *packet = p->packet;
+    int i;
+
+    packet->magic = cpu_to_be32(MULTIFD_MAGIC);
+    packet->version = cpu_to_be32(MULTIFD_VERSION);
+    packet->flags = cpu_to_be32(p->flags);
+    packet->size = cpu_to_be32(migrate_multifd_page_count());
+    packet->used = cpu_to_be32(p->pages->used);
+    packet->seq = cpu_to_be32(p->seq);
+
+    if (p->pages->block) {
+        strncpy(packet->ramblock, p->pages->block->idstr, 256);
+    }
+
+    for (i = 0; i < p->pages->used; i++) {
+        packet->offset[i] = cpu_to_be64(p->pages->offset[i]);
+    }
+}
+
+static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
+{
+    MultiFDPacket_t *packet = p->packet;
+    RAMBlock *block;
+    int i;
+
+    /* TODO: we can't use this until we have actually received a message */
+    return 0;
+
+    be32_to_cpus(&packet->magic);
+    if (packet->magic != MULTIFD_MAGIC) {
+        error_setg(errp, "multifd: received packet "
+                   "magic %x and expected magic %x",
+                   packet->magic, MULTIFD_MAGIC);
+        return -1;
+    }
+
+    be32_to_cpus(&packet->version);
+    if (packet->version != MULTIFD_VERSION) {
+        error_setg(errp, "multifd: received packet "
+                   "version %d and expected version %d",
+                   packet->version, MULTIFD_VERSION);
+        return -1;
+    }
+
+    p->flags = be32_to_cpu(packet->flags);
+
+    be32_to_cpus(&packet->size);
+    if (packet->size > migrate_multifd_page_count()) {
+        error_setg(errp, "multifd: received packet "
+                   "with size %d and expected maximum size %d",
+                   packet->size, migrate_multifd_page_count());
+        return -1;
+    }
+
+    p->pages->used = be32_to_cpu(packet->used);
+    if (p->pages->used > packet->size) {
+        error_setg(errp, "multifd: received packet "
+                   "with %d pages and expected maximum pages %d",
+                   p->pages->used, packet->size);
+        return -1;
+    }
+
+    p->seq = be32_to_cpu(packet->seq);
+
+    if (p->pages->used) {
+        /* make sure that ramblock is 0 terminated */
+        packet->ramblock[255] = 0;
+        block = qemu_ram_block_by_name(packet->ramblock);
+        if (!block) {
+            error_setg(errp, "multifd: unknown ram block %s",
+                       packet->ramblock);
+            return -1;
+        }
+    }
+
+    for (i = 0; i < p->pages->used; i++) {
+        ram_addr_t offset = be64_to_cpu(packet->offset[i]);
+
+        if (offset > (block->used_length - TARGET_PAGE_SIZE)) {
+            error_setg(errp, "multifd: offset too long %" PRId64
+                       " (max %" PRId64 ")",
+                       offset, block->used_length);
+            return -1;
+        }
+        p->pages->iov[i].iov_base = block->host + offset;
+        p->pages->iov[i].iov_len = TARGET_PAGE_SIZE;
+    }
+
+    return 0;
+}
+
 struct {
     MultiFDSendParams *params;
     /* number of created threads */
@@ -711,6 +831,9 @@ int multifd_save_cleanup(Error **errp)
         p->name = NULL;
         multifd_pages_clear(p->pages);
         p->pages = NULL;
+        p->packet_len = 0;
+        g_free(p->packet);
+        p->packet = NULL;
     }
     g_free(multifd_send_state->params);
     multifd_send_state->params = NULL;
@@ -732,6 +855,7 @@ static void *multifd_send_thread(void *opaque)
 
     while (true) {
         qemu_mutex_lock(&p->mutex);
+        multifd_send_fill_packet(p);
         if (p->quit) {
             qemu_mutex_unlock(&p->mutex);
             break;
@@ -796,6 +920,9 @@ int multifd_save_setup(void)
         p->quit = false;
         p->id = i;
         p->pages = multifd_pages_init(page_count);
+        p->packet_len = sizeof(MultiFDPacket_t)
+                      + sizeof(ram_addr_t) * page_count;
+        p->packet = g_malloc0(p->packet_len);
         p->name = g_strdup_printf("multifdsend_%d", i);
         socket_send_channel_create(multifd_new_send_channel_async, p);
     }
@@ -855,6 +982,9 @@ int multifd_load_cleanup(Error **errp)
         p->name = NULL;
         multifd_pages_clear(p->pages);
         p->pages = NULL;
+        p->packet_len = 0;
+        g_free(p->packet);
+        p->packet = NULL;
     }
     g_free(multifd_recv_state->params);
     multifd_recv_state->params = NULL;
@@ -867,10 +997,20 @@ int multifd_load_cleanup(Error **errp)
 static void *multifd_recv_thread(void *opaque)
 {
     MultiFDRecvParams *p = opaque;
+    Error *local_err = NULL;
+    int ret;
 
     while (true) {
         qemu_mutex_lock(&p->mutex);
-        if (p->quit) {
+        if (false) {
+            /* TODO: packet reception goes here */
+
+            ret = multifd_recv_unfill_packet(p, &local_err);
+            qemu_mutex_unlock(&p->mutex);
+            if (ret) {
+                break;
+            }
+        } else if (p->quit) {
             qemu_mutex_unlock(&p->mutex);
             break;
         }
@@ -907,6 +1047,9 @@ int multifd_load_setup(void)
         p->quit = false;
         p->id = i;
         p->pages = multifd_pages_init(page_count);
+        p->packet_len = sizeof(MultiFDPacket_t)
+                      + sizeof(ram_addr_t) * page_count;
+        p->packet = g_malloc0(p->packet_len);
         p->name = g_strdup_printf("multifdrecv_%d", i);
     }
     return 0;
-- 
2.17.0

Thread overview: 32+ messages
2018-05-23 11:18 [Qemu-devel] [PATCH v13 00/12] Multifd Juan Quintela
2018-05-23 11:18 ` [Qemu-devel] [PATCH v13 01/12] migration: Create multipage support Juan Quintela
2018-06-11 16:58   ` Dr. David Alan Gilbert
2018-05-23 11:18 ` Juan Quintela [this message]
2018-06-11 11:10   ` [Qemu-devel] [PATCH v13 02/12] migration: Create multifd packet Dr. David Alan Gilbert
2018-05-23 11:18 ` [Qemu-devel] [PATCH v13 03/12] migration: Add multifd traces for start/end thread Juan Quintela
2018-05-31 16:11   ` Dr. David Alan Gilbert
2018-05-23 11:18 ` [Qemu-devel] [PATCH v13 04/12] migration: Calculate transferred ram correctly Juan Quintela
2018-05-31 17:14   ` Dr. David Alan Gilbert
2018-05-23 11:18 ` [Qemu-devel] [PATCH v13 05/12] migration: Multifd channels always wait on the sem Juan Quintela
2018-06-11 17:13   ` Dr. David Alan Gilbert
2018-05-23 11:18 ` [Qemu-devel] [PATCH v13 06/12] migration: Add block where to send/receive packets Juan Quintela
2018-05-23 11:18 ` [Qemu-devel] [PATCH v13 07/12] migration: Synchronize multifd threads with main thread Juan Quintela
2018-06-11 11:53   ` Dr. David Alan Gilbert
2018-05-23 11:18 ` [Qemu-devel] [PATCH v13 08/12] migration: Create ram_save_multifd_page Juan Quintela
2018-06-11 17:33   ` Dr. David Alan Gilbert
2018-05-23 11:18 ` [Qemu-devel] [PATCH v13 09/12] migration: Start sending messages Juan Quintela
2018-06-11 15:49   ` Dr. David Alan Gilbert
2018-05-23 11:18 ` [Qemu-devel] [PATCH v13 10/12] migration: Wait for blocking IO Juan Quintela
2018-06-11 15:54   ` Dr. David Alan Gilbert
2018-05-23 11:18 ` [Qemu-devel] [PATCH v13 11/12] migration: Remove not needed semaphore and quit Juan Quintela
2018-06-11 16:45   ` Dr. David Alan Gilbert
2018-06-20  7:20     ` Juan Quintela
2018-06-20  9:01       ` Dr. David Alan Gilbert
2018-06-20  9:42         ` Juan Quintela
2018-06-20  9:46           ` Dr. David Alan Gilbert
2018-06-20  9:48             ` Juan Quintela
2018-06-20 10:38               ` Dr. David Alan Gilbert
2018-06-20 11:07                 ` Juan Quintela
2018-06-20 11:25                   ` Dr. David Alan Gilbert
2018-05-23 11:18 ` [Qemu-devel] [PATCH v13 12/12] migration: Stop sending whole pages through main channel Juan Quintela
2018-05-23 11:41 ` [Qemu-devel] [PATCH v13 00/12] Multifd no-reply
