From: Olaf Hering <olaf@aepfle.de>
To: xen-devel@lists.xenproject.org
Cc: Olaf Hering <olaf@aepfle.de>, Ian Jackson <iwj@xenproject.org>,
Wei Liu <wl@xen.org>, Juergen Gross <jgross@suse.com>
Subject: [PATCH v20210713 22/31] tools: restore: write data directly into guest
Date: Tue, 13 Jul 2021 20:05:56 +0200
Message-ID: <20210713180605.12096-23-olaf@aepfle.de>
In-Reply-To: <20210713180605.12096-1-olaf@aepfle.de>
Read the incoming migration stream directly into guest memory. This
avoids the intermediate allocation and copy of each page, and the
resulting performance penalty. When the sender has enabled verify mode,
the page data is read into a scratch buffer instead and compared against
the memory already present in the guest.
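The idea, reduced to a sketch (simplified, with made-up names, and not the
code in this patch; the real code below also skips unpopulated pfns and
handles verify mode): instead of reading a batch into a bounce buffer and
copying each page into the mapping, point one iovec at every mapped guest
page and let readv_exact() place the data there in a single scatter read.

#include <sys/uio.h>    /* struct iovec */

/*
 * Simplified sketch only: 'fd' is the migration stream, 'guest_page[i]'
 * the already mapped destination pages, readv_exact() is the helper
 * added earlier in this series, PAGE_SIZE comes from libxenctrl internals.
 */
static int read_batch_into_guest(int fd, void **guest_page,
                                 struct iovec *iov, unsigned int count)
{
    unsigned int i;

    for ( i = 0; i < count; i++ )
    {
        iov[i].iov_base = guest_page[i];  /* read target is the guest page */
        iov[i].iov_len = PAGE_SIZE;
    }

    /* One scatter read from the stream, no bounce buffer, no memcpy(). */
    return readv_exact(fd, iov, count);
}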
Signed-off-by: Olaf Hering <olaf@aepfle.de>
---
 tools/libs/guest/xg_sr_common.h  |   3 +
 tools/libs/guest/xg_sr_restore.c | 155 ++++++++++++++++++++++++++++++-
 2 files changed, 153 insertions(+), 5 deletions(-)
diff --git a/tools/libs/guest/xg_sr_common.h b/tools/libs/guest/xg_sr_common.h
index 020b41ea8b..8981c21acd 100644
--- a/tools/libs/guest/xg_sr_common.h
+++ b/tools/libs/guest/xg_sr_common.h
@@ -263,6 +263,8 @@ struct xc_sr_context
             xen_pfn_t *pp_pfns;
             xen_pfn_t *pp_mfns;
             void **guest_data;
+            struct iovec *iov;
+            struct xc_sr_rec_page_data_header *pages;
 
             void *guest_mapping;
             uint32_t nr_mapped_pages;
@@ -311,6 +313,7 @@ struct xc_sr_context
 
             /* Sender has invoked verify mode on the stream. */
             bool verify;
+            void *verify_buf;
         } restore;
     };
diff --git a/tools/libs/guest/xg_sr_restore.c b/tools/libs/guest/xg_sr_restore.c
index 777d005fbd..d4012bcffd 100644
--- a/tools/libs/guest/xg_sr_restore.c
+++ b/tools/libs/guest/xg_sr_restore.c
@@ -381,6 +381,129 @@ err:
     return rc;
 }
 
+/*
+ * Handle PAGE_DATA record directly from the incoming stream.
+ * Given a list of pfns and their types from the stream, populate and record
+ * their types, map the relevant subset and read the page data from the
+ * stream directly into the guest.
+ */
+static int handle_incoming_page_data(struct xc_sr_context *ctx,
+                                     struct xc_sr_rhdr *rhdr)
+{
+    xc_interface *xch = ctx->xch;
+    struct xc_sr_rec_page_data_header *pages = ctx->restore.pages;
+    uint64_t *pfn_nums = &pages->pfn[0];
+    uint32_t i;
+    int rc, iov_idx;
+
+    rc = handle_static_data_end_v2(ctx);
+    if ( rc )
+        goto err;
+
+    /* First read and verify the header */
+    rc = read_exact(ctx->fd, pages, sizeof(*pages));
+    if ( rc )
+    {
+        PERROR("Could not read rec_pfn header");
+        goto err;
+    }
+
+    if ( !verify_rec_page_hdr(ctx, rhdr->length, pages) )
+    {
+        rc = -1;
+        goto err;
+    }
+
+    /* Then read and verify the incoming pfn numbers */
+    rc = read_exact(ctx->fd, pfn_nums, sizeof(*pfn_nums) * pages->count);
+    if ( rc )
+    {
+        PERROR("Could not read rec_pfn data");
+        goto err;
+    }
+
+    if ( !verify_rec_page_pfns(ctx, rhdr->length, pages) )
+    {
+        rc = -1;
+        goto err;
+    }
+
+    /* Finally read and verify the incoming pfn data */
+    rc = map_guest_pages(ctx, pages);
+    if ( rc )
+        goto err;
+
+    /* Prepare read buffers, either guest or throw-away memory */
+    for ( i = 0, iov_idx = 0; i < pages->count; i++ )
+    {
+        struct iovec *iov;
+
+        if ( !ctx->restore.guest_data[i] )
+            continue;
+
+        iov = &ctx->restore.iov[iov_idx];
+        iov->iov_len = PAGE_SIZE;
+        if ( ctx->restore.verify )
+            iov->iov_base = ctx->restore.verify_buf + (i * PAGE_SIZE);
+        else
+            iov->iov_base = ctx->restore.guest_data[i];
+        iov_idx++;
+    }
+
+    if ( !iov_idx )
+        goto done;
+
+    rc = readv_exact(ctx->fd, ctx->restore.iov, iov_idx);
+    if ( rc )
+    {
+        PERROR("read of %d pages failed", iov_idx);
+        goto err;
+    }
+
+    /* Post-processing of pfn data */
+    for ( i = 0, iov_idx = 0; i < pages->count; i++ )
+    {
+        void *addr;
+
+        if ( !ctx->restore.guest_data[i] )
+            continue;
+
+        addr = ctx->restore.iov[iov_idx].iov_base;
+        rc = ctx->restore.ops.localise_page(ctx, ctx->restore.types[i], addr);
+        if ( rc )
+        {
+            ERROR("Failed to localise pfn %#"PRIpfn" (type %#"PRIx32")",
+                  ctx->restore.pfns[i],
+                  ctx->restore.types[i] >> XEN_DOMCTL_PFINFO_LTAB_SHIFT);
+            goto err;
+        }
+
+        if ( ctx->restore.verify )
+        {
+            if ( memcmp(ctx->restore.guest_data[i], addr, PAGE_SIZE) )
+            {
+                ERROR("verify pfn %#"PRIpfn" failed (type %#"PRIx32")",
+                      ctx->restore.pfns[i],
+                      ctx->restore.types[i] >> XEN_DOMCTL_PFINFO_LTAB_SHIFT);
+            }
+        }
+
+        iov_idx++;
+    }
+
+done:
+    rc = 0;
+
+err:
+    if ( ctx->restore.guest_mapping )
+    {
+        xenforeignmemory_unmap(xch->fmem, ctx->restore.guest_mapping,
+                               ctx->restore.nr_mapped_pages);
+        ctx->restore.guest_mapping = NULL;
+    }
+    return rc;
+}
+
 /*
  * Handle PAGE_DATA record from an existing buffer
  * Given a list of pfns, their types, and a block of page data from the
@@ -726,6 +849,15 @@ static int process_buffered_record(struct xc_sr_context *ctx, struct xc_sr_recor
     case REC_TYPE_VERIFY:
         DPRINTF("Verify mode enabled");
         ctx->restore.verify = true;
+        if ( !ctx->restore.verify_buf )
+        {
+            ctx->restore.verify_buf = malloc(MAX_BATCH_SIZE * PAGE_SIZE);
+            if ( !ctx->restore.verify_buf )
+            {
+                PERROR("Unable to allocate verify_buf");
+                rc = -1;
+            }
+        }
         break;
 
     case REC_TYPE_CHECKPOINT:
@@ -752,11 +884,19 @@ static int process_incoming_record_header(struct xc_sr_context *ctx, struct xc_s
     struct xc_sr_record rec;
     int rc;
 
-    rc = read_record_data(ctx, ctx->fd, rhdr, &rec);
-    if ( rc )
-        return rc;
+    switch ( rhdr->type )
+    {
+    case REC_TYPE_PAGE_DATA:
+        rc = handle_incoming_page_data(ctx, rhdr);
+        break;
+    default:
+        rc = read_record_data(ctx, ctx->fd, rhdr, &rec);
+        if ( rc == 0 )
+            rc = process_buffered_record(ctx, &rec);
+        break;
+    }
 
-    return process_buffered_record(ctx, &rec);
+    return rc;
 }
@@ -801,9 +941,12 @@ static int setup(struct xc_sr_context *ctx)
     ctx->restore.pp_pfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.pp_pfns));
     ctx->restore.pp_mfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.pp_mfns));
     ctx->restore.guest_data = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.guest_data));
+    ctx->restore.iov = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.iov));
+    ctx->restore.pages = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.pages->pfn) + sizeof(*ctx->restore.pages));
     if ( !ctx->restore.pfns || !ctx->restore.types || !ctx->restore.mfns ||
          !ctx->restore.map_errs || !ctx->restore.pp_pfns ||
-         !ctx->restore.pp_mfns || !ctx->restore.guest_data )
+         !ctx->restore.pp_mfns || !ctx->restore.guest_data ||
+         !ctx->restore.iov || !ctx->restore.pages )
     {
         ERROR("Unable to allocate memory");
         rc = -1;
@@ -840,6 +983,8 @@ static void cleanup(struct xc_sr_context *ctx)
     free(ctx->restore.buffered_records);
     free(ctx->restore.populated_pfns);
+    free(ctx->restore.pages);
+    free(ctx->restore.iov);
     free(ctx->restore.guest_data);
     free(ctx->restore.pp_mfns);
     free(ctx->restore.pp_pfns);
Thread overview: 32+ messages
2021-07-13 18:05 [PATCH v20210713 00/31] leftover from 2020 Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 01/31] tools: fix make rpmball Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 02/31] hotplug/Linux: fix starting of xenstored with restarting systemd Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 03/31] tools: add API to work with sevaral bits at once Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 04/31] xl: fix description of migrate --debug Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 05/31] tools: add readv_exact to libxenctrl Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 06/31] tools: show migration transfer rate in send_dirty_pages Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 07/31] tools: save: preallocate mfns array Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 08/31] tools: save: preallocate types array Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 09/31] tools: save: preallocate errors array Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 10/31] tools: save: preallocate iov array Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 11/31] tools: save: preallocate rec_pfns array Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 12/31] tools: save: preallocate guest_data array Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 13/31] tools: save: preallocate local_pages array Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 14/31] tools: restore: preallocate pfns array Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 15/31] tools: restore: preallocate types array Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 16/31] tools: restore: preallocate mfns array Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 17/31] tools: restore: preallocate map_errs array Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 18/31] tools: restore: preallocate populate_pfns pfns array Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 19/31] tools: restore: preallocate populate_pfns mfns array Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 20/31] tools: restore: split record processing Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 21/31] tools: restore: split handle_page_data Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 22/31] tools: restore: write data directly into guest Olaf Hering [this message]
2021-07-13 18:05 ` [PATCH v20210713 23/31] tools: recognize LIBXL_API_VERSION for 4.16 Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 24/31] tools: adjust libxl_domain_suspend to receive a struct props Olaf Hering
2021-07-13 18:05 ` [PATCH v20210713 25/31] tools: add callback to libxl for precopy_policy and precopy_stats Olaf Hering
2021-07-13 18:06 ` [PATCH v20210713 26/31] tools: add --max_iters to libxl_domain_suspend Olaf Hering
2021-07-13 18:06 ` [PATCH v20210713 27/31] tools: add --min_remaining " Olaf Hering
2021-07-13 18:06 ` [PATCH v20210713 28/31] tools: add --abort_if_busy " Olaf Hering
2021-07-13 18:06 ` [PATCH v20210713 29/31] tools: add API for expandable bitmaps Olaf Hering
2021-07-13 18:06 ` [PATCH v20210713 30/31] tools: use xg_sr_bitmap for populated_pfns Olaf Hering
2021-07-13 18:06 ` [PATCH v20210713 31/31] tools: use superpages during restore of HVM guest Olaf Hering