From: Jan Beulich <jbeulich@suse.com>
To: "xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>
Cc: "Andrew Cooper" <andrew.cooper3@citrix.com>,
	"Wei Liu" <wl@xen.org>, "Roger Pau Monné" <roger.pau@citrix.com>,
	"Juergen Gross" <jgross@suse.com>,
	"George Dunlap" <george.dunlap@citrix.com>,
	"Ian Jackson" <iwj@xenproject.org>
Subject: [PATCH v2 09/13] libxenguest: restrict PV guest size
Date: Mon, 5 Jul 2021 17:15:47 +0200
Message-ID: <94076a26-c19b-4da8-02d5-6c7ce1759969@suse.com>
In-Reply-To: <0bebfe8c-6897-dc8b-7fe0-9127d4996eb8@suse.com>

The P2M, the use of PFNs, and hence the maximum valid PFN are purely
software constructs in PV. In principle a guest is free to use arbitrary
PFNs. However, at least page table normalization requires that PFN space
be, like MFN space, limited to the architectural 40 bits (40 frame bits
plus the 12-bit page offset make up the 52 address bits). And of course a
32-bit tool stack places further constraints on the usable range.

Bounding the values also ensures that various subsequent calculations
won't truncate them and then continue with inconsistent state (see e.g.
fl_entries vs ctx->x86.pv.p2m_frames in map_p2m_tree()).
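
To illustrate the sort of truncation meant here (a contrived sketch, not
the actual map_p2m_tree() computation):

    uint64_t max_pfn = 1ULL << 44;          /* well past the 40-bit bound */
    unsigned int fl_entries = (max_pfn + 1023) / 1024;
    /*
     * 2^44 / 2^10 = 2^34 doesn't fit in 32 bits: fl_entries silently
     * becomes 0, while a 64-bit frame count derived from max_pfn would
     * still be 2^34; the two then disagree from here on.
     */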

While there, correct an adjacent error message in the restore code whose
wording was the wrong way round, as well as a slightly malformed and
misleading (off-by-one) one in the core dumping code.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Integrate into series.
---
In case the save/restore changes don't make it obvious enough: It
escapes me why struct xc_sr_rec_x86_pv_p2m_frames has p2m_pfns[] with
uint64_t element type but {start,end}_pfn both as uint32_t. Imo all
three can sensibly only ever be of the same type.

--- a/tools/include/xen-tools/libs.h
+++ b/tools/include/xen-tools/libs.h
@@ -13,6 +13,10 @@
 #define ARRAY_SIZE(a) (sizeof(a) / sizeof(*a))
 #endif
 
+#ifndef sizeof_field
+#define sizeof_field(type, field) sizeof(((type *)0)->field)
+#endif
+
 #ifndef MAX
 #define MAX(x, y) ((x) > (y) ? (x) : (y))
 #endif
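
(For reference, a minimal usage sketch of the new helper; the struct below
is made up for illustration. Because sizeof() doesn't evaluate its operand,
the cast of a null pointer here is safe:)

    struct example { uint32_t end_pfn; uint64_t frames[8]; };

    size_t a = sizeof_field(struct example, end_pfn); /* 4 */
    size_t b = sizeof_field(struct example, frames);  /* 64 */
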
--- a/tools/libs/guest/xg_core_x86.c
+++ b/tools/libs/guest/xg_core_x86.c
@@ -59,6 +59,43 @@ xc_core_arch_memory_map_get(xc_interface
     if ( xc_domain_nr_gpfns(xch, info->domid, &p2m_size) < 0 )
         return -1;
 
+    if ( !p2m_size )
+    {
+        ERROR("Cannot map a guest without P2M");
+        errno = ENODATA;
+        return -1;
+    }
+
+    if ( !info->hvm )
+    {
+        unsigned int guest_width;
+
+        if ( xc_domain_get_guest_width(xch, info->domid, &guest_width) != 0 )
+        {
+            PERROR("Cannot get address size for PV guest");
+            return -1;
+        }
+
+        if ( p2m_size == (guest_width > 4 ? ~0UL : ~0U) )
+        {
+            ERROR("Cannot map a PV guest with invalid P2M");
+            errno = ENODATA;
+            return -1;
+        }
+    }
+
+#ifndef __i386__
+    if ( (p2m_size - 1) >> 40 )
+#else
+    /* Very large domains (> 1TB) will exhaust virtual address space. */
+    if ( (p2m_size - 1) >> 28 )
+#endif
+    {
+        ERROR("Cannot map a guest with P2M size %#lx", p2m_size);
+        errno = EOPNOTSUPP;
+        return -1;
+    }
+
     map = malloc(sizeof(*map));
     if ( map == NULL )
     {
@@ -333,10 +370,30 @@ xc_core_arch_map_p2m_rw(xc_interface *xc
 
     if ( dinfo->p2m_size < info->nr_pages  )
     {
-        ERROR("p2m_size < nr_pages -1 (%lx < %lx", dinfo->p2m_size, info->nr_pages - 1);
+        ERROR("p2m_size < nr_pages (%lx < %lx)", dinfo->p2m_size, info->nr_pages);
         goto out;
     }
 
+    if ( !info->hvm && dinfo->p2m_size == (dinfo->guest_width > 4 ? ~0UL : ~0U) )
+    {
+        ERROR("Cannot r/%c-map a PV guest with invalid P2M", rw ? 'w' : 'o');
+        errno = ENODATA;
+        return -1;
+    }
+
+#ifndef __i386__
+    if ( (dinfo->p2m_size - 1) >> 40 )
+#else
+    /* Very large domains (> 1TB) will exhaust virtual address space. */
+    if ( (dinfo->p2m_size - 1) >> 28 )
+#endif
+    {
+        ERROR("Cannot r/%c-map a guest with P2M size %#lx",
+              rw ? 'w' : 'o', dinfo->p2m_size);
+        errno = EOPNOTSUPP;
+        return -1;
+    }
+
     p2m_cr3 = GET_FIELD(live_shinfo, arch.p2m_cr3, dinfo->guest_width);
 
     p2m_frame_list = p2m_cr3 ? xc_core_arch_map_p2m_list_rw(xch, dinfo, dom, live_shinfo, p2m_cr3)
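
(Sanity check of the bounds above, under my reading: on 64-bit builds,
(p2m_size - 1) >> 40 rejects any PFN beyond the architectural 40 bits. On
32-bit builds, 2^28 PFNs of 4KiB each correspond to 1TiB of guest memory,
and a P2M with 2^28 8-byte entries would alone need 2^28 * 8 = 2GiB of
virtual address space to map, hence the tighter bound.)
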
--- a/tools/libs/guest/xg_sr_restore_x86_pv.c
+++ b/tools/libs/guest/xg_sr_restore_x86_pv.c
@@ -709,10 +709,23 @@ static int handle_x86_pv_p2m_frames(stru
         return -1;
     }
 
+#ifdef __i386__
+    /* Very large domains (> 1TB) will exhaust virtual address space. */
+    if ( data->end_pfn >> 28 )
+#elif 0 /* sizeof(data->end_pfn) > 4 */
+    if ( data->end_pfn >> (ctx->x86.pv.width > 4 ? 40 : 32) )
+#else
+    if ( 0 )
+#endif
+    {
+        ERROR("End pfn in stream (%#x) too large", data->end_pfn);
+        return -1;
+    }
+
     if ( data->start_pfn > data->end_pfn )
     {
-        ERROR("End pfn in stream (%#x) exceeds Start (%#x)",
-              data->end_pfn, data->start_pfn);
+        ERROR("Start pfn in stream (%#x) exceeds End (%#x)",
+              data->start_pfn, data->end_pfn);
         return -1;
     }
 
--- a/tools/libs/guest/xg_sr_save_x86_pv.c
+++ b/tools/libs/guest/xg_sr_save_x86_pv.c
@@ -464,11 +464,40 @@ static int map_p2m_list(struct xc_sr_con
  */
 static int map_p2m(struct xc_sr_context *ctx)
 {
+    xc_interface *xch = ctx->xch;
     uint64_t p2m_cr3;
+    uint64_t max_pfn = GET_FIELD(ctx->x86.pv.shinfo, arch.max_pfn,
+                                 ctx->x86.pv.width);
+
+    if ( !max_pfn )
+    {
+        ERROR("Cannot save a guest without P2M");
+        errno = ENODATA;
+        return -1;
+    }
+
+    if ( max_pfn-- == (ctx->x86.pv.width > 4 ? ~0UL : ~0U) )
+    {
+        ERROR("Cannot save a guest with invalid P2M");
+        errno = ENODATA;
+        return -1;
+    }
+
+#ifndef __i386__
+    if ( max_pfn >> (sizeof_field(struct xc_sr_rec_x86_pv_p2m_frames,
+                                  end_pfn) > 4 ? 40 : 32) )
+#else
+    /* Very large domains (> 1TB) will exhaust virtual address space. */
+    if ( max_pfn >> 28 )
+#endif
+    {
+        ERROR("Cannot save a guest with maximum PFN %#"PRIx64, max_pfn);
+        errno = EOPNOTSUPP;
+        return -1;
+    }
 
     ctx->x86.pv.p2m_generation = ~0ULL;
-    ctx->x86.pv.max_pfn = GET_FIELD(ctx->x86.pv.shinfo, arch.max_pfn,
-                                    ctx->x86.pv.width) - 1;
+    ctx->x86.pv.max_pfn = max_pfn;
     p2m_cr3 = GET_FIELD(ctx->x86.pv.shinfo, arch.p2m_cr3, ctx->x86.pv.width);
 
     return p2m_cr3 ? map_p2m_list(ctx, p2m_cr3) : map_p2m_tree(ctx);
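
(A note on how the save-side check resolves, as I read it: since end_pfn
is only uint32_t per the remark above, sizeof_field(struct
xc_sr_rec_x86_pv_p2m_frames, end_pfn) > 4 is false, so the non-i386 test
compiles down to max_pfn >> 32; the 40-bit variant would only become live
if the record field were widened to uint64_t.)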


