From: Greg Kurz <groug@kaod.org>
To: qemu-devel@nongnu.org
Cc: Daniel Henrique Barboza <danielhb@linux.ibm.com>,
	qemu-ppc@nongnu.org, qemu-stable@nongnu.org,
	David Gibson <david@gibson.dropbear.id.au>
Subject: [PATCH] spapr: Fix buffer overflow in spapr_numa_associativity_init()
Date: Fri, 18 Dec 2020 14:53:24 +0100
Message-ID: <160829960428.734871.12634150161215429514.stgit@bahia.lan>

Running a guest with 128 NUMA nodes crashes QEMU:

../../util/error.c:59: error_setv: Assertion `*errp == NULL' failed.

The crash happens when setting the FWNMI migration blocker:

    if (spapr_get_cap(spapr, SPAPR_CAP_FWNMI) == SPAPR_CAP_ON) {
        /* Create the error string for live migration blocker */
        error_setg(&spapr->fwnmi_migration_blocker,
            "A machine check is being handled during migration. The handler"
            "may run and log hardware error on the destination");
    }
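
For context, error_setg() ends up in error_setv(), which asserts that the
destination Error pointer is still NULL before populating it, so any stray
write that leaves it non-NULL trips the assertion. A simplified standalone
model of that contract (a sketch, not QEMU's actual implementation;
error_setg_model() is a made-up name):

    #include <assert.h>
    #include <stdint.h>

    typedef struct Error { const char *msg; } Error;

    /* Simplified stand-in for error_setv(): the destination Error
     * pointer must still be NULL, otherwise the assertion fires --
     * the exact failure seen above. */
    static void error_setg_model(Error **errp, Error *err)
    {
        assert(*errp == NULL);
        *errp = err;
    }

    int main(void)
    {
        /* Pretend something corrupted the pointer, as in the gdb dump */
        Error *blocker = (Error *)(uintptr_t)0x8000000004000000ULL;
        static Error err = { "migration blocked" };

        error_setg_model(&blocker, &err);   /* aborts on the assertion */
        return 0;
    }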

Inspection reveals that spapr->fwnmi_migration_blocker isn't NULL:

(gdb) p spapr->fwnmi_migration_blocker
$1 = (Error *) 0x8000000004000000

Since this is the only place where spapr->fwnmi_migration_blocker is
set, this means something wrote to it behind our back. Further analysis
points to spapr_numa_associativity_init(), especially the part
that initializes the associativity arrays for NVLink GPUs:

    max_nodes_with_gpus = nb_numa_nodes + NVGPU_MAX_NUM;

i.e. max_nodes_with_gpus = 128 + 6, but the array isn't sized to
accommodate the 6 extra nodes:

#define MAX_NODES 128

struct SpaprMachineState {
    .
    .
    .
    uint32_t numa_assoc_array[MAX_NODES][NUMA_ASSOC_SIZE];

    Error *fwnmi_migration_blocker;
};

and the following loop happily overwrites spapr->fwnmi_migration_blocker,
and probably more:

    for (i = nb_numa_nodes; i < max_nodes_with_gpus; i++) {
        spapr->numa_assoc_array[i][0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS);

        for (j = 1; j < MAX_DISTANCE_REF_POINTS; j++) {
            uint32_t gpu_assoc = smc->pre_5_1_assoc_refpoints ?
                                 SPAPR_GPU_NUMA_ID : cpu_to_be32(i);
            spapr->numa_assoc_array[i][j] = gpu_assoc;
        }

        spapr->numa_assoc_array[i][MAX_DISTANCE_REF_POINTS] = cpu_to_be32(i);
    }
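
The mechanics are easy to reproduce in isolation: row index 128 already
addresses memory past the end of numa_assoc_array, i.e. the bytes of
fwnmi_migration_blocker. A self-contained sketch (assuming
NUMA_ASSOC_SIZE == 5, i.e. MAX_DISTANCE_REF_POINTS == 4 as in
"hw/ppc/spapr.h"; struct Demo is a made-up cut-down layout):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_NODES       128
    #define NVGPU_MAX_NUM   6
    #define NUMA_ASSOC_SIZE 5   /* assumed: MAX_DISTANCE_REF_POINTS + 1 */

    /* Cut-down stand-in with the same member ordering as
     * SpaprMachineState */
    struct Demo {
        uint32_t numa_assoc_array[MAX_NODES][NUMA_ASSOC_SIZE];
        void *fwnmi_migration_blocker;
    };

    int main(void)
    {
        struct Demo d = { .fwnmi_migration_blocker = NULL };

        /* Row MAX_NODES is the first out-of-bounds "GPU node" row; with
         * this layout it starts at the offset of fwnmi_migration_blocker,
         * so two uint32_t stores clobber the 64-bit pointer.  This is
         * deliberately undefined behaviour -- it reproduces the bug. */
        d.numa_assoc_array[MAX_NODES][0] = 0xdeadbeef;
        d.numa_assoc_array[MAX_NODES][1] = 0xdeadbeef;

        printf("blocker = %p\n", d.fwnmi_migration_blocker); /* not NULL */
        return 0;
    }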

Fix the size of the array. This requires NVGPU_MAX_NUM to be visible
from "hw/ppc/spapr.h". Including "hw/pci-host/spapr.h" there would
introduce a circular dependency that breaks the build, so this moves
the definition of NVGPU_MAX_NUM to "hw/ppc/spapr.h" instead.
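
With NVGPU_MAX_NUM visible from "hw/ppc/spapr.h", a compile-time guard
against this class of sizing bug also becomes expressible. A hypothetical
hardening, not part of this patch, using QEMU's QEMU_BUILD_BUG_ON() and
ARRAY_SIZE() helpers:

    /* Hypothetical guard, not in this patch: fail the build if the
     * associativity array cannot hold every NUMA node plus the GPU
     * pseudo-nodes added by spapr_numa_associativity_init(). */
    QEMU_BUILD_BUG_ON(ARRAY_SIZE(((SpaprMachineState *)NULL)->numa_assoc_array)
                      < MAX_NODES + NVGPU_MAX_NUM);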

Reported-by: Min Deng <mdeng@redhat.com>
BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1908693
Fixes: dd7e1d7ae431 ("spapr_numa: move NVLink2 associativity handling to spapr_numa.c")
Cc: danielhb413@gmail.com
Signed-off-by: Greg Kurz <groug@kaod.org>
---
 include/hw/pci-host/spapr.h |    2 --
 include/hw/ppc/spapr.h      |    5 ++++-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/include/hw/pci-host/spapr.h b/include/hw/pci-host/spapr.h
index 4f58f0223b56..bd014823a933 100644
--- a/include/hw/pci-host/spapr.h
+++ b/include/hw/pci-host/spapr.h
@@ -115,8 +115,6 @@ struct SpaprPhbState {
 #define SPAPR_PCI_NV2RAM64_WIN_BASE  SPAPR_PCI_LIMIT
 #define SPAPR_PCI_NV2RAM64_WIN_SIZE  (2 * TiB) /* For up to 6 GPUs 256GB each */
 
-/* Max number of these GPUsper a physical box */
-#define NVGPU_MAX_NUM                6
 /* Max number of NVLinks per GPU in any physical box */
 #define NVGPU_MAX_LINKS              3
 
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index 06a5b4259f20..1cc19575f548 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -112,6 +112,9 @@ typedef enum {
 #define NUMA_ASSOC_SIZE            (MAX_DISTANCE_REF_POINTS + 1)
 #define VCPU_ASSOC_SIZE            (NUMA_ASSOC_SIZE + 1)
 
+/* Max number of these GPUs per physical box */
+#define NVGPU_MAX_NUM                6
+
 typedef struct SpaprCapabilities SpaprCapabilities;
 struct SpaprCapabilities {
     uint8_t caps[SPAPR_CAP_NUM];
@@ -240,7 +243,7 @@ struct SpaprMachineState {
     unsigned gpu_numa_id;
     SpaprTpmProxy *tpm_proxy;
 
-    uint32_t numa_assoc_array[MAX_NODES][NUMA_ASSOC_SIZE];
+    uint32_t numa_assoc_array[MAX_NODES + NVGPU_MAX_NUM][NUMA_ASSOC_SIZE];
 
     Error *fwnmi_migration_blocker;
 };




Thread overview: 5+ messages
2020-12-18 13:53 Greg Kurz [this message]
2020-12-18 13:59 ` [PATCH] spapr: Fix buffer overflow in spapr_numa_associativity_init() Daniel Henrique Barboza
2020-12-18 15:16 ` Philippe Mathieu-Daudé
2020-12-18 15:58   ` Greg Kurz
2020-12-28  7:13 ` David Gibson
