From: David Gibson <david@gibson.dropbear.id.au>
To: Reza Arbab <arbab@linux.ibm.com>
Cc: Daniel Henrique Barboza <danielhb@linux.ibm.com>,
Leonardo Augusto Guimaraes Garcia <lagarcia@linux.ibm.com>,
qemu-ppc@nongnu.org, qemu-devel@nongnu.org,
Greg Kurz <groug@kaod.org>
Subject: Re: [PATCH v3] spapr: Add a new level of NUMA for GPUs
Date: Mon, 25 May 2020 15:05:50 +1000
Message-ID: <20200525050550.GA23110@umbus.fritz.box>
In-Reply-To: <1590177213-4513-1-git-send-email-arbab@linux.ibm.com>
On Fri, May 22, 2020 at 02:53:33PM -0500, Reza Arbab wrote:
> NUMA nodes corresponding to GPU memory currently have the same
> affinity/distance as normal memory nodes. Add a third NUMA associativity
> reference point enabling us to give GPU nodes more distance.
>
> This is guest visible information, which shouldn't change under a
> running guest across migration between different qemu versions, so make
> the change effective only in new (pseries > 5.0) machine types.
>
> Before, `numactl -H` output in a guest with 4 GPUs (nodes 2-5):
>
> node distances:
> node   0   1   2   3   4   5
>   0:  10  40  40  40  40  40
>   1:  40  10  40  40  40  40
>   2:  40  40  10  40  40  40
>   3:  40  40  40  10  40  40
>   4:  40  40  40  40  10  40
>   5:  40  40  40  40  40  10
>
> After:
>
> node distances:
> node   0   1   2   3   4   5
>   0:  10  40  80  80  80  80
>   1:  40  10  80  80  80  80
>   2:  80  80  10  80  80  80
>   3:  80  80  80  10  80  80
>   4:  80  80  80  80  10  80
>   5:  80  80  80  80  80  10
>
> These are the same distances as on the host, mirroring the change made
> to host firmware in skiboot commit f845a648b8cb ("numa/associativity:
> Add a new level of NUMA for GPU's").
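
Not an objection, but since the before/after tables raise the obvious
question of where the 40 vs 80 comes from: as I understand it, the guest
kernel walks ibm,associativity-reference-points and doubles the distance
at each level where the two nodes' associativity arrays still differ.
Something like this simplified sketch (from memory of
arch/powerpc/mm/numa.c, so treat the names and details as approximate,
not the literal kernel code):

    /*
     * Rough sketch of how a pseries guest turns ibm,associativity
     * into NUMA distances.  Illustrative only.
     */
    static int node_distance_sketch(const uint32_t *assoc_a,
                                    const uint32_t *assoc_b,
                                    const uint32_t *refpoints,
                                    int nr_refpoints)
    {
        int distance = 10;  /* LOCAL_DISTANCE */
        int i;

        for (i = 0; i < nr_refpoints; i++) {
            /* Stop at the first level where the nodes share a domain */
            if (assoc_a[refpoints[i]] == assoc_b[refpoints[i]]) {
                break;
            }
            /* Each level where they still differ doubles the distance */
            distance *= 2;
        }
        return distance;
    }

With the existing two reference points any two distinct nodes top out at
10 * 2 * 2 = 40; the third reference point lets nodes that also differ at
that level (the GPU nodes) reach 80, which matches the tables above.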
>
> Signed-off-by: Reza Arbab <arbab@linux.ibm.com>
> ---
> v3:
> * Squash into one patch
> * Add PHB compat property
> ---
>  hw/ppc/spapr.c              | 21 +++++++++++++++++++--
>  hw/ppc/spapr_pci.c          |  2 ++
>  hw/ppc/spapr_pci_nvlink2.c  |  7 ++++++-
>  include/hw/pci-host/spapr.h |  1 +
>  include/hw/ppc/spapr.h      |  1 +
>  5 files changed, 29 insertions(+), 3 deletions(-)
>
> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
> index c18eab0a2305..7c304b6c389d 100644
> --- a/hw/ppc/spapr.c
> +++ b/hw/ppc/spapr.c
> @@ -889,10 +889,16 @@ static int spapr_dt_rng(void *fdt)
>  static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt)
>  {
>      MachineState *ms = MACHINE(spapr);
> +    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms);
>      int rtas;
>      GString *hypertas = g_string_sized_new(256);
>      GString *qemu_hypertas = g_string_sized_new(256);
> -    uint32_t refpoints[] = { cpu_to_be32(0x4), cpu_to_be32(0x4) };
> +    uint32_t refpoints[] = {
> +        cpu_to_be32(0x4),
> +        cpu_to_be32(0x4),
> +        cpu_to_be32(0x2),
> +    };
> +    uint32_t nr_refpoints = 3;
>      uint64_t max_device_addr = MACHINE(spapr)->device_memory->base +
>          memory_region_size(&MACHINE(spapr)->device_memory->mr);
>      uint32_t lrdr_capacity[] = {
> @@ -944,8 +950,12 @@ static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt)
>                       qemu_hypertas->str, qemu_hypertas->len));
>      g_string_free(qemu_hypertas, TRUE);
>
> +    if (smc->pre_5_1_assoc_refpoints) {
> +        nr_refpoints = 2;
> +    }
> +
>      _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points",
> -                     refpoints, sizeof(refpoints)));
> +                     refpoints, nr_refpoints * sizeof(refpoints[0])));
>
>      _FDT(fdt_setprop(fdt, rtas, "ibm,max-associativity-domains",
>                       maxdomains, sizeof(maxdomains)));
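
Just to spell out the guest-visible effect of this hunk (my reading of
the patch, written here as a comment rather than an actual device tree
dump):

    /*
     * pseries-5.1 and newer machines:
     *     ibm,associativity-reference-points = <0x4 0x4 0x2>
     *
     * pseries-5.0 and older (smc->pre_5_1_assoc_refpoints set):
     *     ibm,associativity-reference-points = <0x4 0x4>
     *
     * Only the first nr_refpoints entries of refpoints[] are written,
     * so the old machine types keep the property they had before.
     */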
> @@ -4607,8 +4617,15 @@ DEFINE_SPAPR_MACHINE(5_1, "5.1", true);
>   */
>  static void spapr_machine_5_0_class_options(MachineClass *mc)
>  {
> +    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
> +    static GlobalProperty compat[] = {
> +        { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-5.1-associativity", "on" },
> +    };
> +
>      spapr_machine_5_1_class_options(mc);
>      compat_props_add(mc->compat_props, hw_compat_5_0, hw_compat_5_0_len);
> +    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
> +    smc->pre_5_1_assoc_refpoints = true;
>  }
>
>  DEFINE_SPAPR_MACHINE(5_0, "5.0", false);
> diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
> index 61b84a392d65..bcdf1a25ae8b 100644
> --- a/hw/ppc/spapr_pci.c
> +++ b/hw/ppc/spapr_pci.c
> @@ -2092,6 +2092,8 @@ static Property spapr_phb_properties[] = {
>                       pcie_ecs, true),
>      DEFINE_PROP_UINT64("gpa", SpaprPhbState, nv2_gpa_win_addr, 0),
>      DEFINE_PROP_UINT64("atsd", SpaprPhbState, nv2_atsd_win_addr, 0),
> +    DEFINE_PROP_BOOL("pre-5.1-associativity", SpaprPhbState,
> +                     pre_5_1_assoc, false),
>      DEFINE_PROP_END_OF_LIST(),
>  };
>
> diff --git a/hw/ppc/spapr_pci_nvlink2.c b/hw/ppc/spapr_pci_nvlink2.c
> index 8332d5694e46..3394ac425eee 100644
> --- a/hw/ppc/spapr_pci_nvlink2.c
> +++ b/hw/ppc/spapr_pci_nvlink2.c
> @@ -362,7 +362,7 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
>          uint32_t associativity[] = {
>              cpu_to_be32(0x4),
>              SPAPR_GPU_NUMA_ID,
> -            SPAPR_GPU_NUMA_ID,
> +            cpu_to_be32(nvslot->numa_id),
>              SPAPR_GPU_NUMA_ID,
>              cpu_to_be32(nvslot->numa_id)

This doesn't look quite right.  In the new case the last four entries
come out as { GPU_NUMA_ID, nvslot->numa_id, GPU_NUMA_ID,
nvslot->numa_id }, alternating between the constant and the per-GPU
node id.
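
To make that concrete, here is how I read the two initializers (same
identifiers as the patch, obviously not standalone code, just the two
layouts side by side):

    /* Current layout (before this patch), per GPU slot: */
    uint32_t assoc_before[] = {
        cpu_to_be32(0x4),              /* number of entries */
        SPAPR_GPU_NUMA_ID,
        SPAPR_GPU_NUMA_ID,
        SPAPR_GPU_NUMA_ID,
        cpu_to_be32(nvslot->numa_id)   /* the GPU's own node */
    };

    /* Layout with this patch applied: */
    uint32_t assoc_after[] = {
        cpu_to_be32(0x4),              /* number of entries */
        SPAPR_GPU_NUMA_ID,
        cpu_to_be32(nvslot->numa_id),  /* now the per-GPU node id */
        SPAPR_GPU_NUMA_ID,             /* still the constant */
        cpu_to_be32(nvslot->numa_id)
    };

It's the SPAPR_GPU_NUMA_ID left sandwiched between the two
nvslot->numa_id entries that looks odd to me.
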
>          };
> @@ -374,6 +374,11 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
>          _FDT(off);
>          _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
>          _FDT((fdt_setprop(fdt, off, "reg", mem_reg, sizeof(mem_reg))));
> +
> +        if (sphb->pre_5_1_assoc) {
> +            associativity[2] = SPAPR_GPU_NUMA_ID;
> +        }
> +
>          _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
>                            sizeof(associativity))));
>
> diff --git a/include/hw/pci-host/spapr.h b/include/hw/pci-host/spapr.h
> index 8877ff51fbf7..600eb55c3488 100644
> --- a/include/hw/pci-host/spapr.h
> +++ b/include/hw/pci-host/spapr.h
> @@ -94,6 +94,7 @@ struct SpaprPhbState {
>      hwaddr nv2_gpa_win_addr;
>      hwaddr nv2_atsd_win_addr;
>      SpaprPhbPciNvGpuConfig *nvgpus;
> +    bool pre_5_1_assoc;
>  };
>
>  #define SPAPR_PCI_MEM_WIN_BUS_OFFSET 0x80000000ULL
> diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
> index e579eaf28c05..8316d9eea405 100644
> --- a/include/hw/ppc/spapr.h
> +++ b/include/hw/ppc/spapr.h
> @@ -129,6 +129,7 @@ struct SpaprMachineClass {
>      bool linux_pci_probe;
>      bool smp_threads_vsmt; /* set VSMT to smp_threads by default */
>      hwaddr rma_limit; /* clamp the RMA to this size */
> +    bool pre_5_1_assoc_refpoints;
>
>      void (*phb_placement)(SpaprMachineState *spapr, uint32_t index,
>                            uint64_t *buid, hwaddr *pio,
--
David Gibson                    | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au  | minimalist, thank you.  NOT _the_ _other_
                                | _way_ _around_!
http://www.ozlabs.org/~dgibson