From: Julien Grall <julien@xen.org>
To: Wei Chen <wei.chen@arm.com>,
xen-devel@lists.xenproject.org, sstabellini@kernel.org,
jbeulich@suse.com
Cc: Bertrand.Marquis@arm.com
Subject: Re: [XEN RFC PATCH 08/40] xen/x86: Move NUMA memory node map functions to common
Date: Mon, 23 Aug 2021 18:47:03 +0100 [thread overview]
Message-ID: <a12849ee-9549-f495-93b5-376f987177ad@xen.org> (raw)
In-Reply-To: <20210811102423.28908-9-wei.chen@arm.com>
Hi Wei,
On 11/08/2021 11:23, Wei Chen wrote:
> In the later patches we will add NUMA support to Arm. Arm
> NUMA support will follow current memory node map management
> as x86. So this part of code can be common, in this case,
> we move this part of code from arch/x86 to common.
I would add "No functional changes intended" to make clear this patch is
only moving code.
>
> Signed-off-by: Wei Chen <wei.chen@arm.com>
> ---
> xen/arch/x86/numa.c | 114 --------------------------------
> xen/common/Makefile | 1 +
> xen/common/numa.c | 131 +++++++++++++++++++++++++++++++++++++
> xen/include/asm-x86/numa.h | 29 --------
> xen/include/xen/numa.h | 35 ++++++++++
> 5 files changed, 167 insertions(+), 143 deletions(-)
> create mode 100644 xen/common/numa.c
>
> diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
> index d23f4f7919..a6211be121 100644
> --- a/xen/arch/x86/numa.c
> +++ b/xen/arch/x86/numa.c
> @@ -29,14 +29,6 @@ custom_param("numa", numa_setup);
> /* from proto.h */
> #define round_up(x,y) ((((x)+(y))-1) & (~((y)-1)))
>
> -struct node_data node_data[MAX_NUMNODES];
> -
> -/* Mapping from pdx to node id */
> -int memnode_shift;
> -static typeof(*memnodemap) _memnodemap[64];
> -unsigned long memnodemapsize;
> -u8 *memnodemap;
> -
> nodeid_t cpu_to_node[NR_CPUS] __read_mostly = {
> [0 ... NR_CPUS-1] = NUMA_NO_NODE
> };
> @@ -58,112 +50,6 @@ int srat_disabled(void)
> return numa_off || acpi_numa < 0;
> }
>
> -/*
> - * Given a shift value, try to populate memnodemap[]
> - * Returns :
> - * 1 if OK
> - * 0 if memnodmap[] too small (of shift too small)
> - * -1 if node overlap or lost ram (shift too big)
> - */
> -static int __init populate_memnodemap(const struct node *nodes,
> - int numnodes, int shift, nodeid_t *nodeids)
> -{
> - unsigned long spdx, epdx;
> - int i, res = -1;
> -
> - memset(memnodemap, NUMA_NO_NODE, memnodemapsize * sizeof(*memnodemap));
> - for ( i = 0; i < numnodes; i++ )
> - {
> - spdx = paddr_to_pdx(nodes[i].start);
> - epdx = paddr_to_pdx(nodes[i].end - 1) + 1;
> - if ( spdx >= epdx )
> - continue;
> - if ( (epdx >> shift) >= memnodemapsize )
> - return 0;
> - do {
> - if ( memnodemap[spdx >> shift] != NUMA_NO_NODE )
> - return -1;
> -
> - if ( !nodeids )
> - memnodemap[spdx >> shift] = i;
> - else
> - memnodemap[spdx >> shift] = nodeids[i];
> -
> - spdx += (1UL << shift);
> - } while ( spdx < epdx );
> - res = 1;
> - }
> -
> - return res;
> -}
> -
> -static int __init allocate_cachealigned_memnodemap(void)
> -{
> - unsigned long size = PFN_UP(memnodemapsize * sizeof(*memnodemap));
> - unsigned long mfn = mfn_x(alloc_boot_pages(size, 1));
> -
> - memnodemap = mfn_to_virt(mfn);
> - mfn <<= PAGE_SHIFT;
> - size <<= PAGE_SHIFT;
> - printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
> - mfn, mfn + size);
> - memnodemapsize = size / sizeof(*memnodemap);
> -
> - return 0;
> -}
> -
> -/*
> - * The LSB of all start and end addresses in the node map is the value of the
> - * maximum possible shift.
> - */
> -static int __init extract_lsb_from_nodes(const struct node *nodes,
> - int numnodes)
> -{
> - int i, nodes_used = 0;
> - unsigned long spdx, epdx;
> - unsigned long bitfield = 0, memtop = 0;
> -
> - for ( i = 0; i < numnodes; i++ )
> - {
> - spdx = paddr_to_pdx(nodes[i].start);
> - epdx = paddr_to_pdx(nodes[i].end - 1) + 1;
> - if ( spdx >= epdx )
> - continue;
> - bitfield |= spdx;
> - nodes_used++;
> - if ( epdx > memtop )
> - memtop = epdx;
> - }
> - if ( nodes_used <= 1 )
> - i = BITS_PER_LONG - 1;
> - else
> - i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
> - memnodemapsize = (memtop >> i) + 1;
> - return i;
> -}
> -
> -int __init compute_hash_shift(struct node *nodes, int numnodes,
> - nodeid_t *nodeids)
> -{
> - int shift;
> -
> - shift = extract_lsb_from_nodes(nodes, numnodes);
> - if ( memnodemapsize <= ARRAY_SIZE(_memnodemap) )
> - memnodemap = _memnodemap;
> - else if ( allocate_cachealigned_memnodemap() )
> - return -1;
> - printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", shift);
> -
> - if ( populate_memnodemap(nodes, numnodes, shift, nodeids) != 1 )
> - {
> - printk(KERN_INFO "Your memory is not aligned you need to "
> - "rebuild your hypervisor with a bigger NODEMAPSIZE "
> - "shift=%d\n", shift);
> - return -1;
> - }
> -
> - return shift;
> -}
> /* initialize NODE_DATA given nodeid and start/end */
> void __init setup_node_bootmem(nodeid_t nodeid, u64 start, u64 end)
> {
> diff --git a/xen/common/Makefile b/xen/common/Makefile
> index 54de70d422..f8f667e90a 100644
> --- a/xen/common/Makefile
> +++ b/xen/common/Makefile
> @@ -54,6 +54,7 @@ obj-y += wait.o
> obj-bin-y += warning.init.o
> obj-$(CONFIG_XENOPROF) += xenoprof.o
> obj-y += xmalloc_tlsf.o
> +obj-$(CONFIG_NUMA) += numa.o
AFAICT, the Makefile is listing the files in alphabetical order. So
please add numa.o in the correct position.
>
> obj-bin-$(CONFIG_X86) += $(foreach n,decompress bunzip2 unxz unlzma lzo unlzo unlz4 unzstd earlycpio,$(n).init.o)
>
> diff --git a/xen/common/numa.c b/xen/common/numa.c
> new file mode 100644
> index 0000000000..e65b6a6676
> --- /dev/null
> +++ b/xen/common/numa.c
> @@ -0,0 +1,131 @@
> +/*
> + * Generic VM initialization for x86-64 NUMA setups.
> + * Copyright 2002,2003 Andi Kleen, SuSE Labs.
> + * Adapted for Xen: Ryan Harper <ryanh@us.ibm.com>
> + */
> +
> +#include <xen/mm.h>
> +#include <xen/string.h>
> +#include <xen/init.h>
> +#include <xen/ctype.h>
You don't seem to use any helpers/types directly defined by at least
this header...
> +#include <xen/nodemask.h>
> +#include <xen/numa.h>
> +#include <xen/time.h>
... this one and ...
> +#include <xen/smp.h>
... this one. Can you check the list of headers and introduce the
minimum? If the dependency is required by another header, then I think
that dependency should be moved to the header requiring it.
> +#include <xen/pfn.h>
> +#include <xen/sched.h>
Please sort the includes in alphabetical order.
> +
> +struct node_data node_data[MAX_NUMNODES];
> +
> +/* Mapping from pdx to node id */
> +int memnode_shift;
> +typeof(*memnodemap) _memnodemap[64];
> +unsigned long memnodemapsize;
> +u8 *memnodemap;
> +
> +/*
> + * Given a shift value, try to populate memnodemap[]
> + * Returns :
> + * 1 if OK
> + * 0 if memnodmap[] too small (of shift too small)
> + * -1 if node overlap or lost ram (shift too big)
> + */
> +static int __init populate_memnodemap(const struct node *nodes,
> + int numnodes, int shift, nodeid_t *nodeids)
> +{
> + unsigned long spdx, epdx;
> + int i, res = -1;
> +
> + memset(memnodemap, NUMA_NO_NODE, memnodemapsize * sizeof(*memnodemap));
> + for ( i = 0; i < numnodes; i++ )
> + {
> + spdx = paddr_to_pdx(nodes[i].start);
> + epdx = paddr_to_pdx(nodes[i].end - 1) + 1;
> + if ( spdx >= epdx )
> + continue;
> + if ( (epdx >> shift) >= memnodemapsize )
> + return 0;
> + do {
> + if ( memnodemap[spdx >> shift] != NUMA_NO_NODE )
> + return -1;
> +
> + if ( !nodeids )
> + memnodemap[spdx >> shift] = i;
> + else
> + memnodemap[spdx >> shift] = nodeids[i];
> +
> + spdx += (1UL << shift);
> + } while ( spdx < epdx );
> + res = 1;
> + }
> +
> + return res;
> +}
> +
> +static int __init allocate_cachealigned_memnodemap(void)
> +{
> + unsigned long size = PFN_UP(memnodemapsize * sizeof(*memnodemap));
> + unsigned long mfn = mfn_x(alloc_boot_pages(size, 1));
> +
> + memnodemap = mfn_to_virt(mfn);
> + mfn <<= PAGE_SHIFT;
> + size <<= PAGE_SHIFT;
> + printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
> + mfn, mfn + size);
> + memnodemapsize = size / sizeof(*memnodemap);
> +
> + return 0;
> +}
> +
> +/*
> + * The LSB of all start and end addresses in the node map is the value of the
> + * maximum possible shift.
> + */
> +static int __init extract_lsb_from_nodes(const struct node *nodes,
> + int numnodes)
> +{
> + int i, nodes_used = 0;
> + unsigned long spdx, epdx;
> + unsigned long bitfield = 0, memtop = 0;
> +
> + for ( i = 0; i < numnodes; i++ )
> + {
> + spdx = paddr_to_pdx(nodes[i].start);
> + epdx = paddr_to_pdx(nodes[i].end - 1) + 1;
> + if ( spdx >= epdx )
> + continue;
> + bitfield |= spdx;
> + nodes_used++;
> + if ( epdx > memtop )
> + memtop = epdx;
> + }
> + if ( nodes_used <= 1 )
> + i = BITS_PER_LONG - 1;
> + else
> + i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
> + memnodemapsize = (memtop >> i) + 1;
> + return i;
> +}
> +
> +int __init compute_hash_shift(struct node *nodes, int numnodes,
> + nodeid_t *nodeids)
> +{
> + int shift;
> +
> + shift = extract_lsb_from_nodes(nodes, numnodes);
> + if ( memnodemapsize <= ARRAY_SIZE(_memnodemap) )
> + memnodemap = _memnodemap;
> + else if ( allocate_cachealigned_memnodemap() )
> + return -1;
> + printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", shift);
> +
> + if ( populate_memnodemap(nodes, numnodes, shift, nodeids) != 1 )
> + {
> + printk(KERN_INFO "Your memory is not aligned you need to "
> + "rebuild your hypervisor with a bigger NODEMAPSIZE "
> + "shift=%d\n", shift);
> + return -1;
> + }
> +
> + return shift;
> +}
> diff --git a/xen/include/asm-x86/numa.h b/xen/include/asm-x86/numa.h
> index bada2c0bb9..abe5617d01 100644
> --- a/xen/include/asm-x86/numa.h
> +++ b/xen/include/asm-x86/numa.h
> @@ -26,7 +26,6 @@ extern int compute_hash_shift(struct node *nodes, int numnodes,
> extern nodeid_t pxm_to_node(unsigned int pxm);
>
> #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
> -#define VIRTUAL_BUG_ON(x)
>
> extern void numa_add_cpu(int cpu);
> extern void numa_init_array(void);
> @@ -47,34 +46,6 @@ static inline void clear_node_cpumask(int cpu)
> cpumask_clear_cpu(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
> }
>
> -/* Simple perfect hash to map pdx to node numbers */
> -extern int memnode_shift;
> -extern unsigned long memnodemapsize;
> -extern u8 *memnodemap;
> -
> -struct node_data {
> - unsigned long node_start_pfn;
> - unsigned long node_spanned_pages;
> -};
> -
> -extern struct node_data node_data[];
> -
> -static inline __attribute__((pure)) nodeid_t phys_to_nid(paddr_t addr)
> -{
> - nodeid_t nid;
> - VIRTUAL_BUG_ON((paddr_to_pdx(addr) >> memnode_shift) >= memnodemapsize);
> - nid = memnodemap[paddr_to_pdx(addr) >> memnode_shift];
> - VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]);
> - return nid;
> -}
> -
> -#define NODE_DATA(nid) (&(node_data[nid]))
> -
> -#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
> -#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
> -#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \
> - NODE_DATA(nid)->node_spanned_pages)
> -
> extern int valid_numa_range(u64 start, u64 end, nodeid_t node);
>
> void srat_parse_regions(u64 addr);
> diff --git a/xen/include/xen/numa.h b/xen/include/xen/numa.h
> index 7aef1a88dc..39e8a4e00a 100644
> --- a/xen/include/xen/numa.h
> +++ b/xen/include/xen/numa.h
> @@ -18,4 +18,39 @@
> (((d)->vcpu != NULL && (d)->vcpu[0] != NULL) \
> ? vcpu_to_node((d)->vcpu[0]) : NUMA_NO_NODE)
>
> +/* The following content can be used when NUMA feature is enabled */
> +#if defined(CONFIG_NUMA)
Please use #ifdef CONFIG_NUMA
> +
> +/* Simple perfect hash to map pdx to node numbers */
> +extern int memnode_shift;
> +extern unsigned long memnodemapsize;
> +extern u8 *memnodemap;
> +extern typeof(*memnodemap) _memnodemap[64];
AFAICT, this will be turned static again in a follow-up patch. Can
this be avoided?
> +
> +struct node_data {
> + unsigned long node_start_pfn;
> + unsigned long node_spanned_pages;
> +};
> +
> +extern struct node_data node_data[];
> +#define VIRTUAL_BUG_ON(x)
> +
> +static inline __attribute__((pure)) nodeid_t phys_to_nid(paddr_t addr)
> +{
> + nodeid_t nid;
> + VIRTUAL_BUG_ON((paddr_to_pdx(addr) >> memnode_shift) >= memnodemapsize);
> + nid = memnodemap[paddr_to_pdx(addr) >> memnode_shift];
> + VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]);
> + return nid;
> +}
> +
> +#define NODE_DATA(nid) (&(node_data[nid]))
> +
> +#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
> +#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
> +#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \
> + NODE_DATA(nid)->node_spanned_pages)
> +
> +#endif /* CONFIG_NUMA */
> +
> #endif /* _XEN_NUMA_H */
>
Cheers,
--
Julien Grall
next prev parent reply other threads:[~2021-08-23 17:47 UTC|newest]
Thread overview: 196+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-08-11 10:23 [XEN RFC PATCH 00/40] Add device tree based NUMA support to Arm64 Wei Chen
2021-08-11 10:23 ` [XEN RFC PATCH 01/40] tools: Fix -Werror=maybe-uninitialized for xlu_pci_parse_bdf Wei Chen
2021-08-11 10:49 ` Jan Beulich
2021-08-13 6:28 ` Wei Chen
2021-08-11 10:23 ` [XEN RFC PATCH 02/40] xen/arm: Print a 64-bit number in hex from early uart Wei Chen
2021-08-19 13:05 ` Julien Grall
2021-08-20 1:13 ` Wei Chen
2021-08-11 10:23 ` [XEN RFC PATCH 03/40] xen/x86: Initialize memnodemapsize while faking NUMA node Wei Chen
2021-08-12 15:32 ` Jan Beulich
2021-08-13 7:26 ` Wei Chen
2021-08-13 8:29 ` Jan Beulich
2021-08-11 10:23 ` [XEN RFC PATCH 04/40] xen/arm: return default DMA bit width when platform is not set Wei Chen
2021-08-11 10:54 ` Jan Beulich
2021-08-13 6:54 ` Wei Chen
2021-08-13 6:56 ` Jan Beulich
2021-08-19 13:28 ` Julien Grall
2021-08-20 2:04 ` Wei Chen
2021-08-20 8:20 ` Julien Grall
2021-08-20 9:37 ` Wei Chen
2021-08-20 11:18 ` Julien Grall
2021-08-20 11:58 ` Wei Chen
2021-08-11 10:23 ` [XEN RFC PATCH 05/40] xen/arm: Fix lowmem_bitsize when arch_get_dma_bitsize return 0 Wei Chen
2021-08-19 13:32 ` Julien Grall
2021-08-20 2:05 ` Wei Chen
2021-08-11 10:23 ` [XEN RFC PATCH 06/40] xen: decouple NUMA from ACPI in Kconfig Wei Chen
2021-08-12 15:36 ` Jan Beulich
2021-08-13 7:27 ` Wei Chen
2021-08-12 16:54 ` Julien Grall
2021-08-13 7:28 ` Wei Chen
2021-08-11 10:23 ` [XEN RFC PATCH 07/40] xen/arm: use !CONFIG_NUMA to keep fake NUMA API Wei Chen
2021-08-19 13:34 ` Julien Grall
2021-08-20 2:08 ` Wei Chen
2021-08-20 8:23 ` Julien Grall
2021-08-20 10:24 ` Wei Chen
2021-08-20 11:24 ` Julien Grall
2021-08-20 12:23 ` Wei Chen
2021-08-20 14:41 ` Julien Grall
2021-08-11 10:23 ` [XEN RFC PATCH 08/40] xen/x86: Move NUMA memory node map functions to common Wei Chen
2021-08-23 17:47 ` Julien Grall [this message]
2021-08-24 4:07 ` Wei Chen
2021-08-11 10:23 ` [XEN RFC PATCH 09/40] xen/x86: Move numa_add_cpu_node " Wei Chen
2021-08-23 17:54 ` Julien Grall
2021-08-24 4:18 ` Wei Chen
2021-08-11 10:23 ` [XEN RFC PATCH 10/40] xen/x86: Move NR_NODE_MEMBLKS macro " Wei Chen
2021-08-23 17:58 ` Julien Grall
2021-08-11 10:23 ` [XEN RFC PATCH 11/40] xen/x86: Move NUMA nodes and memory block ranges " Wei Chen
2021-08-24 17:40 ` Julien Grall
2021-08-25 0:57 ` Wei Chen
2021-08-11 10:23 ` [XEN RFC PATCH 12/40] xen/x86: Move numa_initmem_init " Wei Chen
2021-08-25 10:21 ` Julien Grall
2021-08-25 11:15 ` Wei Chen
2021-08-25 13:26 ` Julien Grall
2021-08-11 10:23 ` [XEN RFC PATCH 13/40] xen/arm: introduce numa_set_node for Arm Wei Chen
2021-08-25 10:36 ` Julien Grall
2021-08-25 12:07 ` Wei Chen
2021-08-25 13:24 ` Julien Grall
2021-08-26 5:13 ` Wei Chen
2021-08-11 10:23 ` [XEN RFC PATCH 14/40] xen/arm: set NUMA nodes max number to 64 by default Wei Chen
2021-08-25 13:28 ` Julien Grall
2021-08-25 13:36 ` Jan Beulich
2021-08-26 2:26 ` Wei Chen
2021-08-11 10:23 ` [XEN RFC PATCH 15/40] xen/x86: move NUMA API from x86 header to common header Wei Chen
2021-08-11 10:23 ` [XEN RFC PATCH 16/40] xen/arm: Create a fake NUMA node to use common code Wei Chen
2021-08-26 23:10 ` Stefano Stabellini
2021-08-27 1:15 ` Wei Chen
2021-08-27 6:18 ` Jan Beulich
2021-08-27 9:32 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 17/40] xen/arm: Introduce DEVICE_TREE_NUMA Kconfig for arm64 Wei Chen
2021-08-19 13:38 ` Julien Grall
2021-08-20 2:30 ` Wei Chen
2021-08-20 8:41 ` Julien Grall
2021-08-20 10:49 ` Wei Chen
2021-08-20 11:28 ` Julien Grall
2021-08-20 12:25 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 18/40] xen/arm: Keep memory nodes in dtb for NUMA when boot from EFI Wei Chen
2021-08-19 17:35 ` Julien Grall
2021-08-20 2:18 ` Wei Chen
2021-08-26 23:24 ` Stefano Stabellini
2021-08-27 7:41 ` Julien Grall
2021-08-27 23:10 ` Stefano Stabellini
2021-08-27 9:23 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 19/40] xen: fdt: Introduce a helper to check fdt node type Wei Chen
2021-08-25 13:39 ` Julien Grall
2021-08-26 6:00 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 20/40] xen/arm: implement node distance helpers for Arm64 Wei Chen
2021-08-26 23:52 ` Stefano Stabellini
2021-08-27 9:30 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 21/40] xen/arm: introduce device_tree_numa as a switch for device tree NUMA Wei Chen
2021-08-19 17:45 ` Julien Grall
2021-08-20 2:21 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 22/40] xen/arm: introduce a helper to parse device tree processor node Wei Chen
2021-08-19 18:09 ` Julien Grall
2021-08-23 8:42 ` Wei Chen
2021-08-19 18:10 ` Julien Grall
2021-08-23 8:47 ` Wei Chen
2021-08-23 10:59 ` Julien Grall
2021-08-24 4:09 ` Wei Chen
2021-08-19 18:13 ` Julien Grall
2021-08-20 2:23 ` Wei Chen
2021-08-20 8:44 ` Julien Grall
2021-08-20 11:53 ` Wei Chen
2021-08-27 0:06 ` Stefano Stabellini
2021-08-27 9:31 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 23/40] xen/arm: introduce a helper to parse device tree memory node Wei Chen
2021-08-25 13:48 ` Julien Grall
2021-08-26 6:35 ` Wei Chen
2021-08-26 8:21 ` Julien Grall
2021-08-26 11:54 ` Wei Chen
2021-08-28 1:06 ` Stefano Stabellini
2021-08-28 3:56 ` Wei Chen
2021-08-28 10:33 ` Julien Grall
2021-08-28 13:58 ` Wei Chen
2021-09-08 7:34 ` Wei Chen
2021-09-08 22:31 ` Stefano Stabellini
2021-09-09 3:54 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 24/40] xen/arm: introduce a helper to parse device tree NUMA distance map Wei Chen
2021-08-25 13:56 ` Julien Grall
2021-08-26 7:01 ` Wei Chen
2021-08-31 0:48 ` Stefano Stabellini
2021-08-31 10:17 ` Wei Chen
2021-08-31 21:36 ` Stefano Stabellini
2021-09-01 11:04 ` Wei Chen
2021-09-01 16:21 ` Stefano Stabellini
2021-09-02 2:30 ` Wei Chen
2021-09-02 15:19 ` Stefano Stabellini
2021-09-02 6:00 ` Jan Beulich
2021-09-02 14:14 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 25/40] xen/arm: unified entry to parse all NUMA data from device tree Wei Chen
2021-08-31 0:54 ` Stefano Stabellini
2021-08-31 17:47 ` Julien Grall
2021-09-01 18:30 ` Stefano Stabellini
2021-09-02 2:48 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 26/40] xen/arm: Add boot and secondary CPU to NUMA system Wei Chen
2021-08-25 16:58 ` Julien Grall
2021-08-26 7:24 ` Wei Chen
2021-08-26 8:49 ` Julien Grall
2021-08-26 9:39 ` Jan Beulich
2021-08-26 12:08 ` Wei Chen
2021-08-26 12:26 ` Jan Beulich
2021-08-11 10:24 ` [XEN RFC PATCH 27/40] xen/arm: build CPU NUMA node map while creating cpu_logical_map Wei Chen
2021-08-25 17:06 ` Julien Grall
2021-08-26 7:26 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 28/40] xen/x86: decouple nodes_cover_memory with E820 map Wei Chen
2021-08-31 1:07 ` Stefano Stabellini
2021-08-31 10:19 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 29/40] xen/arm: implement Arm arch helpers Arm to get memory map info Wei Chen
2021-08-25 17:09 ` Julien Grall
2021-08-26 7:27 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 30/40] xen: move NUMA memory and CPU parsed nodemasks to common Wei Chen
2021-08-25 17:16 ` Julien Grall
2021-08-26 7:29 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 31/40] xen/x86: move nodes_cover_memory " Wei Chen
2021-08-31 1:16 ` Stefano Stabellini
2021-08-31 13:43 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 32/40] xen/x86: make acpi_scan_nodes to be neutral Wei Chen
2021-08-27 14:08 ` Julien Grall
2021-08-28 2:11 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 33/40] xen: export bad_srat and srat_disabled to extern Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 34/40] xen: move numa_scan_nodes from x86 to common Wei Chen
2021-08-27 14:14 ` Julien Grall
2021-08-28 2:12 ` Wei Chen
2021-08-31 1:26 ` Stefano Stabellini
2021-08-31 13:43 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 35/40] xen: enable numa_scan_nodes for device tree based NUMA Wei Chen
2021-08-27 14:19 ` Julien Grall
2021-08-28 2:13 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 36/40] xen/arm: keep guest still be NUMA unware Wei Chen
2021-08-27 14:28 ` Julien Grall
2021-08-28 2:19 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 37/40] xen: introduce an arch helper to do NUMA init failed fallback Wei Chen
2021-08-27 14:30 ` Julien Grall
2021-08-28 3:09 ` Wei Chen
2021-08-28 3:45 ` Wei Chen
2021-08-30 9:52 ` Jan Beulich
2021-08-30 10:38 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 38/40] xen/arm: enable device tree based NUMA in system init Wei Chen
2021-08-27 14:32 ` Julien Grall
2021-08-28 3:17 ` Wei Chen
2021-08-28 10:45 ` Julien Grall
2021-08-28 14:02 ` Wei Chen
2021-08-31 1:50 ` Stefano Stabellini
2021-08-31 13:43 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 39/40] xen/x86: move numa_setup to common to support NUMA switch in command line Wei Chen
2021-08-27 14:37 ` Julien Grall
2021-08-28 3:22 ` Wei Chen
2021-08-31 1:53 ` Stefano Stabellini
2021-08-31 13:44 ` Wei Chen
2021-08-11 10:24 ` [XEN RFC PATCH 40/40] xen/x86: move dump_numa info hotkey to common Wei Chen
2021-08-11 10:41 ` [XEN RFC PATCH 00/40] Add device tree based NUMA support to Arm64 Jan Beulich
2021-08-13 2:33 ` Wei Chen
2021-08-13 6:53 ` Jan Beulich
2021-08-19 13:42 ` Julien Grall
2021-08-19 14:05 ` Bertrand Marquis
2021-08-19 17:11 ` Julien Grall
2021-08-26 0:09 ` Stefano Stabellini
2021-08-26 7:31 ` Wei Chen
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=a12849ee-9549-f495-93b5-376f987177ad@xen.org \
--to=julien@xen.org \
--cc=Bertrand.Marquis@arm.com \
--cc=jbeulich@suse.com \
--cc=sstabellini@kernel.org \
--cc=wei.chen@arm.com \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).