From: Julien Grall <julien@xen.org>
To: Wei Chen <Wei.Chen@arm.com>,
	"xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>,
	"sstabellini@kernel.org" <sstabellini@kernel.org>
Cc: Bertrand Marquis <Bertrand.Marquis@arm.com>
Subject: Re: [XEN RFC PATCH 12/40] xen/x86: Move numa_initmem_init to common
Date: Wed, 25 Aug 2021 14:26:45 +0100	[thread overview]
Message-ID: <b5b32a29-0cfe-53fa-4f43-9c8455719acc@xen.org> (raw)
In-Reply-To: <DB9PR08MB685735416FFC85344717DF2D9EC69@DB9PR08MB6857.eurprd08.prod.outlook.com>



On 25/08/2021 12:15, Wei Chen wrote:
> Hi Julien,

Hi Wei,

>> -----Original Message-----
>> From: Julien Grall <julien@xen.org>
>> Sent: 25 August 2021 18:22
>> To: Wei Chen <Wei.Chen@arm.com>; xen-devel@lists.xenproject.org;
>> sstabellini@kernel.org; jbeulich@suse.com
>> Cc: Bertrand Marquis <Bertrand.Marquis@arm.com>
>> Subject: Re: [XEN RFC PATCH 12/40] xen/x86: Move numa_initmem_init to
>> common
>>
>> Hi Wei,
>>
>> On 11/08/2021 11:23, Wei Chen wrote:
>>> This function can be reused by the Arm device-tree-based
>>> NUMA support, so move it from x86 to common, together with
>>> its related variables and functions:
>>> setup_node_bootmem, numa_init_array and numa_emulation.
>>>
>>> As numa_initmem_init has been moved to common, _memnodemap
>>> is no longer used across files and can be restored to
>>> static.
>>
>> As we discussed on a previous patch, we should try to avoid this kind of
>> dance. I can help to find a split that would achieve that.
>>
> 
> Yes, thanks!
> 
>>>
>>> Signed-off-by: Wei Chen <wei.chen@arm.com>
>>> ---
>>>    xen/arch/x86/numa.c         | 118 ----------------------------------
>>>    xen/common/numa.c           | 122 +++++++++++++++++++++++++++++++++++-
>>>    xen/include/asm-x86/numa.h  |   5 --
>>>    xen/include/asm-x86/setup.h |   1 -
>>>    xen/include/xen/numa.h      |   8 ++-
>>>    5 files changed, 128 insertions(+), 126 deletions(-)
>>>
>>> diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
>>> index f2626b3968..6908738305 100644
>>> --- a/xen/arch/x86/numa.c
>>> +++ b/xen/arch/x86/numa.c
>>> @@ -38,7 +38,6 @@ nodeid_t apicid_to_node[MAX_LOCAL_APIC] = {
>>>
>>>    nodemask_t __read_mostly node_online_map = { { [0] = 1UL } };
>>>
>>> -bool numa_off;
>>>    s8 acpi_numa = 0;
>>>
>>>    int srat_disabled(void)
>>> @@ -46,123 +45,6 @@ int srat_disabled(void)
>>>        return numa_off || acpi_numa < 0;
>>>    }
>>>
>>> -/* initialize NODE_DATA given nodeid and start/end */
>>> -void __init setup_node_bootmem(nodeid_t nodeid, u64 start, u64 end)
>>> -{
>>> -    unsigned long start_pfn, end_pfn;
>>> -
>>> -    start_pfn = start >> PAGE_SHIFT;
>>> -    end_pfn = end >> PAGE_SHIFT;
>>> -
>>> -    NODE_DATA(nodeid)->node_start_pfn = start_pfn;
>>> -    NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;
>>> -
>>> -    node_set_online(nodeid);
>>> -}
>>> -
>>> -void __init numa_init_array(void)
>>> -{
>>> -    int rr, i;
>>> -
>>> -    /* There are unfortunately some poorly designed mainboards around
>>> -       that only connect memory to a single CPU. This breaks the 1:1 cpu->node
>>> -       mapping. To avoid this fill in the mapping for all possible
>>> -       CPUs, as the number of CPUs is not known yet.
>>> -       We round robin the existing nodes. */
>>> -    rr = first_node(node_online_map);
>>> -    for ( i = 0; i < nr_cpu_ids; i++ )
>>> -    {
>>> -        if ( cpu_to_node[i] != NUMA_NO_NODE )
>>> -            continue;
>>> -        numa_set_node(i, rr);
>>> -        rr = cycle_node(rr, node_online_map);
>>> -    }
>>> -}
>>> -
>>> -#ifdef CONFIG_NUMA_EMU
>>> -static int numa_fake __initdata = 0;
>>> -
>>> -/* Numa emulation */
>>> -static int __init numa_emulation(u64 start_pfn, u64 end_pfn)
>>> -{
>>> -    int i;
>>> -    struct node nodes[MAX_NUMNODES];
>>> -    u64 sz = ((end_pfn - start_pfn)<<PAGE_SHIFT) / numa_fake;
>>> -
>>> -    /* Kludge needed for the hash function */
>>> -    if ( hweight64(sz) > 1 )
>>> -    {
>>> -        u64 x = 1;
>>> -        while ( (x << 1) < sz )
>>> -            x <<= 1;
>>> -        if ( x < sz/2 )
>>> -            printk(KERN_ERR "Numa emulation unbalanced. Complain to maintainer\n");
>>> -        sz = x;
>>> -    }
>>> -
>>> -    memset(&nodes,0,sizeof(nodes));
>>> -    for ( i = 0; i < numa_fake; i++ )
>>> -    {
>>> -        nodes[i].start = (start_pfn<<PAGE_SHIFT) + i*sz;
>>> -        if ( i == numa_fake - 1 )
>>> -            sz = (end_pfn<<PAGE_SHIFT) - nodes[i].start;
>>> -        nodes[i].end = nodes[i].start + sz;
>>> -        printk(KERN_INFO "Faking node %d at %"PRIx64"-%"PRIx64" (%"PRIu64"MB)\n",
>>> -               i,
>>> -               nodes[i].start, nodes[i].end,
>>> -               (nodes[i].end - nodes[i].start) >> 20);
>>> -        node_set_online(i);
>>> -    }
>>> -    memnode_shift = compute_hash_shift(nodes, numa_fake, NULL);
>>> -    if ( memnode_shift < 0 )
>>> -    {
>>> -        memnode_shift = 0;
>>> -        printk(KERN_ERR "No NUMA hash function found. Emulation disabled.\n");
>>> -        return -1;
>>> -    }
>>> -    for_each_online_node ( i )
>>> -        setup_node_bootmem(i, nodes[i].start, nodes[i].end);
>>> -    numa_init_array();
>>> -
>>> -    return 0;
>>> -}
>>> -#endif
>>> -
>>> -void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
>>> -{
>>> -    int i;
>>> -
>>> -#ifdef CONFIG_NUMA_EMU
>>> -    if ( numa_fake && !numa_emulation(start_pfn, end_pfn) )
>>> -        return;
>>> -#endif
>>> -
>>> -#ifdef CONFIG_ACPI_NUMA
>>> -    if ( !numa_off && !acpi_scan_nodes((u64)start_pfn << PAGE_SHIFT,
>>> -         (u64)end_pfn << PAGE_SHIFT) )
>>> -        return;
>>> -#endif
>>> -
>>> -    printk(KERN_INFO "%s\n",
>>> -           numa_off ? "NUMA turned off" : "No NUMA configuration found");
>>> -
>>> -    printk(KERN_INFO "Faking a node at %016"PRIx64"-%016"PRIx64"\n",
>>> -           (u64)start_pfn << PAGE_SHIFT,
>>> -           (u64)end_pfn << PAGE_SHIFT);
>>> -    /* setup dummy node covering all memory */
>>> -    memnode_shift = BITS_PER_LONG - 1;
>>> -    memnodemap = _memnodemap;
>>> -    memnodemapsize = ARRAY_SIZE(_memnodemap);
>>> -
>>> -    nodes_clear(node_online_map);
>>> -    node_set_online(0);
>>> -    for ( i = 0; i < nr_cpu_ids; i++ )
>>> -        numa_set_node(i, 0);
>>> -    cpumask_copy(&node_to_cpumask[0], cpumask_of(0));
>>> -    setup_node_bootmem(0, (u64)start_pfn << PAGE_SHIFT,
>>> -                    (u64)end_pfn << PAGE_SHIFT);
>>> -}
>>> -
>>>    void numa_set_node(int cpu, nodeid_t node)
>>>    {
>>>        cpu_to_node[cpu] = node;
>>> diff --git a/xen/common/numa.c b/xen/common/numa.c
>>> index 1facc8fe2b..26c0006d04 100644
>>> --- a/xen/common/numa.c
>>> +++ b/xen/common/numa.c
>>> @@ -14,12 +14,13 @@
>>>    #include <xen/smp.h>
>>>    #include <xen/pfn.h>
>>>    #include <xen/sched.h>
>>
NIT: We tend to add a newline between <xen/...> headers and <asm/...>
headers.
>>
> 
> got it
> 
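
For example, for the hunk above that would look roughly like this (just a
sketch of the usual layout):

    #include <xen/smp.h>
    #include <xen/pfn.h>
    #include <xen/sched.h>

    #include <asm/acpi.h>
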
>>> +#include <asm/acpi.h>
>>>
>>>    struct node_data node_data[MAX_NUMNODES];
>>>
>>>    /* Mapping from pdx to node id */
>>>    int memnode_shift;
>>> -typeof(*memnodemap) _memnodemap[64];
>>> +static typeof(*memnodemap) _memnodemap[64];
>>>    unsigned long memnodemapsize;
>>>    u8 *memnodemap;
>>>
>>> @@ -34,6 +35,8 @@ int num_node_memblks;
>>>    struct node node_memblk_range[NR_NODE_MEMBLKS];
>>>    nodeid_t memblk_nodeid[NR_NODE_MEMBLKS];
>>>
>>> +bool numa_off;
>>> +
>>>    /*
>>>     * Given a shift value, try to populate memnodemap[]
>>>     * Returns :
>>> @@ -191,3 +194,120 @@ void numa_add_cpu(int cpu)
>>>    {
>>>        cpumask_set_cpu(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
>>>    }
>>> +
>>> +/* initialize NODE_DATA given nodeid and start/end */
>>> +void __init setup_node_bootmem(nodeid_t nodeid, u64 start, u64 end)
>>
>> From an abstract PoV, start and end should be paddr_t (see the sketch
>> below the quoted function). This should be done in a separate patch though.
>>
> 
> Ok.
> 
>>> +{
>>> +    unsigned long start_pfn, end_pfn;
>>> +
>>> +    start_pfn = start >> PAGE_SHIFT;
>>> +    end_pfn = end >> PAGE_SHIFT;
>>> +
>>> +    NODE_DATA(nodeid)->node_start_pfn = start_pfn;
>>> +    NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;
>>> +
>>> +    node_set_online(nodeid);
>>> +}
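
To illustrate the paddr_t suggestion above, roughly (untested sketch,
assuming paddr_to_pfn() is usable in this context):

    /* initialize NODE_DATA given nodeid and start/end */
    void __init setup_node_bootmem(nodeid_t nodeid, paddr_t start, paddr_t end)
    {
        /* paddr_to_pfn() assumed here to replace the open-coded shifts */
        unsigned long start_pfn = paddr_to_pfn(start);
        unsigned long end_pfn = paddr_to_pfn(end);

        NODE_DATA(nodeid)->node_start_pfn = start_pfn;
        NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;

        node_set_online(nodeid);
    }
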
>>> +
>>> +void __init numa_init_array(void)
>>> +{
>>> +    int rr, i;
>>> +
>>> +    /* There are unfortunately some poorly designed mainboards around
>>> +       that only connect memory to a single CPU. This breaks the 1:1 cpu->node
>>> +       mapping. To avoid this fill in the mapping for all possible
>>> +       CPUs, as the number of CPUs is not known yet.
>>> +       We round robin the existing nodes. */
>>> +    rr = first_node(node_online_map);
>>> +    for ( i = 0; i < nr_cpu_ids; i++ )
>>> +    {
>>> +        if ( cpu_to_node[i] != NUMA_NO_NODE )
>>> +            continue;
>>> +        numa_set_node(i, rr);
>>> +        rr = cycle_node(rr, node_online_map);
>>> +    }
>>> +}
>>> +
>>> +#ifdef CONFIG_NUMA_EMU
>>> +int numa_fake __initdata = 0;
>>> +
>>> +/* Numa emulation */
>>> +static int __init numa_emulation(u64 start_pfn, u64 end_pfn)
>>
>> Here, this should be either "unsigned long" or ideally "mfn_t".
>> Although, if you use "unsigned long", you will need to...
>>
> 
> Do we need a separate patch to do it?

I would prefer if the cleanups were done separately, as this makes it
easier to review the code movement.

> 
>>> +{
>>> +    int i;
>>> +    struct node nodes[MAX_NUMNODES];
>>> +    u64 sz = ((end_pfn - start_pfn)<<PAGE_SHIFT) / numa_fake;
>>
>> ... cast "(end_pfn - start_pfn)" to uint64_t or use pfn_to_paddr().
>>
> 
> Ok
> 
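
I.e. something along these lines (sketch; pfn_to_paddr() assumed here so
the value is widened before the shift):

    u64 sz = pfn_to_paddr(end_pfn - start_pfn) / numa_fake;
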
>>> +
>>> +    /* Kludge needed for the hash function */
>>> +    if ( hweight64(sz) > 1 )
>>> +    {
>>> +        u64 x = 1;
>>> +        while ( (x << 1) < sz )
>>> +            x <<= 1;
>>> +        if ( x < sz/2 )
>>> +            printk(KERN_ERR "Numa emulation unbalanced. Complain to maintainer\n");
>>> +        sz = x;
>>> +    }
>>> +
>>> +    memset(&nodes,0,sizeof(nodes));
>>> +    for ( i = 0; i < numa_fake; i++ )
>>> +    {
>>> +        nodes[i].start = (start_pfn<<PAGE_SHIFT) + i*sz;
>>> +        if ( i == numa_fake - 1 )
>>> +            sz = (end_pfn<<PAGE_SHIFT) - nodes[i].start;
>>> +        nodes[i].end = nodes[i].start + sz;
>>> +        printk(KERN_INFO "Faking node %d at %"PRIx64"-%"PRIx64" (%"PRIu64"MB)\n",
>>> +               i,
>>> +               nodes[i].start, nodes[i].end,
>>> +               (nodes[i].end - nodes[i].start) >> 20);
>>> +        node_set_online(i);
>>> +    }
>>> +    memnode_shift = compute_hash_shift(nodes, numa_fake, NULL);
>>> +    if ( memnode_shift < 0 )
>>> +    {
>>> +        memnode_shift = 0;
>>> +        printk(KERN_ERR "No NUMA hash function found. Emulation disabled.\n");
>>> +        return -1;
>>> +    }
>>> +    for_each_online_node ( i )
>>> +        setup_node_bootmem(i, nodes[i].start, nodes[i].end);
>>> +    numa_init_array();
>>> +
>>> +    return 0;
>>> +}
>>> +#endif
>>> +
>>> +void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
>>> +{
>>> +    int i;
>>> +
>>> +#ifdef CONFIG_NUMA_EMU
>>> +    if ( numa_fake && !numa_emulation(start_pfn, end_pfn) )
>>> +        return;
>>> +#endif
>>> +
>>> +#ifdef CONFIG_ACPI_NUMA
>>> +    if ( !numa_off && !acpi_scan_nodes((u64)start_pfn << PAGE_SHIFT,
>>> +         (u64)end_pfn << PAGE_SHIFT) )
>>
>> (u64)v << PAGE_SHIFT should be switched to use pfn_to_paddr() or
>> mfn_to_paddr() if you decide to make start_pfn and end_pfn typesafe.
>>
> 
> Still need a separate patch to change it before move?

Yes.
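
For instance (sketch, using the non-typesafe variant):

    /* pfn_to_paddr() assumed to cast to paddr_t before shifting */
    if ( !numa_off &&
         !acpi_scan_nodes(pfn_to_paddr(start_pfn), pfn_to_paddr(end_pfn)) )
        return;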

Cheers,

-- 
Julien Grall

