From mboxrd@z Thu Jan  1 00:00:00 1970
From: Zhen Lei <thunder.leizhen@huawei.com>
Subject: [PATCH v4 09/14] arm64/numa: support HAVE_SETUP_PER_CPU_AREA
Date: Tue, 7 Jun 2016 16:08:13 +0800
Message-ID: <1465286898-13828-10-git-send-email-thunder.leizhen@huawei.com>
References: <1465286898-13828-1-git-send-email-thunder.leizhen@huawei.com>
Mime-Version: 1.0
Content-Type: text/plain
Return-path:
In-Reply-To: <1465286898-13828-1-git-send-email-thunder.leizhen@huawei.com>
Sender: linux-kernel-owner@vger.kernel.org
To: Catalin Marinas, Will Deacon, linux-arm-kernel, Ganapatrao Kulkarni,
	Robert Richter, David Daney, Rob Herring, Frank Rowand, Grant Likely,
	devicetree, linux-kernel
Cc: Zefan Li, Xinwei Hu, Tianhong Ding, Hanjun Guo, Zhen Lei
List-Id: devicetree@vger.kernel.org

Allocate each percpu area from its local NUMA node. Without this patch,
all percpu areas are allocated from the node that cpu0 belongs to.

Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
---
 arch/arm64/Kconfig   |  8 ++++++++
 arch/arm64/mm/numa.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 64 insertions(+)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 76747d9..05c1bf1 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -573,6 +573,14 @@ config USE_PERCPU_NUMA_NODE_ID
 	def_bool y
 	depends on NUMA
 
+config HAVE_SETUP_PER_CPU_AREA
+	def_bool y
+	depends on NUMA
+
+config NEED_PER_CPU_EMBED_FIRST_CHUNK
+	def_bool y
+	depends on NUMA
+
 source kernel/Kconfig.preempt
 
 source kernel/Kconfig.hz
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 8be5ba3..99401aa 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -25,6 +25,8 @@
 #include <linux/of.h>
 
 #include <asm/acpi.h>
+#include <asm/sections.h>
+
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 nodemask_t numa_nodes_parsed __initdata;
@@ -129,6 +131,60 @@ void __init early_map_cpu_to_node(unsigned int cpu, int nid)
 	cpu_to_node_map[cpu] = nid;
 }
 
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+static int __init early_cpu_to_node(int cpu)
+{
+	return cpu_to_node_map[cpu];
+}
+
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+	if (early_cpu_to_node(from) == early_cpu_to_node(to))
+		return LOCAL_DISTANCE;
+	else
+		return REMOTE_DISTANCE;
+}
+
+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
+				   size_t align)
+{
+	int nid = early_cpu_to_node(cpu);
+
+	return memblock_virt_alloc_try_nid(size, align,
+			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+}
+
+static void __init pcpu_fc_free(void *ptr, size_t size)
+{
+	memblock_free_early(__pa(ptr), size);
+}
+
+void __init setup_per_cpu_areas(void)
+{
+	unsigned long delta;
+	unsigned int cpu;
+	int rc;
+
+	/*
+	 * Always reserve area for module percpu variables. That's
+	 * what the legacy allocator did.
+	 */
+	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
+				    pcpu_cpu_distance,
+				    pcpu_fc_alloc, pcpu_fc_free);
+	if (rc < 0)
+		panic("Failed to initialize percpu areas.");
+
+	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+	for_each_possible_cpu(cpu)
+		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+}
+#endif
+
 /**
  * numa_add_memblk - Set node id to memblk
  * @nid: NUMA node ID of the new memblk
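
For reviewers unfamiliar with the embed first-chunk allocator: pcpu_embed_first_chunk()
places any two possible CPUs whose pcpu_cpu_distance() is LOCAL_DISTANCE into the same
allocation group, then calls the alloc callback once per group, which is what lets the
memblock_virt_alloc_try_nid() call above land each group's first chunk on its own node.
Below is a minimal standalone userspace C sketch of that grouping rule; the cpu_to_node[]
table and the leaders[] bookkeeping are illustrative stand-ins, not the mm/percpu.c
implementation.

#include <stdio.h>

#define LOCAL_DISTANCE	10
#define REMOTE_DISTANCE	20
#define NR_CPUS		8

/* hypothetical topology: cpus 0-3 on node 0, cpus 4-7 on node 1 */
static int cpu_to_node[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return cpu_to_node[from] == cpu_to_node[to] ?
	       LOCAL_DISTANCE : REMOTE_DISTANCE;
}

int main(void)
{
	unsigned int leaders[NR_CPUS];	/* first CPU seen in each group */
	int nr_groups = 0;

	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++) {
		int g;

		/* join the first group whose leader is at local distance */
		for (g = 0; g < nr_groups; g++)
			if (pcpu_cpu_distance(leaders[g], cpu) == LOCAL_DISTANCE)
				break;
		if (g == nr_groups)
			leaders[nr_groups++] = cpu;	/* start a new group */
		printf("cpu%u -> group %d (node %d)\n", cpu, g, cpu_to_node[cpu]);
	}
	printf("%d groups => one first-chunk allocation per node\n", nr_groups);
	return 0;
}

With the table above this prints two groups, so pcpu_fc_alloc() runs once per node
and each node's CPUs end up with node-local percpu memory.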
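
The delta computed at the end of setup_per_cpu_areas() re-bases the link-time percpu
section onto the freshly allocated chunk: a percpu access becomes the variable's
link-time address plus __per_cpu_offset[cpu]. A small sketch of that arithmetic,
with made-up addresses and a made-up constant unit stride standing in for
pcpu_unit_offsets[]:

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 4

int main(void)
{
	/* all values below are invented for illustration */
	uintptr_t per_cpu_start = 0x08d00000;	/* like __per_cpu_start */
	uintptr_t pcpu_base     = 0x3f000000;	/* like pcpu_base_addr */
	uintptr_t unit_size     = 0x8000;	/* stride between CPU copies */
	uintptr_t delta = pcpu_base - per_cpu_start;
	uintptr_t var   = per_cpu_start + 0x40;	/* a static percpu variable */

	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++) {
		/* __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu] */
		uintptr_t off = delta + cpu * unit_size;

		printf("cpu%u: per-cpu copy of var at %#lx\n",
		       cpu, (unsigned long)(var + off));
	}
	return 0;
}

After the loop in the patch fills __per_cpu_offset[], the usual per_cpu accessors
just add the current CPU's offset to a variable's link-time address, so each CPU
transparently touches its node-local copy.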