* [PATCH 1/3] powerpc/memhotplug: Add add_pages override for PPC
@ 2022-06-23 12:29 Aneesh Kumar K.V
2022-06-23 12:29 ` [PATCH 2/3] powerpc/mm: Update max/min_low_pfn in the same function Aneesh Kumar K.V
2022-06-23 12:29 ` [PATCH 3/3] powerpc/mm: Use VMALLOC_START to validate addr Aneesh Kumar K.V
0 siblings, 2 replies; 9+ messages in thread
From: Aneesh Kumar K.V @ 2022-06-23 12:29 UTC (permalink / raw)
To: linuxppc-dev, mpe; +Cc: linux-mm, Kefeng Wang, Michal Hocko, Aneesh Kumar K.V
With commit ffa0b64e3be5 ("powerpc: Fix virt_addr_valid() for 64-bit Book3E & 32-bit")
the kernel now validates the address against the high_memory value. This results
in the below BUG_ON with dax pfns.
[ 635.798741][T26531] kernel BUG at mm/page_alloc.c:5521!
1:mon> e
cpu 0x1: Vector: 700 (Program Check) at [c000000007287630]
pc: c00000000055ed48: free_pages.part.0+0x48/0x110
lr: c00000000053ca70: tlb_finish_mmu+0x80/0xd0
sp: c0000000072878d0
msr: 800000000282b033
current = 0xc00000000afabe00
paca = 0xc00000037ffff300 irqmask: 0x03 irq_happened: 0x05
pid = 26531, comm = 50-landscape-sy
kernel BUG at :5521!
Linux version 5.19.0-rc3-14659-g4ec05be7c2e1 (kvaneesh@ltc-boston8) (gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0, GNU ld (GNU Binutils for Ubuntu) 2.34) #625 SMP Thu Jun 23 00:35:43 CDT 2022
1:mon> t
[link register ] c00000000053ca70 tlb_finish_mmu+0x80/0xd0
[c0000000072878d0] c00000000053ca54 tlb_finish_mmu+0x64/0xd0 (unreliable)
[c000000007287900] c000000000539424 exit_mmap+0xe4/0x2a0
[c0000000072879e0] c00000000019fc1c mmput+0xcc/0x210
[c000000007287a20] c000000000629230 begin_new_exec+0x5e0/0xf40
[c000000007287ae0] c00000000070b3cc load_elf_binary+0x3ac/0x1e00
[c000000007287c10] c000000000627af0 bprm_execve+0x3b0/0xaf0
[c000000007287cd0] c000000000628414 do_execveat_common.isra.0+0x1e4/0x310
[c000000007287d80] c00000000062858c sys_execve+0x4c/0x60
[c000000007287db0] c00000000002c1b0 system_call_exception+0x160/0x2c0
[c000000007287e10] c00000000000c53c system_call_common+0xec/0x250
The fix is to make sure we update high_memory on memory hotplug.
This is similar to what x86 does in commit 3072e413e305 ("mm/memory_hotplug: introduce add_pages")
Fixes: ffa0b64e3be5 ("powerpc: Fix virt_addr_valid() for 64-bit Book3E & 32-bit")
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
arch/powerpc/Kconfig | 1 +
arch/powerpc/mm/mem.c | 32 +++++++++++++++++++++++++++++++-
arch/x86/Kconfig | 5 +----
mm/Kconfig | 3 +++
4 files changed, 36 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c2ce2e60c8f0..20c1f8e26c96 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -112,6 +112,7 @@ config PPC
select ARCH_DISABLE_KASAN_INLINE if PPC_RADIX_MMU
select ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_ENABLE_MEMORY_HOTREMOVE
+ select ARCH_HAS_ADD_PAGES if ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_HAS_COPY_MC if PPC64
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 52b77684acda..2a63920c369d 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -105,6 +105,36 @@ void __ref arch_remove_linear_mapping(u64 start, u64 size)
vm_unmap_aliases();
}
+/*
+ * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
+ * updating.
+ */
+static void update_end_of_memory_vars(u64 start, u64 size)
+{
+ unsigned long end_pfn = PFN_UP(start + size);
+
+ if (end_pfn > max_pfn) {
+ max_pfn = end_pfn;
+ max_low_pfn = end_pfn;
+ high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
+ }
+}
+
+int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+ struct mhp_params *params)
+{
+ int ret;
+
+ ret = __add_pages(nid, start_pfn, nr_pages, params);
+ WARN_ON_ONCE(ret);
+
+ /* update max_pfn, max_low_pfn and high_memory */
+ update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
+ nr_pages << PAGE_SHIFT);
+
+ return ret;
+}
+
int __ref arch_add_memory(int nid, u64 start, u64 size,
struct mhp_params *params)
{
@@ -115,7 +145,7 @@ int __ref arch_add_memory(int nid, u64 start, u64 size,
rc = arch_create_linear_mapping(nid, start, size, params);
if (rc)
return rc;
- rc = __add_pages(nid, start_pfn, nr_pages, params);
+ rc = add_pages(nid, start_pfn, nr_pages, params);
if (rc)
arch_remove_linear_mapping(start, size);
return rc;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index be0b95e51df6..151ddb96ae46 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -68,6 +68,7 @@ config X86
select ARCH_ENABLE_SPLIT_PMD_PTLOCK if (PGTABLE_LEVELS > 2) && (X86_64 || X86_PAE)
select ARCH_ENABLE_THP_MIGRATION if X86_64 && TRANSPARENT_HUGEPAGE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
+ select ARCH_HAS_ADD_PAGES if ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL
@@ -2453,10 +2454,6 @@ source "kernel/livepatch/Kconfig"
endmenu
-config ARCH_HAS_ADD_PAGES
- def_bool y
- depends on ARCH_ENABLE_MEMORY_HOTPLUG
-
config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
def_bool y
diff --git a/mm/Kconfig b/mm/Kconfig
index 169e64192e48..af4e3f9a3019 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -475,6 +475,9 @@ config EXCLUSIVE_SYSTEM_RAM
config HAVE_BOOTMEM_INFO_NODE
def_bool n
+config ARCH_HAS_ADD_PAGES
+ bool
+
config ARCH_ENABLE_MEMORY_HOTPLUG
bool
--
2.36.1
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH 2/3] powerpc/mm: Update max/min_low_pfn in the same function
2022-06-23 12:29 [PATCH 1/3] powerpc/memhotplug: Add add_pages override for PPC Aneesh Kumar K.V
@ 2022-06-23 12:29 ` Aneesh Kumar K.V
2022-06-23 12:29 ` [PATCH 3/3] powerpc/mm: Use VMALLOC_START to validate addr Aneesh Kumar K.V
1 sibling, 0 replies; 9+ messages in thread
From: Aneesh Kumar K.V @ 2022-06-23 12:29 UTC (permalink / raw)
To: linuxppc-dev, mpe; +Cc: linux-mm, Michal Hocko, Aneesh Kumar K.V
For both CONFIG_NUMA enabled/disabled use mem_topology_setup to
update max/min_low_pfn.
This also adds the min_low_pfn update to the CONFIG_NUMA path, where it was
previously left initialized to zero.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
arch/powerpc/mm/numa.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 0801b2ce9b7d..b44ce71917d7 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1160,6 +1160,9 @@ void __init mem_topology_setup(void)
{
int cpu;
+ max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
+ min_low_pfn = MEMORY_START >> PAGE_SHIFT;
+
/*
* Linux/mm assumes node 0 to be online at boot. However this is not
* true on PowerPC, where node 0 is similar to any other node, it
@@ -1204,9 +1207,6 @@ void __init initmem_init(void)
{
int nid;
- max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
- max_pfn = max_low_pfn;
-
memblock_dump_all();
for_each_online_node(nid) {
--
2.36.1
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH 3/3] powerpc/mm: Use VMALLOC_START to validate addr
2022-06-23 12:29 [PATCH 1/3] powerpc/memhotplug: Add add_pages override for PPC Aneesh Kumar K.V
2022-06-23 12:29 ` [PATCH 2/3] powerpc/mm: Update max/min_low_pfn in the same function Aneesh Kumar K.V
@ 2022-06-23 12:29 ` Aneesh Kumar K.V
2022-06-24 2:45 ` Michael Ellerman
` (2 more replies)
1 sibling, 3 replies; 9+ messages in thread
From: Aneesh Kumar K.V @ 2022-06-23 12:29 UTC (permalink / raw)
To: linuxppc-dev, mpe; +Cc: linux-mm, Kefeng Wang, Michal Hocko, Aneesh Kumar K.V
Instead of high_memory use VMALLOC_START to validate that the address is
not in the vmalloc range.
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
arch/powerpc/include/asm/page.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index e5f75c70eda8..256cad69e42e 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -134,7 +134,7 @@ static inline bool pfn_valid(unsigned long pfn)
#define virt_addr_valid(vaddr) ({ \
unsigned long _addr = (unsigned long)vaddr; \
- _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory && \
+ _addr >= PAGE_OFFSET && _addr < (unsigned long)VMALLOC_START && \
pfn_valid(virt_to_pfn(_addr)); \
})
--
2.36.1
^ permalink raw reply related [flat|nested] 9+ messages in thread
* Re: [PATCH 3/3] powerpc/mm: Use VMALLOC_START to validate addr
2022-06-23 12:29 ` [PATCH 3/3] powerpc/mm: Use VMALLOC_START to validate addr Aneesh Kumar K.V
@ 2022-06-24 2:45 ` Michael Ellerman
2022-06-27 4:44 ` Aneesh Kumar K.V
2022-06-24 11:51 ` Christophe Leroy
2022-06-27 5:42 ` Christophe Leroy
2 siblings, 1 reply; 9+ messages in thread
From: Michael Ellerman @ 2022-06-24 2:45 UTC (permalink / raw)
To: Aneesh Kumar K.V, linuxppc-dev
Cc: linux-mm, Kefeng Wang, Michal Hocko, Aneesh Kumar K.V
"Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> writes:
> Instead of high_memory use VMALLOC_START to validate that the address is
> not in the vmalloc range.
>
> Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
> Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Isn't this really the fix for ffa0b64e3be5 ("powerpc: Fix
virt_addr_valid() for 64-bit Book3E & 32-bit") ?
cheers
> diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
> index e5f75c70eda8..256cad69e42e 100644
> --- a/arch/powerpc/include/asm/page.h
> +++ b/arch/powerpc/include/asm/page.h
> @@ -134,7 +134,7 @@ static inline bool pfn_valid(unsigned long pfn)
>
> #define virt_addr_valid(vaddr) ({ \
> unsigned long _addr = (unsigned long)vaddr; \
> - _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory && \
> + _addr >= PAGE_OFFSET && _addr < (unsigned long)VMALLOC_START && \
> pfn_valid(virt_to_pfn(_addr)); \
> })
>
> --
> 2.36.1
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 3/3] powerpc/mm: Use VMALLOC_START to validate addr
2022-06-23 12:29 ` [PATCH 3/3] powerpc/mm: Use VMALLOC_START to validate addr Aneesh Kumar K.V
2022-06-24 2:45 ` Michael Ellerman
@ 2022-06-24 11:51 ` Christophe Leroy
2022-06-27 4:49 ` Aneesh Kumar K.V
2022-06-27 5:42 ` Christophe Leroy
2 siblings, 1 reply; 9+ messages in thread
From: Christophe Leroy @ 2022-06-24 11:51 UTC (permalink / raw)
To: Aneesh Kumar K.V, linuxppc-dev, mpe; +Cc: linux-mm, Kefeng Wang, Michal Hocko
Le 23/06/2022 à 14:29, Aneesh Kumar K.V a écrit :
> Instead of high_memory use VMALLOC_START to validate that the address is
> not in the vmalloc range.
What's the reason for using VMALLOC_START instead ?
The gap between high_memory and VMALLOC_START should not be seen as
valid memory either, should it ?
If the problem is book3s/64, commit ffa0b64e3be5 ("powerpc: Fix
virt_addr_valid() for 64-bit Book3E & 32-bit") says that those
additional tests are superfluous for book3s/64. Maybe it's time to drop
unnecessary tests for book3s/64 ?
>
> Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
> Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> ---
> arch/powerpc/include/asm/page.h | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
> index e5f75c70eda8..256cad69e42e 100644
> --- a/arch/powerpc/include/asm/page.h
> +++ b/arch/powerpc/include/asm/page.h
> @@ -134,7 +134,7 @@ static inline bool pfn_valid(unsigned long pfn)
>
> #define virt_addr_valid(vaddr) ({ \
> unsigned long _addr = (unsigned long)vaddr; \
> - _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory && \
> + _addr >= PAGE_OFFSET && _addr < (unsigned long)VMALLOC_START && \
> pfn_valid(virt_to_pfn(_addr)); \
> })
>
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 3/3] powerpc/mm: Use VMALLOC_START to validate addr
2022-06-24 2:45 ` Michael Ellerman
@ 2022-06-27 4:44 ` Aneesh Kumar K.V
0 siblings, 0 replies; 9+ messages in thread
From: Aneesh Kumar K.V @ 2022-06-27 4:44 UTC (permalink / raw)
To: Michael Ellerman, linuxppc-dev; +Cc: linux-mm, Kefeng Wang, Michal Hocko
Michael Ellerman <mpe@ellerman.id.au> writes:
> "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> writes:
>> Instead of high_memory use VMALLOC_START to validate that the address is
>> not in the vmalloc range.
>>
>> Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
>> Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
>> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
>
> Isn't this really the fix for ffa0b64e3be5 ("powerpc: Fix
> virt_addr_valid() for 64-bit Book3E & 32-bit") ?
If we are looking for a simpler backport, yes. But what commit ffa0b64e3be5
does is correct. high_memory is supposed to be the top of the direct-mapped
address range. Hence checking against high_memory will also avoid vmalloc addresses.
If we take patch 1 then patch 3 is not really a fix. I would consider it
a cleanup to switch to a more familiar VMALLOC_START variable.
>
> cheers
>
>> diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
>> index e5f75c70eda8..256cad69e42e 100644
>> --- a/arch/powerpc/include/asm/page.h
>> +++ b/arch/powerpc/include/asm/page.h
>> @@ -134,7 +134,7 @@ static inline bool pfn_valid(unsigned long pfn)
>>
>> #define virt_addr_valid(vaddr) ({ \
>> unsigned long _addr = (unsigned long)vaddr; \
>> - _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory && \
>> + _addr >= PAGE_OFFSET && _addr < (unsigned long)VMALLOC_START && \
>> pfn_valid(virt_to_pfn(_addr)); \
>> })
>>
>> --
>> 2.36.1
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 3/3] powerpc/mm: Use VMALLOC_START to validate addr
2022-06-24 11:51 ` Christophe Leroy
@ 2022-06-27 4:49 ` Aneesh Kumar K.V
0 siblings, 0 replies; 9+ messages in thread
From: Aneesh Kumar K.V @ 2022-06-27 4:49 UTC (permalink / raw)
To: Christophe Leroy, linuxppc-dev, mpe; +Cc: linux-mm, Kefeng Wang, Michal Hocko
Christophe Leroy <christophe.leroy@csgroup.eu> writes:
> Le 23/06/2022 à 14:29, Aneesh Kumar K.V a écrit :
>> Instead of high_memory use VMALLOC_START to validate that the address is
>> not in the vmalloc range.
>
> What's the reason for using VMALLOC_START instead ?
> The gap between high_memory and VMALLOC_START should not be seen as
> valid memory either, should it ?
Yes and that invalid range should be captured by the pfn_valid check.
Commit ffa0b64e3be5 intended to skip the vmalloc range.
Unfortunately, that resulted in kernel crash due to architecture not
updating high_memory after a memory hotplug. That should be fixed by
patch 1 in this series. patch 3 was added merely as a cleanup to
switch from high_memory to a more familiar VMALLOC_START variable.
>
> If the problem is book3s/64, commit ffa0b64e3be5 ("powerpc: Fix
> virt_addr_valid() for 64-bit Book3E & 32-bit") says that those
> additional tests are superfluous for boo3s/64. Maybe it's time to drop
> unnecessary tests for book3s/64 ?
They are not specific to book3s/64. IIUC, virt_addr_valid() will return false
for an addr after memory hotplug on other platforms too. Patch 1
describes those details.
>
>>
>> Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
>> Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
>> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
>> ---
>> arch/powerpc/include/asm/page.h | 2 +-
>> 1 file changed, 1 insertion(+), 1 deletion(-)
>>
>> diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
>> index e5f75c70eda8..256cad69e42e 100644
>> --- a/arch/powerpc/include/asm/page.h
>> +++ b/arch/powerpc/include/asm/page.h
>> @@ -134,7 +134,7 @@ static inline bool pfn_valid(unsigned long pfn)
>>
>> #define virt_addr_valid(vaddr) ({ \
>> unsigned long _addr = (unsigned long)vaddr; \
>> - _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory && \
>> + _addr >= PAGE_OFFSET && _addr < (unsigned long)VMALLOC_START && \
>> pfn_valid(virt_to_pfn(_addr)); \
>> })
>>
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 3/3] powerpc/mm: Use VMALLOC_START to validate addr
2022-06-23 12:29 ` [PATCH 3/3] powerpc/mm: Use VMALLOC_START to validate addr Aneesh Kumar K.V
2022-06-24 2:45 ` Michael Ellerman
2022-06-24 11:51 ` Christophe Leroy
@ 2022-06-27 5:42 ` Christophe Leroy
2022-06-27 6:49 ` Aneesh Kumar K.V
2 siblings, 1 reply; 9+ messages in thread
From: Christophe Leroy @ 2022-06-27 5:42 UTC (permalink / raw)
To: Aneesh Kumar K.V, linuxppc-dev, mpe; +Cc: linux-mm, Kefeng Wang, Michal Hocko
Le 23/06/2022 à 14:29, Aneesh Kumar K.V a écrit :
> Instead of high_memory use VMALLOC_START to validate that the address is
> not in the vmalloc range.
>
> Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
> Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> ---
> arch/powerpc/include/asm/page.h | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
> index e5f75c70eda8..256cad69e42e 100644
> --- a/arch/powerpc/include/asm/page.h
> +++ b/arch/powerpc/include/asm/page.h
> @@ -134,7 +134,7 @@ static inline bool pfn_valid(unsigned long pfn)
>
> #define virt_addr_valid(vaddr) ({ \
> unsigned long _addr = (unsigned long)vaddr; \
> - _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory && \
> + _addr >= PAGE_OFFSET && _addr < (unsigned long)VMALLOC_START && \
> pfn_valid(virt_to_pfn(_addr)); \
> })
>
What about booke/64 ?
The test will be _addr >= 0xc000000000000000 && _addr <
0x8000000000000000, so the test will always be false.
Christophe
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 3/3] powerpc/mm: Use VMALLOC_START to validate addr
2022-06-27 5:42 ` Christophe Leroy
@ 2022-06-27 6:49 ` Aneesh Kumar K.V
0 siblings, 0 replies; 9+ messages in thread
From: Aneesh Kumar K.V @ 2022-06-27 6:49 UTC (permalink / raw)
To: Christophe Leroy, linuxppc-dev, mpe; +Cc: linux-mm, Kefeng Wang, Michal Hocko
Christophe Leroy <christophe.leroy@csgroup.eu> writes:
> Le 23/06/2022 à 14:29, Aneesh Kumar K.V a écrit :
>> Instead of high_memory use VMALLOC_START to validate that the address is
>> not in the vmalloc range.
>>
>> Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
>> Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
>> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
>> ---
>> arch/powerpc/include/asm/page.h | 2 +-
>> 1 file changed, 1 insertion(+), 1 deletion(-)
>>
>> diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
>> index e5f75c70eda8..256cad69e42e 100644
>> --- a/arch/powerpc/include/asm/page.h
>> +++ b/arch/powerpc/include/asm/page.h
>> @@ -134,7 +134,7 @@ static inline bool pfn_valid(unsigned long pfn)
>>
>> #define virt_addr_valid(vaddr) ({ \
>> unsigned long _addr = (unsigned long)vaddr; \
>> - _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory && \
>> + _addr >= PAGE_OFFSET && _addr < (unsigned long)VMALLOC_START && \
>> pfn_valid(virt_to_pfn(_addr)); \
>> })
>>
>
> What about booke/64 ?
>
> The test will be _addr >= 0xc000000000000000 && _addr <
> 0x8000000000000000 so the test will be always false.
>
Ok, I didn't realize that booke/64 have vmalloc range below direct map.
I guess we should drop patch 3.
-aneesh
^ permalink raw reply [flat|nested] 9+ messages in thread
end of thread, other threads:[~2022-06-27 6:50 UTC | newest]
Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-06-23 12:29 [PATCH 1/3] powerpc/memhotplug: Add add_pages override for PPC Aneesh Kumar K.V
2022-06-23 12:29 ` [PATCH 2/3] powerpc/mm: Update max/min_low_pfn in the same function Aneesh Kumar K.V
2022-06-23 12:29 ` [PATCH 3/3] powerpc/mm: Use VMALLOC_START to validate addr Aneesh Kumar K.V
2022-06-24 2:45 ` Michael Ellerman
2022-06-27 4:44 ` Aneesh Kumar K.V
2022-06-24 11:51 ` Christophe Leroy
2022-06-27 4:49 ` Aneesh Kumar K.V
2022-06-27 5:42 ` Christophe Leroy
2022-06-27 6:49 ` Aneesh Kumar K.V
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).