All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2] powerpc/s64: Clarify that radix lacks DEBUG_PAGEALLOC
@ 2021-10-13 21:34 Joel Stanley
  2021-10-14  8:45 ` Christophe Leroy
  2021-11-02 10:11 ` Michael Ellerman
  0 siblings, 2 replies; 4+ messages in thread
From: Joel Stanley @ 2021-10-13 21:34 UTC (permalink / raw)
  To: Jordan Niethe, Christophe Leroy; +Cc: linuxppc-dev

The page_alloc.c code will call into __kernel_map_pages when
DEBUG_PAGEALLOC is configured and enabled.

As the implementation assumes hash, this would crash spectacularly if
not for a bit of luck in __kernel_map_pages. In this function
linear_map_hash_count is always zero, so the for loop exits without
doing any damage.

There are no other platforms that determine if they support
debug_pagealloc at runtime. Instead of adding code to mm/page_alloc.c to
do that, this change turns the map/unmap into a noop when in radix
mode and prints a warning once.

Signed-off-by: Joel Stanley <joel@jms.id.au>
---
v2: Put __kernel_map_pages in pgtable.h

 arch/powerpc/include/asm/book3s/64/hash.h    |  2 ++
 arch/powerpc/include/asm/book3s/64/pgtable.h | 11 +++++++++++
 arch/powerpc/include/asm/book3s/64/radix.h   |  3 +++
 arch/powerpc/mm/book3s64/hash_utils.c        |  2 +-
 arch/powerpc/mm/book3s64/radix_pgtable.c     |  7 +++++++
 5 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index d959b0195ad9..674fe0e890dc 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -255,6 +255,8 @@ int hash__create_section_mapping(unsigned long start, unsigned long end,
 				 int nid, pgprot_t prot);
 int hash__remove_section_mapping(unsigned long start, unsigned long end);
 
+void hash__kernel_map_pages(struct page *page, int numpages, int enable);
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 5d34a8646f08..265661ded238 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1101,6 +1101,17 @@ static inline void vmemmap_remove_mapping(unsigned long start,
 }
 #endif
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	if (radix_enabled()) {
+		radix__kernel_map_pages(page, numpages, enable);
+		return;
+	}
+	hash__kernel_map_pages(page, numpages, enable);
+}
+#endif
+
 static inline pte_t pmd_pte(pmd_t pmd)
 {
 	return __pte_raw(pmd_raw(pmd));
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 59cab558e2f0..d090d9612348 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -316,5 +316,8 @@ int radix__create_section_mapping(unsigned long start, unsigned long end,
 				  int nid, pgprot_t prot);
 int radix__remove_section_mapping(unsigned long start, unsigned long end);
 #endif /* CONFIG_MEMORY_HOTPLUG */
+
+void radix__kernel_map_pages(struct page *page, int numpages, int enable);
+
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index c145776d3ae5..cfd45245d009 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1988,7 +1988,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 				     mmu_kernel_ssize, 0);
 }
 
-void __kernel_map_pages(struct page *page, int numpages, int enable)
+void hash__kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	unsigned long flags, vaddr, lmi;
 	int i;
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index ae20add7954a..83b33418ad28 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -920,6 +920,13 @@ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long
 #endif
 #endif
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void radix__kernel_map_pages(struct page *page, int numpages, int enable)
+{
+        pr_warn_once("DEBUG_PAGEALLOC not supported in radix mode\n");
+}
+#endif
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
 unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
-- 
2.33.0


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH v2] powerpc/s64: Clarify that radix lacks DEBUG_PAGEALLOC
  2021-10-13 21:34 [PATCH v2] powerpc/s64: Clarify that radix lacks DEBUG_PAGEALLOC Joel Stanley
@ 2021-10-14  8:45 ` Christophe Leroy
  2021-10-15 10:41   ` Michael Ellerman
  2021-11-02 10:11 ` Michael Ellerman
  1 sibling, 1 reply; 4+ messages in thread
From: Christophe Leroy @ 2021-10-14  8:45 UTC (permalink / raw)
  To: Joel Stanley, Jordan Niethe; +Cc: linuxppc-dev



Le 13/10/2021 à 23:34, Joel Stanley a écrit :
> The page_alloc.c code will call into __kernel_map_pages when
> DEBUG_PAGEALLOC is configured and enabled.
> 
> As the implementation assumes hash, this would crash spectacularly if
> not for a bit of luck in __kernel_map_pages. In this function
> linear_map_hash_count is always zero, so the for loop exits without
> doing any damage.
> 
> There are no other platforms that determine if they support
> debug_pagealloc at runtime. Instead of adding code to mm/page_alloc.c to
> do that, this change turns the map/unmap into a noop when in radix
> mode and prints a warning once.
> 
> Signed-off-by: Joel Stanley <joel@jms.id.au>
> ---
> v2: Put __kernel_map_pages in pgtable.h
> 
>   arch/powerpc/include/asm/book3s/64/hash.h    |  2 ++
>   arch/powerpc/include/asm/book3s/64/pgtable.h | 11 +++++++++++
>   arch/powerpc/include/asm/book3s/64/radix.h   |  3 +++
>   arch/powerpc/mm/book3s64/hash_utils.c        |  2 +-
>   arch/powerpc/mm/book3s64/radix_pgtable.c     |  7 +++++++
>   5 files changed, 24 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
> index d959b0195ad9..674fe0e890dc 100644
> --- a/arch/powerpc/include/asm/book3s/64/hash.h
> +++ b/arch/powerpc/include/asm/book3s/64/hash.h
> @@ -255,6 +255,8 @@ int hash__create_section_mapping(unsigned long start, unsigned long end,
>   				 int nid, pgprot_t prot);
>   int hash__remove_section_mapping(unsigned long start, unsigned long end);
>   
> +void hash__kernel_map_pages(struct page *page, int numpages, int enable);
> +
>   #endif /* !__ASSEMBLY__ */
>   #endif /* __KERNEL__ */
>   #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
> index 5d34a8646f08..265661ded238 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
> @@ -1101,6 +1101,17 @@ static inline void vmemmap_remove_mapping(unsigned long start,
>   }
>   #endif
>   
> +#ifdef CONFIG_DEBUG_PAGEALLOC
> +static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
> +{
> +	if (radix_enabled()) {
> +		radix__kernel_map_pages(page, numpages, enable);
> +		return;
> +	}
> +	hash__kernel_map_pages(page, numpages, enable);

I'd have prefered something like below

	if (radix_enabled())
		radix__kernel_map_pages(page, numpages, enable);
	else
		hash__kernel_map_pages(page, numpages, enable);


But regardless,

Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>



> +}
> +#endif
> +
>   static inline pte_t pmd_pte(pmd_t pmd)
>   {
>   	return __pte_raw(pmd_raw(pmd));
> diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
> index 59cab558e2f0..d090d9612348 100644
> --- a/arch/powerpc/include/asm/book3s/64/radix.h
> +++ b/arch/powerpc/include/asm/book3s/64/radix.h
> @@ -316,5 +316,8 @@ int radix__create_section_mapping(unsigned long start, unsigned long end,
>   				  int nid, pgprot_t prot);
>   int radix__remove_section_mapping(unsigned long start, unsigned long end);
>   #endif /* CONFIG_MEMORY_HOTPLUG */
> +
> +void radix__kernel_map_pages(struct page *page, int numpages, int enable);
> +
>   #endif /* __ASSEMBLY__ */
>   #endif
> diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
> index c145776d3ae5..cfd45245d009 100644
> --- a/arch/powerpc/mm/book3s64/hash_utils.c
> +++ b/arch/powerpc/mm/book3s64/hash_utils.c
> @@ -1988,7 +1988,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
>   				     mmu_kernel_ssize, 0);
>   }
>   
> -void __kernel_map_pages(struct page *page, int numpages, int enable)
> +void hash__kernel_map_pages(struct page *page, int numpages, int enable)
>   {
>   	unsigned long flags, vaddr, lmi;
>   	int i;
> diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
> index ae20add7954a..83b33418ad28 100644
> --- a/arch/powerpc/mm/book3s64/radix_pgtable.c
> +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
> @@ -920,6 +920,13 @@ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long
>   #endif
>   #endif
>   
> +#ifdef CONFIG_DEBUG_PAGEALLOC
> +void radix__kernel_map_pages(struct page *page, int numpages, int enable)
> +{
> +        pr_warn_once("DEBUG_PAGEALLOC not supported in radix mode\n");
> +}
> +#endif
> +
>   #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>   
>   unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
> 

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH v2] powerpc/s64: Clarify that radix lacks DEBUG_PAGEALLOC
  2021-10-14  8:45 ` Christophe Leroy
@ 2021-10-15 10:41   ` Michael Ellerman
  0 siblings, 0 replies; 4+ messages in thread
From: Michael Ellerman @ 2021-10-15 10:41 UTC (permalink / raw)
  To: Christophe Leroy, Joel Stanley, Jordan Niethe; +Cc: linuxppc-dev

Christophe Leroy <christophe.leroy@csgroup.eu> writes:
> Le 13/10/2021 à 23:34, Joel Stanley a écrit :
>> The page_alloc.c code will call into __kernel_map_pages when
>> DEBUG_PAGEALLOC is configured and enabled.
>> 
>> As the implementation assumes hash, this would crash spectacularly if
>> not for a bit of luck in __kernel_map_pages. In this function
>> linear_map_hash_count is always zero, so the for loop exits without
>> doing any damage.
>> 
>> There are no other platforms that determine if they support
>> debug_pagealloc at runtime. Instead of adding code to mm/page_alloc.c to
>> do that, this change turns the map/unmap into a noop when in radix
>> mode and prints a warning once.
>> 
>> Signed-off-by: Joel Stanley <joel@jms.id.au>
>> ---
>> v2: Put __kernel_map_pages in pgtable.h
>> 
>>   arch/powerpc/include/asm/book3s/64/hash.h    |  2 ++
>>   arch/powerpc/include/asm/book3s/64/pgtable.h | 11 +++++++++++
>>   arch/powerpc/include/asm/book3s/64/radix.h   |  3 +++
>>   arch/powerpc/mm/book3s64/hash_utils.c        |  2 +-
>>   arch/powerpc/mm/book3s64/radix_pgtable.c     |  7 +++++++
>>   5 files changed, 24 insertions(+), 1 deletion(-)
>> 
>> diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
>> index d959b0195ad9..674fe0e890dc 100644
>> --- a/arch/powerpc/include/asm/book3s/64/hash.h
>> +++ b/arch/powerpc/include/asm/book3s/64/hash.h
>> @@ -255,6 +255,8 @@ int hash__create_section_mapping(unsigned long start, unsigned long end,
>>   				 int nid, pgprot_t prot);
>>   int hash__remove_section_mapping(unsigned long start, unsigned long end);
>>   
>> +void hash__kernel_map_pages(struct page *page, int numpages, int enable);
>> +
>>   #endif /* !__ASSEMBLY__ */
>>   #endif /* __KERNEL__ */
>>   #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
>> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
>> index 5d34a8646f08..265661ded238 100644
>> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
>> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
>> @@ -1101,6 +1101,17 @@ static inline void vmemmap_remove_mapping(unsigned long start,
>>   }
>>   #endif
>>   
>> +#ifdef CONFIG_DEBUG_PAGEALLOC
>> +static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
>> +{
>> +	if (radix_enabled()) {
>> +		radix__kernel_map_pages(page, numpages, enable);
>> +		return;
>> +	}
>> +	hash__kernel_map_pages(page, numpages, enable);
>
> I'd have prefered something like below
>
> 	if (radix_enabled())
> 		radix__kernel_map_pages(page, numpages, enable);
> 	else
> 		hash__kernel_map_pages(page, numpages, enable);

I did that when applying.

cheers

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH v2] powerpc/s64: Clarify that radix lacks DEBUG_PAGEALLOC
  2021-10-13 21:34 [PATCH v2] powerpc/s64: Clarify that radix lacks DEBUG_PAGEALLOC Joel Stanley
  2021-10-14  8:45 ` Christophe Leroy
@ 2021-11-02 10:11 ` Michael Ellerman
  1 sibling, 0 replies; 4+ messages in thread
From: Michael Ellerman @ 2021-11-02 10:11 UTC (permalink / raw)
  To: Jordan Niethe, Joel Stanley, Christophe Leroy; +Cc: linuxppc-dev

On Thu, 14 Oct 2021 08:04:38 +1030, Joel Stanley wrote:
> The page_alloc.c code will call into __kernel_map_pages when
> DEBUG_PAGEALLOC is configured and enabled.
> 
> As the implementation assumes hash, this would crash spectacularly if
> not for a bit of luck in __kernel_map_pages. In this function
> linear_map_hash_count is always zero, so the for loop exits without
> doing any damage.
> 
> [...]

Applied to powerpc/next.

[1/1] powerpc/s64: Clarify that radix lacks DEBUG_PAGEALLOC
      https://git.kernel.org/powerpc/c/4f703e7faa67a116016c4678fc88b507c12670c9

cheers

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2021-11-02 11:47 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-10-13 21:34 [PATCH v2] powerpc/s64: Clarify that radix lacks DEBUG_PAGEALLOC Joel Stanley
2021-10-14  8:45 ` Christophe Leroy
2021-10-15 10:41   ` Michael Ellerman
2021-11-02 10:11 ` Michael Ellerman

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.