* [PATCH] powerpc/s64: Clarify that radix lacks DEBUG_PAGEALLOC
@ 2021-10-12 1:13 Joel Stanley
2021-10-12 6:02 ` Christophe Leroy
0 siblings, 1 reply; 2+ messages in thread
From: Joel Stanley @ 2021-10-12 1:13 UTC (permalink / raw)
To: Jordan Niethe; +Cc: linuxppc-dev
The page_alloc.c code will call into __kernel_map_pages when
DEBUG_PAGEALLOC is configured and enabled.
As the implementation assumes hash, this should crash spectacularly if
not for a bit of luck in __kernel_map_pages. In this function
linear_map_hash_count is always zero, so the for loop exits without doing
any damage.
There are no other platforms that determine if they support
debug_pagealloc at runtime. Instead of adding code to mm/page_alloc.c to
do that, this change turns the map/unmap into a noop when in radix
mode and prints a warning once.
Signed-off-by: Joel Stanley <joel@jms.id.au>
---
I noticed this when I was looking at adding kfence support a while back.
I've put that work aside and jpn has since gotten further than me, but I
think this is a fix worth considering.
arch/powerpc/include/asm/book3s/64/hash.h | 2 ++
arch/powerpc/mm/book3s64/hash_utils.c | 2 +-
arch/powerpc/mm/book3s64/pgtable.c | 12 ++++++++++++
3 files changed, 15 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index d959b0195ad9..674fe0e890dc 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -255,6 +255,8 @@ int hash__create_section_mapping(unsigned long start, unsigned long end,
int nid, pgprot_t prot);
int hash__remove_section_mapping(unsigned long start, unsigned long end);
+void hash__kernel_map_pages(struct page *page, int numpages, int enable);
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index c145776d3ae5..cfd45245d009 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1988,7 +1988,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
mmu_kernel_ssize, 0);
}
-void __kernel_map_pages(struct page *page, int numpages, int enable)
+void hash__kernel_map_pages(struct page *page, int numpages, int enable)
{
unsigned long flags, vaddr, lmi;
int i;
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 9e16c7b1a6c5..0aefc272cd03 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -526,3 +526,15 @@ static int __init pgtable_debugfs_setup(void)
return 0;
}
arch_initcall(pgtable_debugfs_setup);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ if (radix_enabled()) {
+ pr_warn_once("DEBUG_PAGEALLOC not supported in radix mode\n");
+ return;
+ }
+
+ hash__kernel_map_pages(page, numpages, enable);
+}
+#endif
--
2.33.0
^ permalink raw reply related [flat|nested] 2+ messages in thread
* Re: [PATCH] powerpc/s64: Clarify that radix lacks DEBUG_PAGEALLOC
2021-10-12 1:13 [PATCH] powerpc/s64: Clarify that radix lacks DEBUG_PAGEALLOC Joel Stanley
@ 2021-10-12 6:02 ` Christophe Leroy
0 siblings, 0 replies; 2+ messages in thread
From: Christophe Leroy @ 2021-10-12 6:02 UTC (permalink / raw)
To: Joel Stanley, Jordan Niethe; +Cc: linuxppc-dev
Le 12/10/2021 à 03:13, Joel Stanley a écrit :
> The page_alloc.c code will call into __kernel_map_pages when
> DEBUG_PAGEALLOC is configured and enabled.
>
> As the implementation assumes hash, this should crash spectacularly if
> not for a bit of luck in __kernel_map_pages. In this function
> linear_map_hash_count is always zero, so the for loop exits without doing
> any damage.
>
> There are no other platforms that determine if they support
> debug_pagealloc at runtime. Instead of adding code to mm/page_alloc.c to
> do that, this change turns the map/unmap into a noop when in radix
> mode and prints a warning once.
>
> Signed-off-by: Joel Stanley <joel@jms.id.au>
> ---
> I noticed this when I was looking at adding kfence support a while back.
> I've put that work aside and jpn has since gotten further than me, but I
> think this is a fix worth considering.
>
> arch/powerpc/include/asm/book3s/64/hash.h | 2 ++
> arch/powerpc/mm/book3s64/hash_utils.c | 2 +-
> arch/powerpc/mm/book3s64/pgtable.c | 12 ++++++++++++
> 3 files changed, 15 insertions(+), 1 deletion(-)
>
> diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
> index d959b0195ad9..674fe0e890dc 100644
> --- a/arch/powerpc/include/asm/book3s/64/hash.h
> +++ b/arch/powerpc/include/asm/book3s/64/hash.h
> @@ -255,6 +255,8 @@ int hash__create_section_mapping(unsigned long start, unsigned long end,
> int nid, pgprot_t prot);
> int hash__remove_section_mapping(unsigned long start, unsigned long end);
>
> +void hash__kernel_map_pages(struct page *page, int numpages, int enable);
> +
> #endif /* !__ASSEMBLY__ */
> #endif /* __KERNEL__ */
> #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
> diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
> index c145776d3ae5..cfd45245d009 100644
> --- a/arch/powerpc/mm/book3s64/hash_utils.c
> +++ b/arch/powerpc/mm/book3s64/hash_utils.c
> @@ -1988,7 +1988,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
> mmu_kernel_ssize, 0);
> }
>
> -void __kernel_map_pages(struct page *page, int numpages, int enable)
> +void hash__kernel_map_pages(struct page *page, int numpages, int enable)
> {
> unsigned long flags, vaddr, lmi;
> int i;
> diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
> index 9e16c7b1a6c5..0aefc272cd03 100644
> --- a/arch/powerpc/mm/book3s64/pgtable.c
> +++ b/arch/powerpc/mm/book3s64/pgtable.c
> @@ -526,3 +526,15 @@ static int __init pgtable_debugfs_setup(void)
> return 0;
> }
> arch_initcall(pgtable_debugfs_setup);
> +
> +#ifdef CONFIG_DEBUG_PAGEALLOC
> +void __kernel_map_pages(struct page *page, int numpages, int enable)
> +{
> + if (radix_enabled()) {
> + pr_warn_once("DEBUG_PAGEALLOC not supported in radix mode\n");
> + return;
> + }
> +
> + hash__kernel_map_pages(page, numpages, enable);
> +}
I think it would be better to do this similarly to most other functions (like
map_kernel_page() for instance):
In arch/powerpc/include/asm/book3s/64/pgtable.h do:
static inline void __kernel_map_pages(struct page *page, int numpages,
int enable)
{
if (radix_enabled())
radix__kernel_map_pages(...);
else
hash__kernel_map_pages(...);
}
Then in arch/powerpc/include/asm/book3s/64/radix.h do (or in
arch/powerpc/mm/book3s64/radix_pgtable.c if you prefer?):
static inline void radix__kernel_map_pages(struct page *page, int
numpages, int enable)
{
pr_warn_once("DEBUG_PAGEALLOC not supported in radix mode\n");
}
> +#endif
>
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2021-10-12 6:02 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-10-12 1:13 [PATCH] powerpc/s64: Clarify that radix lacks DEBUG_PAGEALLOC Joel Stanley
2021-10-12 6:02 ` Christophe Leroy
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.