linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] arm64/mm: Add pud_sect_supported()
@ 2021-09-15  3:44 Anshuman Khandual
  2021-09-16 16:16 ` Catalin Marinas
  0 siblings, 1 reply; 3+ messages in thread
From: Anshuman Khandual @ 2021-09-15  3:44 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: mark.rutland, suzuki.poulose, Anshuman Khandual, Catalin Marinas,
	Will Deacon, linux-kernel

Section mapping at PUD level is supported only on 4K pages and currently it
gets verified with explicit #ifdef or IS_ENABLED() constructs. This adds a
new helper pud_sect_supported() for this purpose, which particularly cleans
up the HugeTLB code path. It updates relevant switch statements with checks
for __PAGETABLE_PUD_FOLDED in order to avoid build failures caused with two
identical switch case values in those code blocks.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
This applies on v5.15-rc1

 arch/arm64/include/asm/pgtable.h |  5 +++++
 arch/arm64/include/asm/vmalloc.h |  4 ++--
 arch/arm64/mm/hugetlbpage.c      | 26 +++++++++++++++-----------
 3 files changed, 22 insertions(+), 13 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index dfa76afa0ccf..84fbb52b4224 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1022,6 +1022,11 @@ static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
 	return PAGE_READONLY_EXEC;
 }
 
+static inline bool pud_sect_supported(void)
+{
+	return PAGE_SIZE == SZ_4K;
+}
+
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h
index 7a22aeea9bb5..b9185503feae 100644
--- a/arch/arm64/include/asm/vmalloc.h
+++ b/arch/arm64/include/asm/vmalloc.h
@@ -2,6 +2,7 @@
 #define _ASM_ARM64_VMALLOC_H
 
 #include <asm/page.h>
+#include <asm/pgtable.h>
 
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
 
@@ -9,10 +10,9 @@
 static inline bool arch_vmap_pud_supported(pgprot_t prot)
 {
 	/*
-	 * Only 4k granule supports level 1 block mappings.
 	 * SW table walks can't handle removal of intermediate entries.
 	 */
-	return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
+	return pud_sect_supported() &&
 	       !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
 }
 
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 23505fc35324..641854f0e8ee 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -40,11 +40,10 @@ void __init arm64_hugetlb_cma_reserve(void)
 {
 	int order;
 
-#ifdef CONFIG_ARM64_4K_PAGES
-	order = PUD_SHIFT - PAGE_SHIFT;
-#else
-	order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
-#endif
+	if (pud_sect_supported())
+		order = PUD_SHIFT - PAGE_SHIFT;
+	else
+		order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
 	/*
 	 * HugeTLB CMA reservation is required for gigantic
 	 * huge pages which could not be allocated via the
@@ -62,8 +61,9 @@ bool arch_hugetlb_migration_supported(struct hstate *h)
 	size_t pagesize = huge_page_size(h);
 
 	switch (pagesize) {
-#ifdef CONFIG_ARM64_4K_PAGES
+#ifndef __PAGETABLE_PUD_FOLDED
 	case PUD_SIZE:
+		return pud_sect_supported();
 #endif
 	case PMD_SIZE:
 	case CONT_PMD_SIZE:
@@ -126,8 +126,11 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
 	*pgsize = size;
 
 	switch (size) {
-#ifdef CONFIG_ARM64_4K_PAGES
+#ifndef __PAGETABLE_PUD_FOLDED
 	case PUD_SIZE:
+		if (pud_sect_supported())
+			contig_ptes = 1;
+		break;
 #endif
 	case PMD_SIZE:
 		contig_ptes = 1;
@@ -489,9 +492,9 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
 
 static int __init hugetlbpage_init(void)
 {
-#ifdef CONFIG_ARM64_4K_PAGES
-	hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
-#endif
+	if (pud_sect_supported())
+		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+
 	hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
 	hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
 	hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);
@@ -503,8 +506,9 @@ arch_initcall(hugetlbpage_init);
 bool __init arch_hugetlb_valid_size(unsigned long size)
 {
 	switch (size) {
-#ifdef CONFIG_ARM64_4K_PAGES
+#ifndef __PAGETABLE_PUD_FOLDED
 	case PUD_SIZE:
+		return pud_sect_supported();
 #endif
 	case CONT_PMD_SIZE:
 	case PMD_SIZE:
-- 
2.20.1


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH] arm64/mm: Add pud_sect_supported()
  2021-09-15  3:44 [PATCH] arm64/mm: Add pud_sect_supported() Anshuman Khandual
@ 2021-09-16 16:16 ` Catalin Marinas
  2021-09-17  5:12   ` Anshuman Khandual
  0 siblings, 1 reply; 3+ messages in thread
From: Catalin Marinas @ 2021-09-16 16:16 UTC (permalink / raw)
  To: Anshuman Khandual
  Cc: linux-arm-kernel, mark.rutland, suzuki.poulose, Will Deacon,
	linux-kernel

On Wed, Sep 15, 2021 at 09:14:19AM +0530, Anshuman Khandual wrote:
> diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
> index 23505fc35324..641854f0e8ee 100644
> --- a/arch/arm64/mm/hugetlbpage.c
> +++ b/arch/arm64/mm/hugetlbpage.c
> @@ -40,11 +40,10 @@ void __init arm64_hugetlb_cma_reserve(void)
>  {
>  	int order;
>  
> -#ifdef CONFIG_ARM64_4K_PAGES
> -	order = PUD_SHIFT - PAGE_SHIFT;
> -#else
> -	order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
> -#endif
> +	if (pud_sect_supported())
> +		order = PUD_SHIFT - PAGE_SHIFT;
> +	else
> +		order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
>  	/*
>  	 * HugeTLB CMA reservation is required for gigantic
>  	 * huge pages which could not be allocated via the
> @@ -62,8 +61,9 @@ bool arch_hugetlb_migration_supported(struct hstate *h)
>  	size_t pagesize = huge_page_size(h);
>  
>  	switch (pagesize) {
> -#ifdef CONFIG_ARM64_4K_PAGES
> +#ifndef __PAGETABLE_PUD_FOLDED
>  	case PUD_SIZE:
> +		return pud_sect_supported();
>  #endif
>  	case PMD_SIZE:
>  	case CONT_PMD_SIZE:

Is this the same thing? With 4K pages and 3-levels (39-bit VA), the PUD
is folded but we do have a valid PUD_SIZE == PGDIR_SIZE and different
from PMD_SIZE. Do we disallow section mappings at the top level in this
case? If not, we should have a check for __PAGETABLE_PMD_FOLDED instead.

> @@ -126,8 +126,11 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
>  	*pgsize = size;
>  
>  	switch (size) {
> -#ifdef CONFIG_ARM64_4K_PAGES
> +#ifndef __PAGETABLE_PUD_FOLDED
>  	case PUD_SIZE:
> +		if (pud_sect_supported())
> +			contig_ptes = 1;
> +		break;
>  #endif
>  	case PMD_SIZE:
>  		contig_ptes = 1;

Same here.

> @@ -489,9 +492,9 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
>  
>  static int __init hugetlbpage_init(void)
>  {
> -#ifdef CONFIG_ARM64_4K_PAGES
> -	hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
> -#endif
> +	if (pud_sect_supported())
> +		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
> +
>  	hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
>  	hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
>  	hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);
> @@ -503,8 +506,9 @@ arch_initcall(hugetlbpage_init);
>  bool __init arch_hugetlb_valid_size(unsigned long size)
>  {
>  	switch (size) {
> -#ifdef CONFIG_ARM64_4K_PAGES
> +#ifndef __PAGETABLE_PUD_FOLDED
>  	case PUD_SIZE:
> +		return pud_sect_supported();
>  #endif
>  	case CONT_PMD_SIZE:
>  	case PMD_SIZE:

And here.

-- 
Catalin

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH] arm64/mm: Add pud_sect_supported()
  2021-09-16 16:16 ` Catalin Marinas
@ 2021-09-17  5:12   ` Anshuman Khandual
  0 siblings, 0 replies; 3+ messages in thread
From: Anshuman Khandual @ 2021-09-17  5:12 UTC (permalink / raw)
  To: Catalin Marinas
  Cc: linux-arm-kernel, mark.rutland, suzuki.poulose, Will Deacon,
	linux-kernel



On 9/16/21 9:46 PM, Catalin Marinas wrote:
> On Wed, Sep 15, 2021 at 09:14:19AM +0530, Anshuman Khandual wrote:
>> diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
>> index 23505fc35324..641854f0e8ee 100644
>> --- a/arch/arm64/mm/hugetlbpage.c
>> +++ b/arch/arm64/mm/hugetlbpage.c
>> @@ -40,11 +40,10 @@ void __init arm64_hugetlb_cma_reserve(void)
>>  {
>>  	int order;
>>  
>> -#ifdef CONFIG_ARM64_4K_PAGES
>> -	order = PUD_SHIFT - PAGE_SHIFT;
>> -#else
>> -	order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
>> -#endif
>> +	if (pud_sect_supported())
>> +		order = PUD_SHIFT - PAGE_SHIFT;
>> +	else
>> +		order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
>>  	/*
>>  	 * HugeTLB CMA reservation is required for gigantic
>>  	 * huge pages which could not be allocated via the
>> @@ -62,8 +61,9 @@ bool arch_hugetlb_migration_supported(struct hstate *h)
>>  	size_t pagesize = huge_page_size(h);
>>  
>>  	switch (pagesize) {
>> -#ifdef CONFIG_ARM64_4K_PAGES
>> +#ifndef __PAGETABLE_PUD_FOLDED
>>  	case PUD_SIZE:
>> +		return pud_sect_supported();
>>  #endif
>>  	case PMD_SIZE:
>>  	case CONT_PMD_SIZE:
> 
> Is this the same thing? With 4K pages and 3-levels (39-bit VA), the PUD

No. These huge page sizes are different at each level for the above config.
Seems like the SOFT_OFFLINE based HugeTLB migration test, which I normally
run for all HugeTLB related changes, somehow missed this particular config
(4K|39V|48PA) where it mattered.

> is folded but we do have a valid PUD_SIZE == PGDIR_SIZE and different
> from PMD_SIZE. Do we disallow section mappings at the top level in this
> case? If not, we should have check for __PAGETABLE_PMD_FOLDED instead.

Right, we should not disallow such a PUD_SIZE which is different from PMD_SIZE.
Will replace the check with __PAGETABLE_PMD_FOLDED.

> 
>> @@ -126,8 +126,11 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
>>  	*pgsize = size;
>>  
>>  	switch (size) {
>> -#ifdef CONFIG_ARM64_4K_PAGES
>> +#ifndef __PAGETABLE_PUD_FOLDED
>>  	case PUD_SIZE:
>> +		if (pud_sect_supported())
>> +			contig_ptes = 1;
>> +		break;
>>  #endif
>>  	case PMD_SIZE:
>>  		contig_ptes = 1;
> 
> Same here.

Sure, will change.

> 
>> @@ -489,9 +492,9 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
>>  
>>  static int __init hugetlbpage_init(void)
>>  {
>> -#ifdef CONFIG_ARM64_4K_PAGES
>> -	hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
>> -#endif
>> +	if (pud_sect_supported())
>> +		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
>> +
>>  	hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
>>  	hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
>>  	hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);
>> @@ -503,8 +506,9 @@ arch_initcall(hugetlbpage_init);
>>  bool __init arch_hugetlb_valid_size(unsigned long size)
>>  {
>>  	switch (size) {
>> -#ifdef CONFIG_ARM64_4K_PAGES
>> +#ifndef __PAGETABLE_PUD_FOLDED
>>  	case PUD_SIZE:
>> +		return pud_sect_supported();
>>  #endif
>>  	case CONT_PMD_SIZE:
>>  	case PMD_SIZE:
> 
> And here.

Sure, will change.

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2021-09-17  5:11 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-09-15  3:44 [PATCH] arm64/mm: Add pud_sect_supported() Anshuman Khandual
2021-09-16 16:16 ` Catalin Marinas
2021-09-17  5:12   ` Anshuman Khandual

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).