* [PATCH v2 0/4] Add Sv57 page table support
@ 2022-01-27  2:48 panqinglin2020
  2022-01-27  2:48 ` [PATCH v2 1/4] riscv: mm: Control p4d's folding by pgtable_l5_enabled panqinglin2020
                   ` (4 more replies)
  0 siblings, 5 replies; 6+ messages in thread
From: panqinglin2020 @ 2022-01-27  2:48 UTC (permalink / raw)
  To: paul.walmsley, palmer, aou, linux-riscv; +Cc: jeff, xuyinan, Qinglin Pan

From: Qinglin Pan <panqinglin2020@iscas.ac.cn>

This implements Sv57 support at runtime. The kernel will first try to
boot with a 5-level page table, fall back to 4 levels if the HW does not
support Sv57, and finally fall back to 3 levels if the HW does not
support Sv48 either.
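
The selection order, in rough sketch form (illustrative only -- the real
probing is done by set_satp_mode() in patch 3, and hw_supports() here is
a hypothetical stand-in for the write-satp-and-read-back check):

    satp_mode = SATP_MODE_57;               /* try Sv57 first */
    if (!hw_supports(satp_mode)) {          /* hypothetical helper */
            disable_pgtable_l5();           /* fall back to Sv48 */
            if (!hw_supports(satp_mode))
                    disable_pgtable_l4();   /* fall back to Sv39 */
    }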

Tested on:
 - qemu rv64
 - qemu rv64 with CONFIG_KASAN and CONFIG_KASAN_VMALLOC on.
 - qemu rv32
 - sifive unmatched with CONFIG_KASAN and CONFIG_KASAN_VMALLOC on.

Changes in v2:
  - Fix KASAN for sv57, thanks to Alex

Qinglin Pan (4):
  riscv: mm: Control p4d's folding by pgtable_l5_enabled
  riscv: mm: Prepare pt_ops helper functions for sv57
  riscv: mm: Set sv57 on defaultly
  riscv: mm: Support kasan for sv57

 arch/riscv/Kconfig                  |   4 +-
 arch/riscv/include/asm/csr.h        |   1 +
 arch/riscv/include/asm/fixmap.h     |   1 +
 arch/riscv/include/asm/page.h       |   1 +
 arch/riscv/include/asm/pgalloc.h    |  49 ++++++++
 arch/riscv/include/asm/pgtable-64.h | 106 +++++++++++++++++-
 arch/riscv/include/asm/pgtable.h    |   6 +-
 arch/riscv/kernel/cpu.c             |   4 +-
 arch/riscv/mm/init.c                | 166 ++++++++++++++++++++++++----
 arch/riscv/mm/kasan_init.c          | 155 +++++++++++++++++++++++---
 10 files changed, 451 insertions(+), 42 deletions(-)

-- 
2.34.1



* [PATCH v2 1/4] riscv: mm: Control p4d's folding by pgtable_l5_enabled
  2022-01-27  2:48 [PATCH v2 0/4] Add Sv57 page table support panqinglin2020
@ 2022-01-27  2:48 ` panqinglin2020
  2022-01-27  2:48 ` [PATCH v2 2/4] riscv: mm: Prepare pt_ops helper functions for sv57 panqinglin2020
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: panqinglin2020 @ 2022-01-27  2:48 UTC (permalink / raw)
  To: paul.walmsley, palmer, aou, linux-riscv; +Cc: jeff, xuyinan, Qinglin Pan

From: Qinglin Pan <panqinglin2020@iscas.ac.cn>

To determine the pgtable level at boot time, we cannot use the helper
functions in include/asm-generic/pgtable-nop4d.h and must implement them
ourselves. This patch uses the pgtable_l5_enabled variable instead of
including pgtable-nop4d.h to control p4d's folding, and implements the
corresponding helper functions.
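
With the folding decided at runtime, a page-table walk simply collapses
the p4d level when it is disabled. A minimal sketch of the resulting
behaviour, using the helpers added below:

    pgd_t *pgd = pgd_offset(mm, addr);
    p4d_t *p4d = p4d_offset(pgd, addr); /* (p4d_t *)pgd if !pgtable_l5_enabled */
    pud_t *pud = pud_offset(p4d, addr); /* (pud_t *)p4d if !pgtable_l4_enabled */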

Signed-off-by: Qinglin Pan <panqinglin2020@iscas.ac.cn>

diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index 11823004b87a..947f23d7b6af 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -59,6 +59,26 @@ static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d,
 	}
 }
 
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
+{
+	if (pgtable_l5_enabled) {
+		unsigned long pfn = virt_to_pfn(p4d);
+
+		set_pgd(pgd, __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+	}
+}
+
+static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd,
+				     p4d_t *p4d)
+{
+	if (pgtable_l5_enabled) {
+		unsigned long pfn = virt_to_pfn(p4d);
+
+		set_pgd_safe(pgd,
+			     __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+	}
+}
+
 #define pud_alloc_one pud_alloc_one
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
@@ -76,6 +96,35 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 }
 
 #define __pud_free_tlb(tlb, pud, addr)  pud_free((tlb)->mm, pud)
+
+#define p4d_alloc_one p4d_alloc_one
+static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	if (pgtable_l5_enabled) {
+		gfp_t gfp = GFP_PGTABLE_USER;
+
+		if (mm == &init_mm)
+			gfp = GFP_PGTABLE_KERNEL;
+		return (p4d_t *)get_zeroed_page(gfp);
+	}
+
+	return NULL;
+}
+
+static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d)
+{
+	BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
+	free_page((unsigned long)p4d);
+}
+
+#define p4d_free p4d_free
+static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
+{
+	if (pgtable_l5_enabled)
+		__p4d_free(mm, p4d);
+}
+
+#define __p4d_free_tlb(tlb, p4d, addr)  p4d_free((tlb)->mm, p4d)
 #endif /* __PAGETABLE_PMD_FOLDED */
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index bbbdd66e5e2f..7e246e9f8d70 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -9,16 +9,24 @@
 #include <linux/const.h>
 
 extern bool pgtable_l4_enabled;
+extern bool pgtable_l5_enabled;
 
 #define PGDIR_SHIFT_L3  30
 #define PGDIR_SHIFT_L4  39
+#define PGDIR_SHIFT_L5  48
 #define PGDIR_SIZE_L3   (_AC(1, UL) << PGDIR_SHIFT_L3)
 
-#define PGDIR_SHIFT     (pgtable_l4_enabled ? PGDIR_SHIFT_L4 : PGDIR_SHIFT_L3)
+#define PGDIR_SHIFT     (pgtable_l5_enabled ? PGDIR_SHIFT_L5 : \
+		(pgtable_l4_enabled ? PGDIR_SHIFT_L4 : PGDIR_SHIFT_L3))
 /* Size of region mapped by a page global directory */
 #define PGDIR_SIZE      (_AC(1, UL) << PGDIR_SHIFT)
 #define PGDIR_MASK      (~(PGDIR_SIZE - 1))
 
+/* p4d is folded into pgd in case of 4-level page table */
+#define P4D_SHIFT      39
+#define P4D_SIZE       (_AC(1, UL) << P4D_SHIFT)
+#define P4D_MASK       (~(P4D_SIZE - 1))
+
 /* pud is folded into pgd in case of 3-level page table */
 #define PUD_SHIFT      30
 #define PUD_SIZE       (_AC(1, UL) << PUD_SHIFT)
@@ -29,6 +37,15 @@ extern bool pgtable_l4_enabled;
 #define PMD_SIZE        (_AC(1, UL) << PMD_SHIFT)
 #define PMD_MASK        (~(PMD_SIZE - 1))
 
+/* Page 4th Directory entry */
+typedef struct {
+	unsigned long p4d;
+} p4d_t;
+
+#define p4d_val(x)	((x).p4d)
+#define __p4d(x)	((p4d_t) { (x) })
+#define PTRS_PER_P4D	(PAGE_SIZE / sizeof(p4d_t))
+
 /* Page Upper Directory entry */
 typedef struct {
 	unsigned long pud;
@@ -99,6 +116,15 @@ static inline struct page *pud_page(pud_t pud)
 	return pfn_to_page(pud_val(pud) >> _PAGE_PFN_SHIFT);
 }
 
+#define mm_p4d_folded  mm_p4d_folded
+static inline bool mm_p4d_folded(struct mm_struct *mm)
+{
+	if (pgtable_l5_enabled)
+		return false;
+
+	return true;
+}
+
 #define mm_pud_folded  mm_pud_folded
 static inline bool mm_pud_folded(struct mm_struct *mm)
 {
@@ -128,6 +154,9 @@ static inline unsigned long _pmd_pfn(pmd_t pmd)
 #define pud_ERROR(e)   \
 	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
 
+#define p4d_ERROR(e)   \
+	pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
+
 static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
 {
 	if (pgtable_l4_enabled)
@@ -166,6 +195,16 @@ static inline void p4d_clear(p4d_t *p4d)
 		set_p4d(p4d, __p4d(0));
 }
 
+static inline p4d_t pfn_p4d(unsigned long pfn, pgprot_t prot)
+{
+	return __p4d((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+}
+
+static inline unsigned long _p4d_pfn(p4d_t p4d)
+{
+	return p4d_val(p4d) >> _PAGE_PFN_SHIFT;
+}
+
 static inline pud_t *p4d_pgtable(p4d_t p4d)
 {
 	if (pgtable_l4_enabled)
@@ -173,6 +212,7 @@ static inline pud_t *p4d_pgtable(p4d_t p4d)
 
 	return (pud_t *)pud_pgtable((pud_t) { p4d_val(p4d) });
 }
+#define p4d_page_vaddr(p4d)	((unsigned long)p4d_pgtable(p4d))
 
 static inline struct page *p4d_page(p4d_t p4d)
 {
@@ -190,4 +230,68 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
 	return (pud_t *)p4d;
 }
 
+static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+	if (pgtable_l5_enabled)
+		*pgdp = pgd;
+	else
+		set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) });
+}
+
+static inline int pgd_none(pgd_t pgd)
+{
+	if (pgtable_l5_enabled)
+		return (pgd_val(pgd) == 0);
+
+	return 0;
+}
+
+static inline int pgd_present(pgd_t pgd)
+{
+	if (pgtable_l5_enabled)
+		return (pgd_val(pgd) & _PAGE_PRESENT);
+
+	return 1;
+}
+
+static inline int pgd_bad(pgd_t pgd)
+{
+	if (pgtable_l5_enabled)
+		return !pgd_present(pgd);
+
+	return 0;
+}
+
+static inline void pgd_clear(pgd_t *pgd)
+{
+	if (pgtable_l5_enabled)
+		set_pgd(pgd, __pgd(0));
+}
+
+static inline p4d_t *pgd_pgtable(pgd_t pgd)
+{
+	if (pgtable_l5_enabled)
+		return (p4d_t *)pfn_to_virt(pgd_val(pgd) >> _PAGE_PFN_SHIFT);
+
+	return (p4d_t *)p4d_pgtable((p4d_t) { pgd_val(pgd) });
+}
+#define pgd_page_vaddr(pgd)	((unsigned long)pgd_pgtable(pgd))
+
+static inline struct page *pgd_page(pgd_t pgd)
+{
+	return pfn_to_page(pgd_val(pgd) >> _PAGE_PFN_SHIFT);
+}
+#define pgd_page(pgd)	pgd_page(pgd)
+
+#define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
+
+#define p4d_offset p4d_offset
+static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+{
+	if (pgtable_l5_enabled)
+		return pgd_pgtable(*pgd) + p4d_index(address);
+
+	return (p4d_t *)pgd;
+}
+
 #endif /* _ASM_RISCV_PGTABLE_64_H */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 7e949f25c933..58fece3133e9 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -62,7 +62,8 @@
  * position vmemmap directly below the VMALLOC region.
  */
 #ifdef CONFIG_64BIT
-#define VA_BITS		(pgtable_l4_enabled ? 48 : 39)
+#define VA_BITS		(pgtable_l5_enabled ? \
+				57 : (pgtable_l4_enabled ? 48 : 39))
 #else
 #define VA_BITS		32
 #endif
@@ -102,7 +103,6 @@
 
 #ifndef __ASSEMBLY__
 
-#include <asm-generic/pgtable-nop4d.h>
 #include <asm/page.h>
 #include <asm/tlbflush.h>
 #include <linux/mm_types.h>
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index cf4d018b7d66..9e4ed02e3d56 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -45,7 +45,9 @@ u64 satp_mode = SATP_MODE_32;
 EXPORT_SYMBOL(satp_mode);
 
 bool pgtable_l4_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
+bool pgtable_l5_enabled = false;
 EXPORT_SYMBOL(pgtable_l4_enabled);
+EXPORT_SYMBOL(pgtable_l5_enabled);
 
 phys_addr_t phys_ram_base __ro_after_init;
 EXPORT_SYMBOL(phys_ram_base);
-- 
2.34.1



* [PATCH v2 2/4] riscv: mm: Prepare pt_ops helper functions for sv57
  2022-01-27  2:48 [PATCH v2 0/4] Add Sv57 page table support panqinglin2020
  2022-01-27  2:48 ` [PATCH v2 1/4] riscv: mm: Control p4d's folding by pgtable_l5_enabled panqinglin2020
@ 2022-01-27  2:48 ` panqinglin2020
  2022-01-27  2:48 ` [PATCH v2 3/4] riscv: mm: Set sv57 on defaultly panqinglin2020
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: panqinglin2020 @ 2022-01-27  2:48 UTC (permalink / raw)
  To: paul.walmsley, palmer, aou, linux-riscv; +Cc: jeff, xuyinan, Qinglin Pan

From: Qinglin Pan <panqinglin2020@iscas.ac.cn>

This patch prepares the pt_ops helper functions that will be used to
create sv57 mappings at boot time.
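
The new hooks mirror the existing pud ones: alloc_p4d returns the
physical address of a fresh P4D table for the given virtual address, and
get_p4d_virt turns that physical address into a pointer that is valid in
the current boot phase (early / fixmap / late). A usage sketch, following
the pattern of the existing create_*_mapping() helpers:

    phys_addr_t pa = pt_ops.alloc_p4d(va);
    p4d_t *p4dp = pt_ops.get_p4d_virt(pa);
    memset(p4dp, 0, PAGE_SIZE);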

Signed-off-by: Qinglin Pan <panqinglin2020@iscas.ac.cn>

diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
index 58a718573ad6..3cfece8b6568 100644
--- a/arch/riscv/include/asm/fixmap.h
+++ b/arch/riscv/include/asm/fixmap.h
@@ -25,6 +25,7 @@ enum fixed_addresses {
 	FIX_PTE,
 	FIX_PMD,
 	FIX_PUD,
+	FIX_P4D,
 	FIX_TEXT_POKE1,
 	FIX_TEXT_POKE0,
 	FIX_EARLYCON_MEM_BASE,
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 58fece3133e9..351cabcd528e 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -133,6 +133,8 @@ struct pt_alloc_ops {
 	phys_addr_t (*alloc_pmd)(uintptr_t va);
 	pud_t *(*get_pud_virt)(phys_addr_t pa);
 	phys_addr_t (*alloc_pud)(uintptr_t va);
+	p4d_t *(*get_p4d_virt)(phys_addr_t pa);
+	phys_addr_t (*alloc_p4d)(uintptr_t va);
 #endif
 };
 
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 9e4ed02e3d56..425377c97c40 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -229,6 +229,7 @@ pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
 
 pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+static p4d_t __maybe_unused early_dtb_p4d[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
 static pud_t __maybe_unused early_dtb_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
 static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
 
@@ -319,6 +320,16 @@ static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
 #define early_pmd      ((pmd_t *)XIP_FIXUP(early_pmd))
 #endif /* CONFIG_XIP_KERNEL */
 
+static p4d_t trampoline_p4d[PTRS_PER_P4D] __page_aligned_bss;
+static p4d_t fixmap_p4d[PTRS_PER_P4D] __page_aligned_bss;
+static p4d_t early_p4d[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
+
+#ifdef CONFIG_XIP_KERNEL
+#define trampoline_p4d ((p4d_t *)XIP_FIXUP(trampoline_p4d))
+#define fixmap_p4d     ((p4d_t *)XIP_FIXUP(fixmap_p4d))
+#define early_p4d      ((p4d_t *)XIP_FIXUP(early_p4d))
+#endif /* CONFIG_XIP_KERNEL */
+
 static pud_t trampoline_pud[PTRS_PER_PUD] __page_aligned_bss;
 static pud_t fixmap_pud[PTRS_PER_PUD] __page_aligned_bss;
 static pud_t early_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
@@ -433,6 +444,44 @@ static phys_addr_t alloc_pud_late(uintptr_t va)
 	return __pa(vaddr);
 }
 
+static p4d_t *__init get_p4d_virt_early(phys_addr_t pa)
+{
+	return (p4d_t *)((uintptr_t)pa);
+}
+
+static p4d_t *__init get_p4d_virt_fixmap(phys_addr_t pa)
+{
+	clear_fixmap(FIX_P4D);
+	return (p4d_t *)set_fixmap_offset(FIX_P4D, pa);
+}
+
+static p4d_t *__init get_p4d_virt_late(phys_addr_t pa)
+{
+	return (p4d_t *)__va(pa);
+}
+
+static phys_addr_t __init alloc_p4d_early(uintptr_t va)
+{
+	/* Only one P4D is available for early mapping */
+	BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);
+
+	return (uintptr_t)early_p4d;
+}
+
+static phys_addr_t __init alloc_p4d_fixmap(uintptr_t va)
+{
+	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+}
+
+static phys_addr_t alloc_p4d_late(uintptr_t va)
+{
+	unsigned long vaddr;
+
+	vaddr = __get_free_page(GFP_KERNEL);
+	BUG_ON(!vaddr);
+	return __pa(vaddr);
+}
+
 static void __init create_pud_mapping(pud_t *pudp,
 				      uintptr_t va, phys_addr_t pa,
 				      phys_addr_t sz, pgprot_t prot)
@@ -460,21 +509,55 @@ static void __init create_pud_mapping(pud_t *pudp,
 	create_pmd_mapping(nextp, va, pa, sz, prot);
 }
 
-#define pgd_next_t		pud_t
-#define alloc_pgd_next(__va)	(pgtable_l4_enabled ?			\
-		pt_ops.alloc_pud(__va) : pt_ops.alloc_pmd(__va))
-#define get_pgd_next_virt(__pa)	(pgtable_l4_enabled ?			\
-		pt_ops.get_pud_virt(__pa) : (pgd_next_t *)pt_ops.get_pmd_virt(__pa))
+static void __init create_p4d_mapping(p4d_t *p4dp,
+				      uintptr_t va, phys_addr_t pa,
+				      phys_addr_t sz, pgprot_t prot)
+{
+	pud_t *nextp;
+	phys_addr_t next_phys;
+	uintptr_t p4d_index = p4d_index(va);
+
+	if (sz == P4D_SIZE) {
+		if (p4d_val(p4dp[p4d_index]) == 0)
+			p4dp[p4d_index] = pfn_p4d(PFN_DOWN(pa), prot);
+		return;
+	}
+
+	if (p4d_val(p4dp[p4d_index]) == 0) {
+		next_phys = pt_ops.alloc_pud(va);
+		p4dp[p4d_index] = pfn_p4d(PFN_DOWN(next_phys), PAGE_TABLE);
+		nextp = pt_ops.get_pud_virt(next_phys);
+		memset(nextp, 0, PAGE_SIZE);
+	} else {
+		next_phys = PFN_PHYS(_p4d_pfn(p4dp[p4d_index]));
+		nextp = pt_ops.get_pud_virt(next_phys);
+	}
+
+	create_pud_mapping(nextp, va, pa, sz, prot);
+}
+
+#define pgd_next_t		p4d_t
+#define alloc_pgd_next(__va)	(pgtable_l5_enabled ?			\
+		pt_ops.alloc_p4d(__va) : (pgtable_l4_enabled ?		\
+		pt_ops.alloc_pud(__va) : pt_ops.alloc_pmd(__va)))
+#define get_pgd_next_virt(__pa)	(pgtable_l5_enabled ?			\
+		pt_ops.get_p4d_virt(__pa) : (pgd_next_t *)(pgtable_l4_enabled ?	\
+		pt_ops.get_pud_virt(__pa) : (pud_t *)pt_ops.get_pmd_virt(__pa)))
 #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
+				(pgtable_l5_enabled ?			\
+		create_p4d_mapping(__nextp, __va, __pa, __sz, __prot) : \
 				(pgtable_l4_enabled ?			\
-		create_pud_mapping(__nextp, __va, __pa, __sz, __prot) :	\
-		create_pmd_mapping((pmd_t *)__nextp, __va, __pa, __sz, __prot))
-#define fixmap_pgd_next		(pgtable_l4_enabled ?			\
-		(uintptr_t)fixmap_pud : (uintptr_t)fixmap_pmd)
-#define trampoline_pgd_next	(pgtable_l4_enabled ?			\
-		(uintptr_t)trampoline_pud : (uintptr_t)trampoline_pmd)
-#define early_dtb_pgd_next	(pgtable_l4_enabled ?			\
-		(uintptr_t)early_dtb_pud : (uintptr_t)early_dtb_pmd)
+		create_pud_mapping((pud_t *)__nextp, __va, __pa, __sz, __prot) :	\
+		create_pmd_mapping((pmd_t *)__nextp, __va, __pa, __sz, __prot)))
+#define fixmap_pgd_next		(pgtable_l5_enabled ?			\
+		(uintptr_t)fixmap_p4d : (pgtable_l4_enabled ?		\
+		(uintptr_t)fixmap_pud : (uintptr_t)fixmap_pmd))
+#define trampoline_pgd_next	(pgtable_l5_enabled ?			\
+		(uintptr_t)trampoline_p4d : (pgtable_l4_enabled ?	\
+		(uintptr_t)trampoline_pud : (uintptr_t)trampoline_pmd))
+#define early_dtb_pgd_next	(pgtable_l5_enabled ?			\
+		(uintptr_t)early_dtb_p4d : (pgtable_l4_enabled ?	\
+		(uintptr_t)early_dtb_pud : (uintptr_t)early_dtb_pmd))
 #else
 #define pgd_next_t		pte_t
 #define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
@@ -483,6 +566,7 @@ static void __init create_pud_mapping(pud_t *pudp,
 	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
 #define fixmap_pgd_next		((uintptr_t)fixmap_pte)
 #define early_dtb_pgd_next	((uintptr_t)early_dtb_pmd)
+#define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot)
 #define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot)
 #define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot)
 #endif /* __PAGETABLE_PMD_FOLDED */
@@ -693,10 +777,13 @@ static void __init create_fdt_early_page_table(pgd_t *pgdir, uintptr_t dtb_pa)
 			   PGDIR_SIZE,
 			   IS_ENABLED(CONFIG_64BIT) ? PAGE_TABLE : PAGE_KERNEL);
 
-	if (pgtable_l4_enabled) {
+	if (pgtable_l5_enabled)
+		create_p4d_mapping(early_dtb_p4d, DTB_EARLY_BASE_VA,
+				   (uintptr_t)early_dtb_pud, P4D_SIZE, PAGE_TABLE);
+
+	if (pgtable_l4_enabled)
 		create_pud_mapping(early_dtb_pud, DTB_EARLY_BASE_VA,
 				   (uintptr_t)early_dtb_pmd, PUD_SIZE, PAGE_TABLE);
-	}
 
 	if (IS_ENABLED(CONFIG_64BIT)) {
 		create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
@@ -732,6 +819,8 @@ void __init pt_ops_set_early(void)
 	pt_ops.get_pmd_virt = get_pmd_virt_early;
 	pt_ops.alloc_pud = alloc_pud_early;
 	pt_ops.get_pud_virt = get_pud_virt_early;
+	pt_ops.alloc_p4d = alloc_p4d_early;
+	pt_ops.get_p4d_virt = get_p4d_virt_early;
 #endif
 }
 
@@ -752,6 +841,8 @@ void __init pt_ops_set_fixmap(void)
 	pt_ops.get_pmd_virt = kernel_mapping_pa_to_va((uintptr_t)get_pmd_virt_fixmap);
 	pt_ops.alloc_pud = kernel_mapping_pa_to_va((uintptr_t)alloc_pud_fixmap);
 	pt_ops.get_pud_virt = kernel_mapping_pa_to_va((uintptr_t)get_pud_virt_fixmap);
+	pt_ops.alloc_p4d = kernel_mapping_pa_to_va((uintptr_t)alloc_p4d_fixmap);
+	pt_ops.get_p4d_virt = kernel_mapping_pa_to_va((uintptr_t)get_p4d_virt_fixmap);
 #endif
 }
 
@@ -768,6 +859,8 @@ void __init pt_ops_set_late(void)
 	pt_ops.get_pmd_virt = get_pmd_virt_late;
 	pt_ops.alloc_pud = alloc_pud_late;
 	pt_ops.get_pud_virt = get_pud_virt_late;
+	pt_ops.alloc_p4d = alloc_p4d_late;
+	pt_ops.get_p4d_virt = get_p4d_virt_late;
 #endif
 }
 
@@ -828,6 +921,10 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 			   fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);
 
 #ifndef __PAGETABLE_PMD_FOLDED
+	/* Setup fixmap P4D and PUD */
+	if (pgtable_l5_enabled)
+		create_p4d_mapping(fixmap_p4d, FIXADDR_START,
+				   (uintptr_t)fixmap_pud, P4D_SIZE, PAGE_TABLE);
 	/* Setup fixmap PUD and PMD */
 	if (pgtable_l4_enabled)
 		create_pud_mapping(fixmap_pud, FIXADDR_START,
@@ -837,6 +934,9 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	/* Setup trampoline PGD and PMD */
 	create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
 			   trampoline_pgd_next, PGDIR_SIZE, PAGE_TABLE);
+	if (pgtable_l5_enabled)
+		create_p4d_mapping(trampoline_p4d, kernel_map.virt_addr,
+				   (uintptr_t)trampoline_pud, P4D_SIZE, PAGE_TABLE);
 	if (pgtable_l4_enabled)
 		create_pud_mapping(trampoline_pud, kernel_map.virt_addr,
 				   (uintptr_t)trampoline_pmd, PUD_SIZE, PAGE_TABLE);
@@ -938,6 +1038,7 @@ static void __init setup_vm_final(void)
 	clear_fixmap(FIX_PTE);
 	clear_fixmap(FIX_PMD);
 	clear_fixmap(FIX_PUD);
+	clear_fixmap(FIX_P4D);
 
 	/* Move to swapper page table */
 	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | satp_mode);
-- 
2.34.1



* [PATCH v2 3/4] riscv: mm: Set sv57 on defaultly
  2022-01-27  2:48 [PATCH v2 0/4] Add Sv57 page table support panqinglin2020
  2022-01-27  2:48 ` [PATCH v2 1/4] riscv: mm: Control p4d's folding by pgtable_l5_enabled panqinglin2020
  2022-01-27  2:48 ` [PATCH v2 2/4] riscv: mm: Prepare pt_ops helper functions for sv57 panqinglin2020
@ 2022-01-27  2:48 ` panqinglin2020
  2022-01-27  2:48 ` [PATCH v2 4/4] riscv: mm: Support kasan for sv57 panqinglin2020
  2022-02-22 20:28 ` [PATCH v2 0/4] Add Sv57 page table support Palmer Dabbelt
  4 siblings, 0 replies; 6+ messages in thread
From: panqinglin2020 @ 2022-01-27  2:48 UTC (permalink / raw)
  To: paul.walmsley, palmer, aou, linux-riscv; +Cc: jeff, xuyinan, Qinglin Pan

From: Qinglin Pan <panqinglin2020@iscas.ac.cn>

This patch enables sv57 by default when CONFIG_64BIT is set, and falls
back to sv48 at boot time if the current hardware does not support sv57.
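
The probe itself relies on satp being WARL: writing an unsupported mode
leaves the CSR unchanged, so the kernel programs the candidate mode and
reads it back. Condensed from set_satp_mode() in the diff below:

    csr_write(CSR_SATP, identity_satp);
    hw_satp = csr_swap(CSR_SATP, 0ULL);
    local_flush_tlb_all();

    if (hw_satp != identity_satp)
            disable_pgtable_l5();   /* Sv57 rejected: retry with Sv48 */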

Signed-off-by: Qinglin Pan <panqinglin2020@iscas.ac.cn>

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index fd60ab94260b..818c08a351bf 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -151,7 +151,7 @@ config PAGE_OFFSET
 	hex
 	default 0xC0000000 if 32BIT
 	default 0x80000000 if 64BIT && !MMU
-	default 0xffffaf8000000000 if 64BIT
+	default 0xff60000000000000 if 64BIT
 
 config KASAN_SHADOW_OFFSET
 	hex
@@ -202,7 +202,7 @@ config FIX_EARLYCON_MEM
 
 config PGTABLE_LEVELS
 	int
-	default 4 if 64BIT
+	default 5 if 64BIT
 	default 2
 
 config LOCKDEP_SUPPORT
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index ae711692eec9..299abdef0cd6 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -47,6 +47,7 @@
 #define SATP_PPN	_AC(0x00000FFFFFFFFFFF, UL)
 #define SATP_MODE_39	_AC(0x8000000000000000, UL)
 #define SATP_MODE_48	_AC(0x9000000000000000, UL)
+#define SATP_MODE_57	_AC(0xa000000000000000, UL)
 #define SATP_ASID_BITS	16
 #define SATP_ASID_SHIFT	44
 #define SATP_ASID_MASK	_AC(0xFFFF, UL)
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 160e3a1e8f8b..28ee1b05f3aa 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -41,6 +41,7 @@
  * By default, CONFIG_PAGE_OFFSET value corresponds to SV48 address space so
  * define the PAGE_OFFSET value for SV39.
  */
+#define PAGE_OFFSET_L4		_AC(0xffffaf8000000000, UL)
 #define PAGE_OFFSET_L3		_AC(0xffffffd800000000, UL)
 #else
 #define PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index ad0a7e9f828b..3e7e35d50e69 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -79,7 +79,9 @@ static void print_mmu(struct seq_file *f)
 #if defined(CONFIG_32BIT)
 	strncpy(sv_type, "sv32", 5);
 #elif defined(CONFIG_64BIT)
-	if (pgtable_l4_enabled)
+	if (pgtable_l5_enabled)
+		strncpy(sv_type, "sv57", 5);
+	else if (pgtable_l4_enabled)
 		strncpy(sv_type, "sv48", 5);
 	else
 		strncpy(sv_type, "sv39", 5);
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 425377c97c40..df8ddde2e750 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -38,14 +38,14 @@ EXPORT_SYMBOL(kernel_map);
 #endif
 
 #ifdef CONFIG_64BIT
-u64 satp_mode = !IS_ENABLED(CONFIG_XIP_KERNEL) ? SATP_MODE_48 : SATP_MODE_39;
+u64 satp_mode = !IS_ENABLED(CONFIG_XIP_KERNEL) ? SATP_MODE_57 : SATP_MODE_39;
 #else
 u64 satp_mode = SATP_MODE_32;
 #endif
 EXPORT_SYMBOL(satp_mode);
 
 bool pgtable_l4_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
-bool pgtable_l5_enabled = false;
+bool pgtable_l5_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
 EXPORT_SYMBOL(pgtable_l4_enabled);
 EXPORT_SYMBOL(pgtable_l5_enabled);
 
@@ -659,6 +659,13 @@ static __init pgprot_t pgprot_from_va(uintptr_t va)
 #endif /* CONFIG_STRICT_KERNEL_RWX */
 
 #ifdef CONFIG_64BIT
+static void __init disable_pgtable_l5(void)
+{
+	pgtable_l5_enabled = false;
+	kernel_map.page_offset = PAGE_OFFSET_L4;
+	satp_mode = SATP_MODE_48;
+}
+
 static void __init disable_pgtable_l4(void)
 {
 	pgtable_l4_enabled = false;
@@ -675,12 +682,12 @@ static void __init disable_pgtable_l4(void)
 static __init void set_satp_mode(void)
 {
 	u64 identity_satp, hw_satp;
-	uintptr_t set_satp_mode_pmd;
+	uintptr_t set_satp_mode_pmd = ((unsigned long)set_satp_mode) & PMD_MASK;
+	bool check_l4 = false;
 
-	set_satp_mode_pmd = ((unsigned long)set_satp_mode) & PMD_MASK;
-	create_pgd_mapping(early_pg_dir,
-			   set_satp_mode_pmd, (uintptr_t)early_pud,
-			   PGDIR_SIZE, PAGE_TABLE);
+	create_p4d_mapping(early_p4d,
+			set_satp_mode_pmd, (uintptr_t)early_pud,
+			P4D_SIZE, PAGE_TABLE);
 	create_pud_mapping(early_pud,
 			   set_satp_mode_pmd, (uintptr_t)early_pmd,
 			   PUD_SIZE, PAGE_TABLE);
@@ -692,6 +699,11 @@ static __init void set_satp_mode(void)
 			   set_satp_mode_pmd + PMD_SIZE,
 			   set_satp_mode_pmd + PMD_SIZE,
 			   PMD_SIZE, PAGE_KERNEL_EXEC);
+retry:
+	create_pgd_mapping(early_pg_dir,
+			   set_satp_mode_pmd,
+			   check_l4 ? (uintptr_t)early_pud : (uintptr_t)early_p4d,
+			   PGDIR_SIZE, PAGE_TABLE);
 
 	identity_satp = PFN_DOWN((uintptr_t)&early_pg_dir) | satp_mode;
 
@@ -700,10 +712,17 @@ static __init void set_satp_mode(void)
 	hw_satp = csr_swap(CSR_SATP, 0ULL);
 	local_flush_tlb_all();
 
-	if (hw_satp != identity_satp)
+	if (hw_satp != identity_satp) {
+		if (!check_l4) {
+			disable_pgtable_l5();
+			check_l4 = true;
+			goto retry;
+		}
 		disable_pgtable_l4();
+	}
 
 	memset(early_pg_dir, 0, PAGE_SIZE);
+	memset(early_p4d, 0, PAGE_SIZE);
 	memset(early_pud, 0, PAGE_SIZE);
 	memset(early_pmd, 0, PAGE_SIZE);
 }
-- 
2.34.1



* [PATCH v2 4/4] riscv: mm: Support kasan for sv57
  2022-01-27  2:48 [PATCH v2 0/4] Add Sv57 page table support panqinglin2020
                   ` (2 preceding siblings ...)
  2022-01-27  2:48 ` [PATCH v2 3/4] riscv: mm: Set sv57 on defaultly panqinglin2020
@ 2022-01-27  2:48 ` panqinglin2020
  2022-02-22 20:28 ` [PATCH v2 0/4] Add Sv57 page table support Palmer Dabbelt
  4 siblings, 0 replies; 6+ messages in thread
From: panqinglin2020 @ 2022-01-27  2:48 UTC (permalink / raw)
  To: paul.walmsley, palmer, aou, linux-riscv; +Cc: jeff, xuyinan, Qinglin Pan

From: Qinglin Pan <panqinglin2020@iscas.ac.cn>

This patch adds kasan_populate and kasan_shallow_populate for sv57, and
has been tested on both qemu and the unmatched board with CONFIG_KASAN
and CONFIG_KASAN_VMALLOC enabled.
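
For reference, generic KASAN maps each 8 bytes of kernel address space to
one shadow byte; under sv57 the shadow region is large enough to need the
extra p4d level, which is what the new populate helpers build. The
standard shadow translation (from include/linux/kasan.h, not this patch):

    static inline void *kasan_mem_to_shadow(const void *addr)
    {
            return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
                    + KASAN_SHADOW_OFFSET;
    }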

Signed-off-by: Qinglin Pan <panqinglin2020@iscas.ac.cn>

diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index f61f7ca6fe0f..3c99b24c9adf 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -111,6 +111,8 @@ static void __init kasan_populate_pud(pgd_t *pgd,
 		 * pt_ops facility.
 		 */
 		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
+	} else if (pgd_none(*pgd)) {
+		base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
 	} else {
 		base_pud = (pud_t *)pgd_page_vaddr(*pgd);
 		if (base_pud == lm_alias(kasan_early_shadow_pud))
@@ -149,13 +151,72 @@ static void __init kasan_populate_pud(pgd_t *pgd,
 		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
 }
 
-#define kasan_early_shadow_pgd_next			(pgtable_l4_enabled ?	\
+static void __init kasan_populate_p4d(pgd_t *pgd,
+				      unsigned long vaddr, unsigned long end,
+				      bool early)
+{
+	phys_addr_t phys_addr;
+	p4d_t *p4dp, *base_p4d;
+	unsigned long next;
+
+	if (early) {
+		/*
+		 * We can't use pgd_page_vaddr here as it would return a linear
+		 * mapping address but it is not mapped yet, but when populating
+		 * early_pg_dir, we need the physical address and when populating
+		 * swapper_pg_dir, we need the kernel virtual address so use
+		 * pt_ops facility.
+		 */
+		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgd)));
+	} else {
+		base_p4d = (p4d_t *)pgd_page_vaddr(*pgd);
+		if (base_p4d == lm_alias(kasan_early_shadow_p4d))
+			base_p4d = memblock_alloc(PTRS_PER_PUD * sizeof(p4d_t), PAGE_SIZE);
+	}
+
+	p4dp = base_p4d + p4d_index(vaddr);
+
+	do {
+		next = p4d_addr_end(vaddr, end);
+
+		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
+			if (early) {
+				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pud));
+				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
+				continue;
+			} else {
+				phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
+				if (phys_addr) {
+					set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
+					continue;
+				}
+			}
+		}
+
+		kasan_populate_pud((pgd_t *)p4dp, vaddr, next, early);
+	} while (p4dp++, vaddr = next, vaddr != end);
+
+	/*
+	 * Wait for the whole P4D to be populated before setting the P4D in
+	 * the page table, otherwise, if we did set the P4D before populating
+	 * it entirely, memblock could allocate a page at a physical address
+	 * where KASAN is not populated yet and then we'd get a page fault.
+	 */
+	if (!early)
+		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_p4d)), PAGE_TABLE));
+}
+
+#define kasan_early_shadow_pgd_next			(pgtable_l5_enabled ?	\
+				(uintptr_t)kasan_early_shadow_p4d :		\
+							(pgtable_l4_enabled ?	\
 				(uintptr_t)kasan_early_shadow_pud :		\
-				(uintptr_t)kasan_early_shadow_pmd)
+				(uintptr_t)kasan_early_shadow_pmd))
 #define kasan_populate_pgd_next(pgdp, vaddr, next, early)			\
+		(pgtable_l5_enabled ?						\
+		kasan_populate_p4d(pgdp, vaddr, next, early) :			\
 		(pgtable_l4_enabled ?						\
 			kasan_populate_pud(pgdp, vaddr, next, early) :		\
-			kasan_populate_pmd((pud_t *)pgdp, vaddr, next))
+			kasan_populate_pmd((pud_t *)pgdp, vaddr, next)))
 
 static void __init kasan_populate_pgd(pgd_t *pgdp,
 				      unsigned long vaddr, unsigned long end,
@@ -219,6 +280,14 @@ asmlinkage void __init kasan_early_init(void)
 					PAGE_TABLE));
 	}
 
+	if (pgtable_l5_enabled) {
+		for (i = 0; i < PTRS_PER_P4D; ++i)
+			set_p4d(kasan_early_shadow_p4d + i,
+				pfn_p4d(PFN_DOWN
+					(__pa(((uintptr_t)kasan_early_shadow_pud))),
+					PAGE_TABLE));
+	}
+
 	kasan_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
 			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);
 
@@ -244,9 +313,27 @@ static void __init kasan_populate(void *start, void *end)
 	memset(start, KASAN_SHADOW_INIT, end - start);
 }
 
+static void __init kasan_shallow_populate_pmd(pgd_t *pgdp,
+					      unsigned long vaddr, unsigned long end)
+{
+	unsigned long next;
+	pmd_t *pmdp, *base_pmd;
+	bool is_kasan_pte;
+
+	base_pmd = (pmd_t *)pgd_page_vaddr(*pgdp);
+	pmdp = base_pmd + pmd_index(vaddr);
+
+	do {
+		next = pmd_addr_end(vaddr, end);
+		is_kasan_pte = (pmd_pgtable(*pmdp) == lm_alias(kasan_early_shadow_pte));
+
+		if (is_kasan_pte)
+			pmd_clear(pmdp);
+	} while (pmdp++, vaddr = next, vaddr != end);
+}
+
 static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
-					      unsigned long vaddr, unsigned long end,
-					      bool kasan_populate)
+					      unsigned long vaddr, unsigned long end)
 {
 	unsigned long next;
 	pud_t *pudp, *base_pud;
@@ -256,21 +343,60 @@ static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
 	base_pud = (pud_t *)pgd_page_vaddr(*pgdp);
 	pudp = base_pud + pud_index(vaddr);
 
-	if (kasan_populate)
-		memcpy(base_pud, (void *)kasan_early_shadow_pgd_next,
-		       sizeof(pud_t) * PTRS_PER_PUD);
-
 	do {
 		next = pud_addr_end(vaddr, end);
 		is_kasan_pmd = (pud_pgtable(*pudp) == lm_alias(kasan_early_shadow_pmd));
 
-		if (is_kasan_pmd) {
-			base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-			set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
-		}
+		if (!is_kasan_pmd)
+			continue;
+
+		base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+		set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
+
+		if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE)
+			continue;
+
+		memcpy(base_pmd, (void *)kasan_early_shadow_pmd, PAGE_SIZE);
+		kasan_shallow_populate_pmd((pgd_t *)pudp, vaddr, next);
 	} while (pudp++, vaddr = next, vaddr != end);
 }
 
+static void __init kasan_shallow_populate_p4d(pgd_t *pgdp,
+					      unsigned long vaddr, unsigned long end)
+{
+	unsigned long next;
+	p4d_t *p4dp, *base_p4d;
+	pud_t *base_pud;
+	bool is_kasan_pud;
+
+	base_p4d = (p4d_t *)pgd_page_vaddr(*pgdp);
+	p4dp = base_p4d + p4d_index(vaddr);
+
+	do {
+		next = p4d_addr_end(vaddr, end);
+		is_kasan_pud = (p4d_pgtable(*p4dp) == lm_alias(kasan_early_shadow_pud));
+
+		if (!is_kasan_pud)
+			continue;
+
+		base_pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+		set_p4d(p4dp, pfn_p4d(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
+
+		if (IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE)
+			continue;
+
+		memcpy(base_pud, (void *)kasan_early_shadow_pud, PAGE_SIZE);
+		kasan_shallow_populate_pud((pgd_t *)p4dp, vaddr, next);
+	} while (p4dp++, vaddr = next, vaddr != end);
+}
+
+#define kasan_shallow_populate_pgd_next(pgdp, vaddr, next)			\
+		(pgtable_l5_enabled ?						\
+		kasan_shallow_populate_p4d(pgdp, vaddr, next) :			\
+		(pgtable_l4_enabled ?						\
+		kasan_shallow_populate_pud(pgdp, vaddr, next) :			\
+		kasan_shallow_populate_pmd(pgdp, vaddr, next)))
+
 static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
 {
 	unsigned long next;
@@ -291,7 +417,8 @@ static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long
 		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE)
 			continue;
 
-		kasan_shallow_populate_pud(pgd_k, vaddr, next, is_kasan_pgd_next);
+		memcpy(p, (void *)kasan_early_shadow_pgd_next, PAGE_SIZE);
+		kasan_shallow_populate_pgd_next(pgd_k, vaddr, next);
 	} while (pgd_k++, vaddr = next, vaddr != end);
 }
 
-- 
2.34.1



* Re: [PATCH v2 0/4] Add Sv57 page table support
  2022-01-27  2:48 [PATCH v2 0/4] Add Sv57 page table support panqinglin2020
                   ` (3 preceding siblings ...)
  2022-01-27  2:48 ` [PATCH v2 4/4] riscv: mm: Support kasan for sv57 panqinglin2020
@ 2022-02-22 20:28 ` Palmer Dabbelt
  4 siblings, 0 replies; 6+ messages in thread
From: Palmer Dabbelt @ 2022-02-22 20:28 UTC (permalink / raw)
  To: panqinglin2020
  Cc: Paul Walmsley, aou, linux-riscv, jeff, xuyinan, panqinglin2020

On Wed, 26 Jan 2022 18:48:40 PST (-0800), panqinglin2020@iscas.ac.cn wrote:
> From: Qinglin Pan <panqinglin2020@iscas.ac.cn>
>
> This implements Sv57 support at runtime. The kernel will first try to
> boot with a 5-level page table, fall back to 4 levels if the HW does not
> support Sv57, and finally fall back to 3 levels if the HW does not
> support Sv48 either.
>
> Tested on:
>  - qemu rv64
>  - qemu rv64 with CONFIG_KASAN and CONFIG_KASAN_VMALLOC on.
>  - qemu rv32
>  - sifive unmatched with CONFIG_KASAN and CONFIG_KASAN_VMALLOC on.
>
> Changes in v2:
>   - Fix KASAN for sv57, thanks to Alex
>
> Qinglin Pan (4):
>   riscv: mm: Control p4d's folding by pgtable_l5_enabled
>   riscv: mm: Prepare pt_ops helper functions for sv57
>   riscv: mm: Set sv57 on defaultly
>   riscv: mm: Support kasan for sv57
>
>  arch/riscv/Kconfig                  |   4 +-
>  arch/riscv/include/asm/csr.h        |   1 +
>  arch/riscv/include/asm/fixmap.h     |   1 +
>  arch/riscv/include/asm/page.h       |   1 +
>  arch/riscv/include/asm/pgalloc.h    |  49 ++++++++
>  arch/riscv/include/asm/pgtable-64.h | 106 +++++++++++++++++-
>  arch/riscv/include/asm/pgtable.h    |   6 +-
>  arch/riscv/kernel/cpu.c             |   4 +-
>  arch/riscv/mm/init.c                | 166 ++++++++++++++++++++++++----
>  arch/riscv/mm/kasan_init.c          | 155 +++++++++++++++++++++++---
>  10 files changed, 451 insertions(+), 42 deletions(-)

Thanks, this is on for-next.

