* [PATCH v2 0/2] Move {tramp_pg_dir,swapper_pg_dir} to .rodata
@ 2018-06-25 11:39 Jun Yao
  2018-06-25 11:39 ` [PATCH v2 1/2] arm64/mm: Introduce init_pg_dir Jun Yao
  2018-06-25 11:39 ` [PATCH v2 2/2] arm64/mm: Move {tramp_pg_dir, swapper_pg_dir} to .rodata section Jun Yao
From: Jun Yao @ 2018-06-25 11:39 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: catalin.marinas, will.deacon, ard.biesheuvel, james.morse,
	linux-kernel, kernel-hardening

Version 2 changes:

	As James Morse suggested[1], we introduce init_pg_dir, a
	reserved area of the __initdata section. We set up the initial
	page tables in it and then set up the final page tables
	directly in swapper_pg_dir. This way, we avoid the need for a
	temporary top-level pgd.

	To defend against 'KSMA' (Kernel Space Mirror Attack), we need
	to handle the page-table configurations in which a block
	mapping can be written into swapper_pg_dir. These
	configurations are:

	CONFIG_ARM64_VA_BITS_39 (4KB granule, 1GB block)
	CONFIG_ARM64_VA_BITS_36 (16KB granule, 32MB block)
	CONFIG_ARM64_VA_BITS_42 (64KB granule, 512MB block)

	When one of these configurations is selected, we move
	{tramp_pg_dir, swapper_pg_dir} to the .rodata section and
	update swapper_pg_dir via the fixmap.

[1] https://patchwork.kernel.org/patch/10476597/
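
For background, a KSMA write primitive only needs to forge a single
level-1 block descriptor. A minimal sketch, with illustrative field
values only (AttrIndx and SH bits omitted for brevity):

	/*
	 * With 4KB pages and VA_BITS=39, the pgd is the level-1 table,
	 * so one 64-bit write into swapper_pg_dir can install a 1GB
	 * block mapping that is readable/writable from EL0.
	 */
	#define BLOCK_VALID	(1UL << 0)	/* bits[1:0] = 01: valid block */
	#define BLOCK_AP_EL0_RW	(1UL << 6)	/* AP[1] = 1: EL0 read/write */
	#define BLOCK_AF	(1UL << 10)	/* access flag */

	static u64 ksma_block_desc(u64 pa)	/* pa must be 1GB-aligned */
	{
		return pa | BLOCK_AF | BLOCK_AP_EL0_RW | BLOCK_VALID;
	}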

Jun Yao (2):
  arm64/mm: Introduce init_pg_dir
  arm64/mm: Move {tramp_pg_dir, swapper_pg_dir} to .rodata section

 arch/arm64/include/asm/fixmap.h   |  2 +-
 arch/arm64/include/asm/pgalloc.h  | 33 ++++++++++++++++++++
 arch/arm64/include/asm/pgtable.h  |  8 +++--
 arch/arm64/kernel/head.S          | 52 ++++++++++++++++++++++---------
 arch/arm64/kernel/vmlinux.lds.S   | 26 +++++++++++++++-
 arch/arm64/mm/mmu.c               | 36 +++++++--------------
 include/asm-generic/vmlinux.lds.h |  5 +++
 mm/init-mm.c                      |  2 +-
 8 files changed, 119 insertions(+), 45 deletions(-)

-- 
2.17.1



* [PATCH v2 1/2] arm64/mm: Introduce init_pg_dir
  2018-06-25 11:39 [PATCH v2 0/2] Move {tramp_pg_dir,swapper_pg_dir} to .rodata Jun Yao
@ 2018-06-25 11:39 ` Jun Yao
  2018-06-25 13:55   ` kbuild test robot
                     ` (2 more replies)
  2018-06-25 11:39 ` [PATCH v2 2/2] arm64/mm: Move {tramp_pg_dir, swapper_pg_dir} to .rodata section Jun Yao
From: Jun Yao @ 2018-06-25 11:39 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: catalin.marinas, will.deacon, ard.biesheuvel, james.morse,
	linux-kernel, kernel-hardening

We set up the initial page tables in init_pg_dir, a reserved
area of the __initdata section. In paging_init(), we then no
longer need a temporary top-level pgd and can set up the final
page tables directly in swapper_pg_dir.

Signed-off-by: Jun Yao <yaojun8558363@gmail.com>
---
 arch/arm64/include/asm/fixmap.h   |  1 -
 arch/arm64/include/asm/pgtable.h  |  5 ++--
 arch/arm64/kernel/head.S          | 46 +++++++++++++++++++++++--------
 arch/arm64/kernel/vmlinux.lds.S   |  3 +-
 arch/arm64/mm/mmu.c               | 30 ++++----------------
 include/asm-generic/vmlinux.lds.h |  5 ++++
 mm/init-mm.c                      |  2 +-
 7 files changed, 49 insertions(+), 43 deletions(-)

diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index ec1e6d6fa14c..62908eeedcdc 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -83,7 +83,6 @@ enum fixed_addresses {
 	FIX_PTE,
 	FIX_PMD,
 	FIX_PUD,
-	FIX_PGD,
 
 	__end_of_fixed_addresses
 };
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 7c4c8f318ba9..b2435e8b975b 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -592,9 +592,6 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
 
-#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
-#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)
-
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
@@ -718,6 +715,8 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 }
 #endif
 
+extern pgd_t init_pg_dir[PTRS_PER_PGD];
+extern pgd_t init_pg_end[];
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t swapper_pg_end[];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b0853069702f..9677deb7b6c7 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -276,6 +276,15 @@ ENDPROC(preserve_boot_args)
 	populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
 	.endm
 
+	.macro clear_pages, start, size
+1:	stp	xzr, xzr, [\start], #16
+	stp	xzr, xzr, [\start], #16
+	stp	xzr, xzr, [\start], #16
+	stp	xzr, xzr, [\start], #16
+	subs	\size, \size, #64
+	b.ne	1b
+	.endm
+
 /*
  * Setup the initial page tables. We only setup the barest amount which is
  * required to get the kernel running. The following sections are required:
@@ -287,7 +296,7 @@ __create_page_tables:
 	mov	x28, lr
 
 	/*
-	 * Invalidate the idmap and swapper page tables to avoid potential
+	 * Invalidate the idmap and init page tables to avoid potential
 	 * dirty cache lines being evicted.
 	 */
 	adrp	x0, idmap_pg_dir
@@ -295,18 +304,23 @@ __create_page_tables:
 	sub	x1, x1, x0
 	bl	__inval_dcache_area
 
+	adrp	x0, init_pg_dir
+	adrp	x1, init_pg_end
+	sub	x1, x1, x0
+	bl	__inval_dcache_area
+
 	/*
-	 * Clear the idmap and swapper page tables.
+	 * Clear the idmap and init page tables.
 	 */
 	adrp	x0, idmap_pg_dir
 	adrp	x1, swapper_pg_end
 	sub	x1, x1, x0
-1:	stp	xzr, xzr, [x0], #16
-	stp	xzr, xzr, [x0], #16
-	stp	xzr, xzr, [x0], #16
-	stp	xzr, xzr, [x0], #16
-	subs	x1, x1, #64
-	b.ne	1b
+	clear_pages x0, x1
+
+	adrp	x0, init_pg_dir
+	adrp	x1, init_pg_end
+	sub	x1, x1, x0
+	clear_pages x0, x1
 
 	mov	x7, SWAPPER_MM_MMUFLAGS
 
@@ -373,7 +387,7 @@ __create_page_tables:
 	/*
 	 * Map the kernel image (starting with PHYS_OFFSET).
 	 */
-	adrp	x0, swapper_pg_dir
+	adrp	x0, init_pg_dir
 	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
 	add	x5, x5, x23			// add KASLR displacement
 	mov	x4, PTRS_PER_PGD
@@ -386,7 +400,7 @@ __create_page_tables:
 
 	/*
 	 * Since the page tables have been populated with non-cacheable
-	 * accesses (MMU disabled), invalidate the idmap and swapper page
+	 * accesses (MMU disabled), invalidate the idmap and init page
 	 * tables again to remove any speculatively loaded cache lines.
 	 */
 	adrp	x0, idmap_pg_dir
@@ -395,6 +409,12 @@ __create_page_tables:
 	dmb	sy
 	bl	__inval_dcache_area
 
+	adrp	x0, init_pg_dir
+	adrp	x1, init_pg_end
+	sub	x1, x1, x0
+	dmb	sy
+	bl	__inval_dcache_area
+
 	ret	x28
 ENDPROC(__create_page_tables)
 	.ltorg
@@ -706,6 +726,7 @@ secondary_startup:
 	 * Common entry point for secondary CPUs.
 	 */
 	bl	__cpu_setup			// initialise processor
+	adr_l   x26, swapper_pg_dir
 	bl	__enable_mmu
 	ldr	x8, =__secondary_switched
 	br	x8
@@ -748,6 +769,7 @@ ENDPROC(__secondary_switched)
  * Enable the MMU.
  *
  *  x0  = SCTLR_EL1 value for turning on the MMU.
+ *  x26 = TTBR1_EL1 value for turning on the MMU.
  *
  * Returns to the caller via x30/lr. This requires the caller to be covered
  * by the .idmap.text section.
@@ -762,7 +784,7 @@ ENTRY(__enable_mmu)
 	b.ne	__no_granule_support
 	update_early_cpu_boot_status 0, x1, x2
 	adrp	x1, idmap_pg_dir
-	adrp	x2, swapper_pg_dir
+	mov	x2, x26
 	phys_to_ttbr x3, x1
 	phys_to_ttbr x4, x2
 	msr	ttbr0_el1, x3			// load TTBR0
@@ -822,7 +844,7 @@ __primary_switch:
 	mov	x19, x0				// preserve new SCTLR_EL1 value
 	mrs	x20, sctlr_el1			// preserve old SCTLR_EL1 value
 #endif
-
+	adrp	x26, init_pg_dir
 	bl	__enable_mmu
 #ifdef CONFIG_RELOCATABLE
 	bl	__relocate_kernel
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 605d1b60469c..b0e4255fcba4 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -168,6 +168,7 @@ SECTIONS
 		CON_INITCALL
 		SECURITY_INITCALL
 		INIT_RAM_FS
+		INIT_DIR
 		*(.init.rodata.* .init.bss)	/* from the EFI stub */
 	}
 	.exit.data : {
@@ -229,7 +230,7 @@ SECTIONS
 	. += RESERVED_TTBR0_SIZE;
 #endif
 	swapper_pg_dir = .;
-	. += SWAPPER_DIR_SIZE;
+	. += PAGE_SIZE;
 	swapper_pg_end = .;
 
 	__pecoff_data_size = ABSOLUTE(. - __initdata_begin);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 2dbb2c9f1ec1..a3b5f1dffb84 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -628,34 +628,14 @@ static void __init map_kernel(pgd_t *pgdp)
  */
 void __init paging_init(void)
 {
-	phys_addr_t pgd_phys = early_pgtable_alloc();
-	pgd_t *pgdp = pgd_set_fixmap(pgd_phys);
-
-	map_kernel(pgdp);
-	map_mem(pgdp);
-
 	/*
-	 * We want to reuse the original swapper_pg_dir so we don't have to
-	 * communicate the new address to non-coherent secondaries in
-	 * secondary_entry, and so cpu_switch_mm can generate the address with
-	 * adrp+add rather than a load from some global variable.
-	 *
-	 * To do this we need to go via a temporary pgd.
+	 * Setup final page tables in swapper_pg_dir.
 	 */
-	cpu_replace_ttbr1(__va(pgd_phys));
-	memcpy(swapper_pg_dir, pgdp, PGD_SIZE);
-	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+	map_kernel(swapper_pg_dir);
+	map_mem(swapper_pg_dir);
 
-	pgd_clear_fixmap();
-	memblock_free(pgd_phys, PAGE_SIZE);
-
-	/*
-	 * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
-	 * allocated with it.
-	 */
-	memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
-		      __pa_symbol(swapper_pg_end) - __pa_symbol(swapper_pg_dir)
-		      - PAGE_SIZE);
+	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+	init_mm.pgd = swapper_pg_dir;
 }
 
 /*
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index af240573e482..a11e7117da4d 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -230,6 +230,11 @@
 	KEEP(*(.dtb.init.rodata))					\
 	VMLINUX_SYMBOL(__dtb_end) = .;
 
+#define INIT_DIR							\
+	. = ALIGN(PAGE_SIZE);						\
+	init_pg_dir = .;						\
+	. += SWAPPER_DIR_SIZE;						\
+	init_pg_end = .;
 /*
  * .data section
  */
diff --git a/mm/init-mm.c b/mm/init-mm.c
index f94d5d15ebc0..08a0eed00667 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -17,7 +17,7 @@
 
 struct mm_struct init_mm = {
 	.mm_rb		= RB_ROOT,
-	.pgd		= swapper_pg_dir,
+	.pgd		= init_pg_dir,
 	.mm_users	= ATOMIC_INIT(2),
 	.mm_count	= ATOMIC_INIT(1),
 	.mmap_sem	= __RWSEM_INITIALIZER(init_mm.mmap_sem),
-- 
2.17.1



* [PATCH v2 2/2] arm64/mm: Move {tramp_pg_dir, swapper_pg_dir} to .rodata section
  2018-06-25 11:39 [PATCH v2 0/2] Move {tramp_pg_dir,swapper_pg_dir} to .rodata Jun Yao
  2018-06-25 11:39 ` [PATCH v2 1/2] arm64/mm: Introduce init_pg_dir Jun Yao
@ 2018-06-25 11:39 ` Jun Yao
  2018-06-26 11:21   ` James Morse
  2018-06-27 11:07   ` Suzuki K Poulose
From: Jun Yao @ 2018-06-25 11:39 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: catalin.marinas, will.deacon, ard.biesheuvel, james.morse,
	linux-kernel, kernel-hardening

When CONFIG_ARM64_VA_BITS_36, CONFIG_ARM64_VA_BITS_39 or
CONFIG_ARM64_VA_BITS_42 is selected, a block mapping can be
written into swapper_pg_dir. To defend against 'KSMA', we move
swapper_pg_dir to the .rodata section when these configurations
are selected, and we update swapper_pg_dir via the fixmap.

Signed-off-by: Jun Yao <yaojun8558363@gmail.com>
---
 arch/arm64/include/asm/fixmap.h  |  1 +
 arch/arm64/include/asm/pgalloc.h | 33 ++++++++++++++++++++++++++++++++
 arch/arm64/include/asm/pgtable.h |  5 +++++
 arch/arm64/kernel/head.S         |  6 +++---
 arch/arm64/kernel/vmlinux.lds.S  | 23 ++++++++++++++++++++++
 arch/arm64/mm/mmu.c              |  6 ++++++
 6 files changed, 71 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 62908eeedcdc..881784b43965 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -83,6 +83,7 @@ enum fixed_addresses {
 	FIX_PTE,
 	FIX_PMD,
 	FIX_PUD,
+	FIX_SWAPPER,
 
 	__end_of_fixed_addresses
 };
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 2e05bcd944c8..62512ad9c310 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -49,6 +49,22 @@ static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmdp)
 {
+#ifdef CONFIG_ARM64_VA_BITS_39
+	if (mm == &init_mm) {
+		pud_t *pud;
+		unsigned long offset;
+		extern spinlock_t pgdir_lock;
+
+		spin_lock(&pgdir_lock);
+		pud = (pud_t *)swapper_set_fixmap();
+		offset = (unsigned long)pudp - (unsigned long)swapper_pg_dir;
+		pud = (pud_t *)((unsigned long)pud + offset);
+		__pud_populate(pud, __pa(pmdp), PMD_TYPE_TABLE);
+		swapper_clear_fixmap();
+		spin_unlock(&pgdir_lock);
+		return;
+	}
+#endif
 	__pud_populate(pudp, __pa(pmdp), PMD_TYPE_TABLE);
 }
 #else
@@ -142,6 +158,23 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
 	/*
 	 * The pmd must be loaded with the physical address of the PTE table
 	 */
+#if defined(CONFIG_ARM64_VA_BITS_42) || \
+	defined(CONFIG_ARM64_VA_BITS_36)
+	if (mm == &init_mm) {
+		pmd_t *pmd;
+		unsigned long offset;
+		extern spinlock_t pgdir_lock;
+
+		spin_lock(&pgdir_lock);
+		pmd = (pmd_t *)swapper_set_fixmap();
+		offset = (unsigned long)pmdp - (unsigned long)swapper_pg_dir;
+		pmd = (pmd_t *)((unsigned long)pmd + offset);
+		__pmd_populate(pmd, __pa(ptep), PMD_TYPE_TABLE);
+		swapper_clear_fixmap();
+		spin_unlock(&pgdir_lock);
+		return;
+	}
+#endif
 	__pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE);
 }
 
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b2435e8b975b..85743ea0709b 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -592,6 +592,11 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
 
+#define swapper_set_fixmap() \
+	set_fixmap_offset(FIX_SWAPPER, __pa_symbol(swapper_pg_dir))
+
+#define swapper_clear_fixmap()	clear_fixmap(FIX_SWAPPER)
+
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 9677deb7b6c7..9db187024b44 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -300,7 +300,7 @@ __create_page_tables:
 	 * dirty cache lines being evicted.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x1, swapper_pg_end
+	adrp	x1, idmap_pg_end
 	sub	x1, x1, x0
 	bl	__inval_dcache_area
 
@@ -313,7 +313,7 @@ __create_page_tables:
 	 * Clear the idmap and init page tables.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x1, swapper_pg_end
+	adrp	x1, idmap_pg_end
 	sub	x1, x1, x0
 	clear_pages x0, x1
 
@@ -404,7 +404,7 @@ __create_page_tables:
 	 * tables again to remove any speculatively loaded cache lines.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x1, swapper_pg_end
+	adrp	x1, idmap_pg_end
 	sub	x1, x1, x0
 	dmb	sy
 	bl	__inval_dcache_area
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index b0e4255fcba4..db72c4680f1d 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -219,6 +219,28 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	idmap_pg_dir = .;
 	. += IDMAP_DIR_SIZE;
+	idmap_pg_end = .;
+
+#if defined(CONFIG_ARM64_VA_BITS_39) || \
+	defined(CONFIG_ARM64_VA_BITS_36) || \
+	defined(CONFIG_ARM64_VA_BITS_42)
+	.rodata : {
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+		. = ALIGN(PAGE_SIZE);
+		tramp_pg_dir = .;
+		. += PAGE_SIZE;
+#endif
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+		reserved_ttbr0 = .;
+		. += RESERVED_TTBR0_SIZE;
+#endif
+		swapper_pg_dir = .;
+		. += PAGE_SIZE;
+		swapper_pg_end = .;
+	}
+
+#else
 
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 	tramp_pg_dir = .;
@@ -232,6 +254,7 @@ SECTIONS
 	swapper_pg_dir = .;
 	. += PAGE_SIZE;
 	swapper_pg_end = .;
+#endif
 
 	__pecoff_data_size = ABSOLUTE(. - __initdata_begin);
 	_end = .;
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a3b5f1dffb84..fbaf3e9b4a43 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -66,6 +66,12 @@ static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
 static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
 static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
 
+#if defined(CONFIG_ARM64_VA_BITS_39) || \
+	defined(CONFIG_ARM64_VA_BITS_36) || \
+	defined(CONFIG_ARM64_VA_BITS_42)
+DEFINE_SPINLOCK(pgdir_lock);
+#endif
+
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
 {
-- 
2.17.1



* Re: [PATCH v2 1/2] arm64/mm: Introduce init_pg_dir
  2018-06-25 11:39 ` [PATCH v2 1/2] arm64/mm: Introduce init_pg_dir Jun Yao
  2018-06-25 13:55   ` kbuild test robot
@ 2018-06-25 13:55   ` kbuild test robot
  2018-06-25 16:37   ` James Morse
From: kbuild test robot @ 2018-06-25 13:55 UTC (permalink / raw)
  To: Jun Yao
  Cc: kbuild-all, linux-arm-kernel, catalin.marinas, will.deacon,
	ard.biesheuvel, james.morse, linux-kernel, kernel-hardening

[-- Attachment #1: Type: text/plain, Size: 1151 bytes --]

Hi Jun,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on arm64/for-next/core]
[also build test ERROR on v4.18-rc2 next-20180625]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Jun-Yao/arm64-mm-Introduce-init_pg_dir/20180625-194126
base:   https://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git for-next/core
config: x86_64-randconfig-x015-201825 (attached as .config)
compiler: gcc-7 (Debian 7.3.0-16) 7.3.0
reproduce:
        # save the attached .config to linux build tree
        make ARCH=x86_64 

All errors (new ones prefixed by >>):

>> mm/init-mm.c:20:10: error: 'init_pg_dir' undeclared here (not in a function); did you mean 'init_pid_ns'?
     .pgd  = init_pg_dir,
             ^~~~~~~~~~~
             init_pid_ns

vim +20 mm/init-mm.c

    17	
    18	struct mm_struct init_mm = {
    19		.mm_rb		= RB_ROOT,
  > 20		.pgd		= init_pg_dir,

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 27204 bytes --]


* Re: [PATCH v2 1/2] arm64/mm: Introduce init_pg_dir
  2018-06-25 11:39 ` [PATCH v2 1/2] arm64/mm: Introduce init_pg_dir Jun Yao
@ 2018-06-25 13:55   ` kbuild test robot
  2018-06-25 13:55   ` kbuild test robot
  2018-06-25 16:37   ` James Morse
From: kbuild test robot @ 2018-06-25 13:55 UTC (permalink / raw)
  To: Jun Yao
  Cc: kbuild-all, linux-arm-kernel, catalin.marinas, will.deacon,
	ard.biesheuvel, james.morse, linux-kernel, kernel-hardening

[-- Attachment #1: Type: text/plain, Size: 1320 bytes --]

Hi Jun,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on arm64/for-next/core]
[also build test ERROR on v4.18-rc2 next-20180625]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Jun-Yao/arm64-mm-Introduce-init_pg_dir/20180625-194126
base:   https://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git for-next/core
config: parisc-allyesconfig (attached as .config)
compiler: hppa-linux-gnu-gcc (Debian 7.2.0-11) 7.2.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        GCC_VERSION=7.2.0 make.cross ARCH=parisc 

All errors (new ones prefixed by >>):

>> mm/init-mm.c:20:10: error: 'init_pg_dir' undeclared here (not in a function); did you mean 'init_per_cpu'?
     .pgd  = init_pg_dir,
             ^~~~~~~~~~~
             init_per_cpu

vim +20 mm/init-mm.c

    17	
    18	struct mm_struct init_mm = {
    19		.mm_rb		= RB_ROOT,
  > 20		.pgd		= init_pg_dir,

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 52576 bytes --]


* Re: [PATCH v2 1/2] arm64/mm: Introduce init_pg_dir
  2018-06-25 11:39 ` [PATCH v2 1/2] arm64/mm: Introduce init_pg_dir Jun Yao
  2018-06-25 13:55   ` kbuild test robot
  2018-06-25 13:55   ` kbuild test robot
@ 2018-06-25 16:37   ` James Morse
From: James Morse @ 2018-06-25 16:37 UTC (permalink / raw)
  To: Jun Yao
  Cc: linux-arm-kernel, catalin.marinas, will.deacon, ard.biesheuvel,
	linux-kernel, kernel-hardening

Hi Jun,

On 25/06/18 12:39, Jun Yao wrote:
> We setup initial page tables in init_pg_dir, which is a reserved
> area of the __initdata section. And in paging_init(), we no
> longer need a temporary top-level and we can setup final page
> tables in swapper_pg_dir directly.

This patch is doing quite a lot. Some ideas on how to break it up below.


>  arch/arm64/include/asm/fixmap.h   |  1 -
>  arch/arm64/include/asm/pgtable.h  |  5 ++--
>  arch/arm64/kernel/head.S          | 46 +++++++++++++++++++++++--------

You missed an enable_mmu() caller in sleep.S


>  arch/arm64/kernel/vmlinux.lds.S   |  3 +-
>  arch/arm64/mm/mmu.c               | 30 ++++----------------

>  include/asm-generic/vmlinux.lds.h |  5 ++++
>  mm/init-mm.c                      |  2 +-

We shouldn't need to modify these files as they affect other architectures too.


> diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
> index ec1e6d6fa14c..62908eeedcdc 100644
> --- a/arch/arm64/include/asm/fixmap.h
> +++ b/arch/arm64/include/asm/fixmap.h
> @@ -83,7 +83,6 @@ enum fixed_addresses {
>  	FIX_PTE,
>  	FIX_PMD,
>  	FIX_PUD,
> -	FIX_PGD,

> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index 7c4c8f318ba9..b2435e8b975b 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -592,9 +592,6 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
>  /* to find an entry in a kernel page-table-directory */
>  #define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
>  
> -#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
> -#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

I think we want to keep these for pgd_populate() once the top-level entry is
read-only. Linux's folding means the PUD or PMD levels may be called PGD, so
that the top-level type is always the same.


>  static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
>  {
>  	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
> @@ -718,6 +715,8 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
>  }
>  #endif
>  
> +extern pgd_t init_pg_dir[PTRS_PER_PGD];
> +extern pgd_t init_pg_end[];

Please mark these with __initdata so that tools like sparse can catch code
trying to access these once they've been freed.
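
e.g. a sketch of the annotated declarations (untested):

	extern pgd_t init_pg_dir[PTRS_PER_PGD] __initdata;
	extern pgd_t init_pg_end[] __initdata;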


>  extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
>  extern pgd_t swapper_pg_end[];
>  extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
> diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
> index b0853069702f..9677deb7b6c7 100644
> --- a/arch/arm64/kernel/head.S
> +++ b/arch/arm64/kernel/head.S
> @@ -276,6 +276,15 @@ ENDPROC(preserve_boot_args)
>  	populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
>  	.endm
>  
> +	.macro clear_pages, start, size
> +1:	stp	xzr, xzr, [\start], #16
> +	stp	xzr, xzr, [\start], #16
> +	stp	xzr, xzr, [\start], #16
> +	stp	xzr, xzr, [\start], #16
> +	subs	\size, \size, #64
> +	b.ne	1b
> +	.endm

Could this go in arch/arm64/include/asm/assembler.h along with the other
assembly macros? We may want to use it elsewhere.

(Nit: This isn't really clearing pages, it's more of a range.)


>  /*
>   * Setup the initial page tables. We only setup the barest amount which is
>   * required to get the kernel running. The following sections are required:



> @@ -373,7 +387,7 @@ __create_page_tables:
>  	/*
>  	 * Map the kernel image (starting with PHYS_OFFSET).
>  	 */
> -	adrp	x0, swapper_pg_dir
> +	adrp	x0, init_pg_dir
>  	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
>  	add	x5, x5, x23			// add KASLR displacement
>  	mov	x4, PTRS_PER_PGD
> @@ -386,7 +400,7 @@ __create_page_tables:
>  
>  	/*
>  	 * Since the page tables have been populated with non-cacheable
> -	 * accesses (MMU disabled), invalidate the idmap and swapper page
> +	 * accesses (MMU disabled), invalidate the idmap and init page
>  	 * tables again to remove any speculatively loaded cache lines.
>  	 */
>  	adrp	x0, idmap_pg_dir
> @@ -395,6 +409,12 @@ __create_page_tables:
>  	dmb	sy
>  	bl	__inval_dcache_area
>  
> +	adrp	x0, init_pg_dir
> +	adrp	x1, init_pg_end
> +	sub	x1, x1, x0

> +	dmb	sy

I think this barrier is to ensure the writes from map_memory happen before we
start invalidating the corresponding cache area, so that any newly cached data
after the invalidate must read the data map_memory wrote.

We shouldn't need a second one here.


> +	bl	__inval_dcache_area
> +
>  	ret	x28
>  ENDPROC(__create_page_tables)
>  	.ltorg
> @@ -706,6 +726,7 @@ secondary_startup:
>  	 * Common entry point for secondary CPUs.
>  	 */
>  	bl	__cpu_setup			// initialise processor
> +	adr_l   x26, swapper_pg_dir

We know this is page-aligned; the adrp that you removed from __enable_mmu is enough.


>  	bl	__enable_mmu
>  	ldr	x8, =__secondary_switched
>  	br	x8
> @@ -748,6 +769,7 @@ ENDPROC(__secondary_switched)
>   * Enable the MMU.
>   *
>   *  x0  = SCTLR_EL1 value for turning on the MMU.
> + *  x26 = TTBR1_EL1 value for turning on the MMU.

(Nit: I'd prefer these functions kept to the PCS wherever possible. Yes, this
means shuffling some registers, which is noisy, but if this is broken up into
smaller patches it should be tolerable.)


>   *
>   * Returns to the caller via x30/lr. This requires the caller to be covered
>   * by the .idmap.text section.
> @@ -762,7 +784,7 @@ ENTRY(__enable_mmu)
>  	b.ne	__no_granule_support
>  	update_early_cpu_boot_status 0, x1, x2
>  	adrp	x1, idmap_pg_dir
> -	adrp	x2, swapper_pg_dir
> +	mov	x2, x26
>  	phys_to_ttbr x3, x1
>  	phys_to_ttbr x4, x2
>  	msr	ttbr0_el1, x3			// load TTBR0


> diff --git a/mm/init-mm.c b/mm/init-mm.c
> index f94d5d15ebc0..08a0eed00667 100644
> --- a/mm/init-mm.c
> +++ b/mm/init-mm.c
> @@ -17,7 +17,7 @@
>
>  struct mm_struct init_mm = {
>  	.mm_rb		= RB_ROOT,
> -	.pgd		= swapper_pg_dir,
> +	.pgd		= init_pg_dir,
>  	.mm_users	= ATOMIC_INIT(2),
>  	.mm_count	= ATOMIC_INIT(1),
>  	.mmap_sem	= __RWSEM_INITIALIZER(init_mm.mmap_sem),
>

We really can't do this: it breaks other architectures, which don't have an
'init_pg_dir' or any code to switch over to 'swapper_pg_dir' once they've done
paging_init().

We can keep the change entirely in arch/arm64 if we set init_mm.pgd to
init_pg_dir before we call early_fixmap_init() in kaslr_early_init() and
setup_arch(). You already switch it back in paging_init().
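
Something like this (untested) sketch, called before the first fixmap use:

	/* Keep the switch arm64-local instead of patching mm/init-mm.c;
	 * paging_init() already moves init_mm.pgd back to swapper_pg_dir. */
	static void __init arm64_use_init_pg_dir(void)
	{
		init_mm.pgd = init_pg_dir;
	}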


> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 2dbb2c9f1ec1..a3b5f1dffb84 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -628,34 +628,14 @@ static void __init map_kernel(pgd_t *pgdp)
>   */
>  void __init paging_init(void)
>  {
> -	phys_addr_t pgd_phys = early_pgtable_alloc();
> -	pgd_t *pgdp = pgd_set_fixmap(pgd_phys);
> -
> -	map_kernel(pgdp);
> -	map_mem(pgdp);
> -
>  	/*
> -	 * We want to reuse the original swapper_pg_dir so we don't have to
> -	 * communicate the new address to non-coherent secondaries in
> -	 * secondary_entry, and so cpu_switch_mm can generate the address with
> -	 * adrp+add rather than a load from some global variable.
> -	 *
> -	 * To do this we need to go via a temporary pgd.
> +	 * Setup final page tables in swapper_pg_dir.
>  	 */
> -	cpu_replace_ttbr1(__va(pgd_phys));
> -	memcpy(swapper_pg_dir, pgdp, PGD_SIZE);
> -	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
> +	map_kernel(swapper_pg_dir);
> +	map_mem(swapper_pg_dir);
>  
> -	pgd_clear_fixmap();
> -	memblock_free(pgd_phys, PAGE_SIZE);
> -
> -	/*
> -	 * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
> -	 * allocated with it.
> -	 */
> -	memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
> -		      __pa_symbol(swapper_pg_end) - __pa_symbol(swapper_pg_dir)
> -		      - PAGE_SIZE);
> +	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
> +	init_mm.pgd = swapper_pg_dir;
>  }

This is difficult to follow because this patch is doing too much. I think you
could break it into four:
1. Create init_pg_dir, its linker-script changes and boiler-plate
clearing/cleaning/invalidating in head.S. Nothing will use it yet.
2. Make enable_mmu() take the physical address of the ttbr1 page as an argument.
3. Switch head.S to create page tables in init_pg_dir, and trim paging_init as
above.
4. Changes to the linker script and paging_init() to make swapper_pg_dir smaller
so we don't need to memblock_free() it.

This way the page table generation changes and the
boiler-plate-setup/final-cleanup are in separate patches.


Thanks,

James


* Re: [PATCH v2 2/2] arm64/mm: Move {tramp_pg_dir, swapper_pg_dir} to .rodata section
  2018-06-25 11:39 ` [PATCH v2 2/2] arm64/mm: Move {tramp_pg_dir, swapper_pg_dir} to .rodata section Jun Yao
@ 2018-06-26 11:21   ` James Morse
  2018-06-27 11:07   ` Suzuki K Poulose
From: James Morse @ 2018-06-26 11:21 UTC (permalink / raw)
  To: Jun Yao
  Cc: linux-arm-kernel, catalin.marinas, will.deacon, ard.biesheuvel,
	linux-kernel, kernel-hardening

Hi Jun,

On 25/06/18 12:39, Jun Yao wrote:
> When CONFIG_ARM64_VA_BITS_36/CONFIG_ARM64_VA_BITS_39/
> CONFIG_ARM64_VA_BITS_42 are selected, a block-mapping can be
> written to swapper_pg_dir. To defend 'KSMA', we move swapper_pg_dir
> to .rodata section when these configurations are selected. At the
> same time, we update swapper_pg_dir by fixmap.

I don't think we should consider individual MMU configurations. KSMA is the
motivation for making swapper_pg_dir read-only.
When we make swapper_pg_dir read-only, it should work in the same way for all
configurations, as that's simpler.


> diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
> index 62908eeedcdc..881784b43965 100644
> --- a/arch/arm64/include/asm/fixmap.h
> +++ b/arch/arm64/include/asm/fixmap.h
> @@ -83,6 +83,7 @@ enum fixed_addresses {
>  	FIX_PTE,
>  	FIX_PMD,
>  	FIX_PUD,
> +	FIX_SWAPPER,

Please bring back the FIX_PGD entry that you removed in the previous patch.
Depending on the folding, pmd/pud can be used as the top level. If we always
use the fixmap entry that goes with the name, it's clear how these operate.


>  	__end_of_fixed_addresses
>  };
> diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
> index 2e05bcd944c8..62512ad9c310 100644
> --- a/arch/arm64/include/asm/pgalloc.h
> +++ b/arch/arm64/include/asm/pgalloc.h

How come you don't touch pgd_populate()? Surely with a 4-level configuration we
may need to put a new pud-table in when we use a fresh chunk of vmalloc space...

(Ah, this is because you are conditionally doing all this on configurations
where the MMU has top-level block mappings. I think this is unnecessarily
complicated; we should just do it all the time.)


> @@ -49,6 +49,22 @@ static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
>  
>  static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmdp)
>  {
> +#ifdef CONFIG_ARM64_VA_BITS_39

Please make this depend on the number of levels, not the VA size. We also have
3-level page tables for 64K pages with a 48-bit VA space, and for 16K pages
with a 47-bit VA space.

I'd prefer some "if (CONFIG_PGTABLE_LEVELS == 3)" style check, as that way the
compiler will always parse the code that is currently hidden behind #ifdefs.

We can avoid use of fixmap entries for subsequent levels by making the check
something like:
| if (CONFIG_PGTABLE_LEVELS == 3 && magic_helper(pud) == swapper_pg_dir)
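
Expanded into a hypothetical (untested) shape, with in_swapper_pgdir()
standing in for whatever helper we settle on:

	static inline void pud_populate(struct mm_struct *mm, pud_t *pudp,
					pmd_t *pmdp)
	{
		if (CONFIG_PGTABLE_LEVELS == 3 && in_swapper_pgdir(pudp)) {
			/* Alias the read-only pgd page via the fixmap and
			 * apply the update at the matching offset. */
			unsigned long offset = (unsigned long)pudp -
					       (unsigned long)swapper_pg_dir;
			pud_t *fixmap_pudp = (pud_t *)((unsigned long)
				pgd_set_fixmap(__pa_symbol(swapper_pg_dir)) +
				offset);

			__pud_populate(fixmap_pudp, __pa(pmdp), PMD_TYPE_TABLE);
			pgd_clear_fixmap();
			return;
		}
		__pud_populate(pudp, __pa(pmdp), PMD_TYPE_TABLE);
	}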


> +	if (mm == &init_mm) {
> +		pud_t *pud;
> +		unsigned long offset;
> +		extern spinlock_t pgdir_lock;
> +
> +		spin_lock(&pgdir_lock);
> +		pud = (pud_t *)swapper_set_fixmap();

Can't we use pud_set_fixmap() for pud levels?

Do we need an extra spinlock? Won't init_mm.page_table_lock always be held?
(We can put a lockdep_assert_held() in here to find cases where it isn't.)
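
e.g., as a one-line sketch (assuming callers do hold it):

	lockdep_assert_held(&init_mm.page_table_lock);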


> +		offset = (unsigned long)pudp - (unsigned long)swapper_pg_dir;
> +		pud = (pud_t *)((unsigned long)pud + offset);

It feels like there should be a helper for this.

I'm not comfortable assuming mm=init_mm means we are actually modifying
swapper_pg_dir. Hibernate does this on the grounds that the tables it generates
will be used as the kernel's mm when it starts restoring. This field wasn't used
before, so it wasn't clear what the field was for. I'm sure we will find
something else doing this...

Could we test that the provided pudp is within swapper_pg_dir before invoking
the fixmap path?
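
A hypothetical helper (untested), relying on the pgd being exactly one page:

	static inline bool in_swapper_pgdir(void *addr)
	{
		return ((unsigned long)addr & PAGE_MASK) ==
			((unsigned long)swapper_pg_dir & PAGE_MASK);
	}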


> +		__pud_populate(pud, __pa(pmdp), PMD_TYPE_TABLE);
> +		swapper_clear_fixmap();
> +		spin_unlock(&pgdir_lock);
> +		return;
> +	}
> +#endif
>  	__pud_populate(pudp, __pa(pmdp), PMD_TYPE_TABLE);
>  }
>  #else


> diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
> index 9677deb7b6c7..9db187024b44 100644
> --- a/arch/arm64/kernel/head.S
> +++ b/arch/arm64/kernel/head.S

(Changes here are cleanup; they should be in a separate patch.)


> @@ -300,7 +300,7 @@ __create_page_tables:
>  	 * dirty cache lines being evicted.
>  	 */
>  	adrp	x0, idmap_pg_dir
> -	adrp	x1, swapper_pg_end
> +	adrp	x1, idmap_pg_end
>  	sub	x1, x1, x0
>  	bl	__inval_dcache_area

As we no longer modify swapper_pg_dir with the MMU disabled, I think this is safe.


> @@ -313,7 +313,7 @@ __create_page_tables:
>  	 * Clear the idmap and init page tables.
>  	 */
>  	adrp	x0, idmap_pg_dir
> -	adrp	x1, swapper_pg_end
> +	adrp	x1, idmap_pg_end
>  	sub	x1, x1, x0
>  	clear_pages x0, x1

With the current placement of swapper_pg_dir after the BSS, we rely on this to
zero that memory. It may contain junk left over from the boot loader, as this
is the area where the kernel's memory footprint is bigger than the Image file.

Your vmlinux.lds.S changes below conditionally move swapper_pg_dir around.
Sometimes you need this, sometimes you don't.

Please always keep these pgds in the read-only section. I think we should keep
the page-zeroing as a sanity check: we only do it once during boot, and
corruption here isn't something we would be able to debug easily.


> diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
> index b0e4255fcba4..db72c4680f1d 100644
> --- a/arch/arm64/kernel/vmlinux.lds.S
> +++ b/arch/arm64/kernel/vmlinux.lds.S
> @@ -219,6 +219,28 @@ SECTIONS
>  	. = ALIGN(PAGE_SIZE);
>  	idmap_pg_dir = .;
>  	. += IDMAP_DIR_SIZE;
> +	idmap_pg_end = .;
> +
> +#if defined(CONFIG_ARM64_VA_BITS_39) || \
> +	defined(CONFIG_ARM64_VA_BITS_36) || \
> +	defined(CONFIG_ARM64_VA_BITS_42)

I think we should always do this read-only-swapper, even if there aren't any
top-level block-mappings. The code will be much more maintainable this way.


> +	.rodata : {
> +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
> +		. = ALIGN(PAGE_SIZE);
> +		tramp_pg_dir = .;
> +		. += PAGE_SIZE;
> +#endif
> +
> +#ifdef CONFIG_ARM64_SW_TTBR0_PAN
> +		reserved_ttbr0 = .;
> +		. += RESERVED_TTBR0_SIZE;
> +#endif
> +		swapper_pg_dir = .;
> +		. += PAGE_SIZE;
> +		swapper_pg_end = .;
> +	}
> +
> +#else
>  
>  #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>  	tramp_pg_dir = .;
> @@ -232,6 +254,7 @@ SECTIONS
>  	swapper_pg_dir = .;
>  	. += PAGE_SIZE;
>  	swapper_pg_end = .;
> +#endif



Thanks,

James


* Re: [PATCH v2 2/2] arm64/mm: Move {tramp_pg_dir, swapper_pg_dir} to .rodata section
  2018-06-25 11:39 ` [PATCH v2 2/2] arm64/mm: Move {tramp_pg_dir, swapper_pg_dir} to .rodata section Jun Yao
  2018-06-26 11:21   ` James Morse
@ 2018-06-27 11:07   ` Suzuki K Poulose
  2018-06-27 11:23     ` James Morse
From: Suzuki K Poulose @ 2018-06-27 11:07 UTC (permalink / raw)
  To: Jun Yao, linux-arm-kernel
  Cc: catalin.marinas, will.deacon, ard.biesheuvel, james.morse,
	linux-kernel, kernel-hardening

Hi Jun,

On 25/06/18 12:39, Jun Yao wrote:
> When CONFIG_ARM64_VA_BITS_36/CONFIG_ARM64_VA_BITS_39/
> CONFIG_ARM64_VA_BITS_42 are selected, a block-mapping can be
> written to swapper_pg_dir. To defend 'KSMA', we move swapper_pg_dir
> to .rodata section when these configurations are selected. At the
> same time, we update swapper_pg_dir by fixmap.
> 
> Signed-off-by: Jun Yao <yaojun8558363@gmail.com>
> ---
>   arch/arm64/include/asm/fixmap.h  |  1 +
>   arch/arm64/include/asm/pgalloc.h | 33 ++++++++++++++++++++++++++++++++
>   arch/arm64/include/asm/pgtable.h |  5 +++++
>   arch/arm64/kernel/head.S         |  6 +++---
>   arch/arm64/kernel/vmlinux.lds.S  | 23 ++++++++++++++++++++++
>   arch/arm64/mm/mmu.c              |  6 ++++++
>   6 files changed, 71 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
> index 62908eeedcdc..881784b43965 100644
> --- a/arch/arm64/include/asm/fixmap.h
> +++ b/arch/arm64/include/asm/fixmap.h
> @@ -83,6 +83,7 @@ enum fixed_addresses {
>   	FIX_PTE,
>   	FIX_PMD,
>   	FIX_PUD,
> +	FIX_SWAPPER,
>   
>   	__end_of_fixed_addresses
>   };
> diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
> index 2e05bcd944c8..62512ad9c310 100644
> --- a/arch/arm64/include/asm/pgalloc.h
> +++ b/arch/arm64/include/asm/pgalloc.h
> @@ -49,6 +49,22 @@ static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
>   
>   static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmdp)
>   {
> +#ifdef CONFIG_ARM64_VA_BITS_39

Could we please use

#ifdef __PAGETABLE_PUD_FOLDED ^^


> +	}
> +#endif
>   	__pud_populate(pudp, __pa(pmdp), PMD_TYPE_TABLE);
>   }
>   #else
> @@ -142,6 +158,23 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
>   	/*
>   	 * The pmd must be loaded with the physical address of the PTE table
>   	 */
> +#if defined(CONFIG_ARM64_VA_BITS_42) || \
> +	defined(CONFIG_ARM64_VA_BITS_36)

and

#ifdef __PAGETABLE_PMD_FOLDED here ^^


> +#if defined(CONFIG_ARM64_VA_BITS_39) || \
> +	defined(CONFIG_ARM64_VA_BITS_36) || \
> +	defined(CONFIG_ARM64_VA_BITS_42)

and

#ifdef __PAGETABLE_PMD_FOLDED ^^ (as it implies
__PAGETABLE_PUD_FOLDED)

instead ?


Cheers
Suzuki


* Re: [PATCH v2 2/2] arm64/mm: Move {tramp_pg_dir, swapper_pg_dir} to .rodata section
  2018-06-27 11:07   ` Suzuki K Poulose
@ 2018-06-27 11:23     ` James Morse
From: James Morse @ 2018-06-27 11:23 UTC (permalink / raw)
  To: Suzuki K Poulose, Jun Yao
  Cc: linux-arm-kernel, catalin.marinas, will.deacon, ard.biesheuvel,
	linux-kernel, kernel-hardening

Hi Suzuki, Jun,

On 27/06/18 12:07, Suzuki K Poulose wrote:
> On 25/06/18 12:39, Jun Yao wrote:
>> When CONFIG_ARM64_VA_BITS_36/CONFIG_ARM64_VA_BITS_39/
>> CONFIG_ARM64_VA_BITS_42 are selected, a block-mapping can be
>> written to swapper_pg_dir. To defend 'KSMA', we move swapper_pg_dir
>> to .rodata section when these configurations are selected. At the
>> same time, we update swapper_pg_dir by fixmap.

>> diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
>> index 2e05bcd944c8..62512ad9c310 100644
>> --- a/arch/arm64/include/asm/pgalloc.h
>> +++ b/arch/arm64/include/asm/pgalloc.h
>> @@ -49,6 +49,22 @@ static inline void __pud_populate(pud_t *pudp, phys_addr_t
>> pmdp, pudval_t prot)
>>     static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t
>> *pmdp)
>>   {
>> +#ifdef CONFIG_ARM64_VA_BITS_39
> 
> Could we please use
> 
> #ifdef __PAGETABLE_PUD_FOLDED ^^

>> @@ -142,6 +158,23 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
>> pte_t *ptep)
>>       /*
>>        * The pmd must be loaded with the physical address of the PTE table
>>        */
>> +#if defined(CONFIG_ARM64_VA_BITS_42) || \
>> +    defined(CONFIG_ARM64_VA_BITS_36)
> 
> and
> 
> #ifdef __PAGETABLE_PMD_FOLDED here ^^
> 
> 
>> +#if defined(CONFIG_ARM64_VA_BITS_39) || \
>> +    defined(CONFIG_ARM64_VA_BITS_36) || \
>> +    defined(CONFIG_ARM64_VA_BITS_42)
> 
> and
> 
> #ifdef __PAGETABLE_PMD_FOLDED ^^ (as it implies
> __PAGETABLE_PUD_FOLDED)
> 
> instead ?

Aha, I didn't know these existed. As we are only interested in matching one
level for now, this is even clearer.


Thanks,

James


