* [RESEND PATCH v4 0/6] arm64/mm: Move swapper_pg_dir to rodata
From: Jun Yao @ 2018-08-22  9:54 UTC
  To: linux-arm-kernel; +Cc: catalin.marinas, will.deacon, james.morse, linux-kernel

set_init_mm_pgd() is reimplemented in assembly in order to avoid
it being instrumented by KASAN.
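
For reference, patch 3 implements the helper as a plain store done via
inline asm, so that the compiler emits no KASAN checks around it:

------------------%<------------------
void __init set_init_mm_pgd(pgd_t *pgd)
{
	pgd_t **addr = &(init_mm.pgd);

	/* A plain store, hidden from the compiler (and thus from KASAN). */
	asm volatile("str %x0, [%1]\n"
			: : "r" (pgd), "r" (addr) : "memory");
}
------------------%<------------------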

The following configs were tested on qemu, each with
CONFIG_RANDOMIZE_BASE, CONFIG_UNMAP_KERNEL_AT_EL0,
CONFIG_ARM64_SW_TTBR0_PAN and CONFIG_KASAN_OUTLINE enabled:

	1. CONFIG_ARM64_4K_PAGES/CONFIG_ARM64_VA_BITS_48
	2. CONFIG_ARM64_4K_PAGES/CONFIG_ARM64_VA_BITS_39
	3. CONFIG_ARM64_64K_PAGES/CONFIG_ARM64_VA_BITS_48
	4. CONFIG_ARM64_64K_PAGES/CONFIG_ARM64_VA_BITS_42

Jun Yao (6):
  arm64/mm: Introduce the init_pg_dir.
  arm64/mm: Pass ttbr1 as a parameter to __enable_mmu().
  arm64/mm: Create the initial page table in the init_pg_dir.
  arm64/mm: Create the final page table directly in swapper_pg_dir.
  arm64/mm: Populate the swapper_pg_dir by fixmap.
  arm64/mm: Move {idmap_pg_dir .. swapper_pg_dir} to rodata section.

 arch/arm64/include/asm/assembler.h | 29 +++++++++++++
 arch/arm64/include/asm/pgtable.h   | 66 ++++++++++++++++++++++++++----
 arch/arm64/kernel/head.S           | 48 ++++++++++++++--------
 arch/arm64/kernel/sleep.S          |  1 +
 arch/arm64/kernel/vmlinux.lds.S    | 47 ++++++++++++++-------
 arch/arm64/mm/mmu.c                | 45 ++++++++------------
 6 files changed, 168 insertions(+), 68 deletions(-)

-- 
2.17.1



* [RESEND PATCH v4 1/6] arm64/mm: Introduce the init_pg_dir.
From: Jun Yao @ 2018-08-22  9:54 UTC
  To: linux-arm-kernel; +Cc: catalin.marinas, will.deacon, james.morse, linux-kernel

To make swapper_pg_dir read-only, we will move it to the rodata
section and force the kernel to set up the initial page tables in
init_pg_dir. After all levels of the page tables have been generated,
we copy only the top level into swapper_pg_dir during paging_init().

Signed-off-by: Jun Yao <yaojun8558363@gmail.com>
---
 arch/arm64/include/asm/assembler.h | 29 +++++++++++++++++++++++++++++
 arch/arm64/kernel/head.S           | 22 +++++++++++++++-------
 arch/arm64/kernel/vmlinux.lds.S    |  8 ++++++++
 3 files changed, 52 insertions(+), 7 deletions(-)

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 0bcc98dbba56..eb363a915c0e 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -456,6 +456,35 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
 	b.ne	9998b
 	.endm
 
+/*
+ * clear_page - clear one page
+ *
+ *	start:	page aligned virtual address
+ */
+	.macro clear_page, start:req
+9996:	stp	xzr, xzr, [\start], #16
+	stp	xzr, xzr, [\start], #16
+	stp	xzr, xzr, [\start], #16
+	stp	xzr, xzr, [\start], #16
+	tst	\start, #(PAGE_SIZE - 1)
+	b.ne	9996b
+	.endm
+
+/*
+ * clear_pages - clear contiguous pages
+ *
+ *	start, end: page aligend virtual addresses
+ */
+	.macro clear_pages, start:req, end:req
+	sub	\end, \end, \start
+	lsr	\end, \end, #(PAGE_SHIFT)
+9997:	cbz	\end, 9998f
+	clear_page \start
+	sub	\end, \end, #1
+	b	9997b
+9998:
+	.endm
+
 /*
  * Annotate a function as position independent, i.e., safe to be called before
  * the kernel virtual mapping is activated.
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b0853069702f..2c83a8c47e3f 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -295,18 +295,21 @@ __create_page_tables:
 	sub	x1, x1, x0
 	bl	__inval_dcache_area
 
+	adrp	x0, init_pg_dir
+	adrp	x1, init_pg_end
+	sub	x1, x1, x0
+	bl	__inval_dcache_area
+
 	/*
 	 * Clear the idmap and swapper page tables.
 	 */
 	adrp	x0, idmap_pg_dir
 	adrp	x1, swapper_pg_end
-	sub	x1, x1, x0
-1:	stp	xzr, xzr, [x0], #16
-	stp	xzr, xzr, [x0], #16
-	stp	xzr, xzr, [x0], #16
-	stp	xzr, xzr, [x0], #16
-	subs	x1, x1, #64
-	b.ne	1b
+	clear_pages x0, x1
+
+	adrp	x0, init_pg_dir
+	adrp	x1, init_pg_end
+	clear_pages x0, x1
 
 	mov	x7, SWAPPER_MM_MMUFLAGS
 
@@ -395,6 +398,11 @@ __create_page_tables:
 	dmb	sy
 	bl	__inval_dcache_area
 
+	adrp	x0, init_pg_dir
+	adrp	x1, init_pg_end
+	sub	x1, x1, x0
+	bl	__inval_dcache_area
+
 	ret	x28
 ENDPROC(__create_page_tables)
 	.ltorg
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 605d1b60469c..61d7cee3eaa6 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -68,6 +68,12 @@ jiffies = jiffies_64;
 #define TRAMP_TEXT
 #endif
 
+#define INIT_PG_TABLES					\
+	. = ALIGN(PAGE_SIZE);                           \
+	init_pg_dir = .;                                \
+	. += SWAPPER_DIR_SIZE;                          \
+	init_pg_end = .;
+
 /*
  * The size of the PE/COFF section that covers the kernel image, which
  * runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -161,6 +167,8 @@ SECTIONS
 	__inittext_end = .;
 	__initdata_begin = .;
 
+	INIT_PG_TABLES
+
 	.init.data : {
 		INIT_DATA
 		INIT_SETUP(16)
-- 
2.17.1



* [RESEND PATCH v4 2/6] arm64/mm: Pass ttbr1 as a parameter to __enable_mmu().
From: Jun Yao @ 2018-08-22  9:54 UTC
  To: linux-arm-kernel; +Cc: catalin.marinas, will.deacon, james.morse, linux-kernel

The kernel sets up the initial page table in the init_pg_dir.
However, it will create the final page table in the swapper_pg_dir
during the initialization process. We need to let __enable_mmu()
know which page table to use.

Signed-off-by: Jun Yao <yaojun8558363@gmail.com>
---
 arch/arm64/kernel/head.S  | 21 ++++++++++++---------
 arch/arm64/kernel/sleep.S |  1 +
 2 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 2c83a8c47e3f..c3e4b1886cde 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -714,6 +714,7 @@ secondary_startup:
 	 * Common entry point for secondary CPUs.
 	 */
 	bl	__cpu_setup			// initialise processor
+	adrp	x1, swapper_pg_dir
 	bl	__enable_mmu
 	ldr	x8, =__secondary_switched
 	br	x8
@@ -756,6 +757,7 @@ ENDPROC(__secondary_switched)
  * Enable the MMU.
  *
  *  x0  = SCTLR_EL1 value for turning on the MMU.
+ *  x1  = TTBR1_EL1 value for turning on the MMU.
  *
  * Returns to the caller via x30/lr. This requires the caller to be covered
  * by the .idmap.text section.
@@ -764,15 +766,15 @@ ENDPROC(__secondary_switched)
  * If it isn't, park the CPU
  */
 ENTRY(__enable_mmu)
-	mrs	x1, ID_AA64MMFR0_EL1
-	ubfx	x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
-	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
+	mrs	x5, ID_AA64MMFR0_EL1
+	ubfx	x6, x5, #ID_AA64MMFR0_TGRAN_SHIFT, 4
+	cmp	x6, #ID_AA64MMFR0_TGRAN_SUPPORTED
 	b.ne	__no_granule_support
-	update_early_cpu_boot_status 0, x1, x2
-	adrp	x1, idmap_pg_dir
-	adrp	x2, swapper_pg_dir
-	phys_to_ttbr x3, x1
-	phys_to_ttbr x4, x2
+	update_early_cpu_boot_status 0, x5, x6
+	adrp	x5, idmap_pg_dir
+	mov	x6, x1
+	phys_to_ttbr x3, x5
+	phys_to_ttbr x4, x6
 	msr	ttbr0_el1, x3			// load TTBR0
 	msr	ttbr1_el1, x4			// load TTBR1
 	isb
@@ -791,7 +793,7 @@ ENDPROC(__enable_mmu)
 
 __no_granule_support:
 	/* Indicate that this CPU can't boot and is stuck in the kernel */
-	update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2
+	update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x5, x6
 1:
 	wfe
 	wfi
@@ -831,6 +833,7 @@ __primary_switch:
 	mrs	x20, sctlr_el1			// preserve old SCTLR_EL1 value
 #endif
 
+	adrp	x1, swapper_pg_dir
 	bl	__enable_mmu
 #ifdef CONFIG_RELOCATABLE
 	bl	__relocate_kernel
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index bebec8ef9372..3e53ffa07994 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -101,6 +101,7 @@ ENTRY(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
 	bl	__cpu_setup
 	/* enable the MMU early - so we can access sleep_save_stash by va */
+	adrp	x1, swapper_pg_dir
 	bl	__enable_mmu
 	ldr	x8, =_cpu_resume
 	br	x8
-- 
2.17.1



* [RESEND PATCH v4 3/6] arm64/mm: Create the initial page table in the init_pg_dir.
From: Jun Yao @ 2018-08-22  9:54 UTC
  To: linux-arm-kernel; +Cc: catalin.marinas, will.deacon, james.morse, linux-kernel

Create the initial page tables in init_pg_dir. Before calling
kasan_early_init(), update init_mm.pgd by introducing
set_init_mm_pgd(). This ensures that pgd_offset_k() works correctly.
When the final page tables are created, init_mm.pgd is redirected
to swapper_pg_dir.
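
For context, a simplified sketch of the existing definitions (not part
of this patch) shows why init_mm.pgd must be correct this early:

------------------%<------------------
/* pgd_offset_k() walks whatever init_mm.pgd currently points at: */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
------------------%<------------------

If init_mm.pgd still pointed at swapper_pg_dir while the tables
actually live in init_pg_dir, early callers such as kasan_early_init()
would walk empty tables.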

Signed-off-by: Jun Yao <yaojun8558363@gmail.com>
---
 arch/arm64/include/asm/pgtable.h |  2 ++
 arch/arm64/kernel/head.S         |  9 ++++++---
 arch/arm64/mm/mmu.c              | 14 ++++++++++++++
 3 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 1bdeca8918a6..46ef21ebfe47 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -712,6 +712,8 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 }
 #endif
 
+extern pgd_t init_pg_dir[PTRS_PER_PGD];
+extern pgd_t init_pg_end[];
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t swapper_pg_end[];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c3e4b1886cde..ede2e964592b 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -376,7 +376,7 @@ __create_page_tables:
 	/*
 	 * Map the kernel image (starting with PHYS_OFFSET).
 	 */
-	adrp	x0, swapper_pg_dir
+	adrp	x0, init_pg_dir
 	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
 	add	x5, x5, x23			// add KASLR displacement
 	mov	x4, PTRS_PER_PGD
@@ -402,7 +402,6 @@ __create_page_tables:
 	adrp	x1, init_pg_end
 	sub	x1, x1, x0
 	bl	__inval_dcache_area
-
 	ret	x28
 ENDPROC(__create_page_tables)
 	.ltorg
@@ -439,6 +438,9 @@ __primary_switched:
 	bl	__pi_memset
 	dsb	ishst				// Make zero page visible to PTW
 
+	adrp	x0, init_pg_dir
+	bl	set_init_mm_pgd
+
 #ifdef CONFIG_KASAN
 	bl	kasan_early_init
 #endif
@@ -833,8 +835,9 @@ __primary_switch:
 	mrs	x20, sctlr_el1			// preserve old SCTLR_EL1 value
 #endif
 
-	adrp	x1, swapper_pg_dir
+	adrp	x1, init_pg_dir
 	bl	__enable_mmu
+
 #ifdef CONFIG_RELOCATABLE
 	bl	__relocate_kernel
 #ifdef CONFIG_RANDOMIZE_BASE
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 65f86271f02b..f7e544f6f3eb 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -623,6 +623,19 @@ static void __init map_kernel(pgd_t *pgdp)
 	kasan_copy_shadow(pgdp);
 }
 
+/*
+ * set_init_mm_pgd() just updates init_mm.pgd. The purpose of using
+ * assembly is to prevent KASAN instrumentation, as KASAN has not
+ * been initialized when this function is called.
+ */
+void __init set_init_mm_pgd(pgd_t *pgd)
+{
+	pgd_t **addr = &(init_mm.pgd);
+
+	asm volatile("str %x0, [%1]\n"
+			: : "r" (pgd), "r" (addr) : "memory");
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps and sets up the zero page.
@@ -646,6 +659,7 @@ void __init paging_init(void)
 	cpu_replace_ttbr1(__va(pgd_phys));
 	memcpy(swapper_pg_dir, pgdp, PGD_SIZE);
 	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+	set_init_mm_pgd(swapper_pg_dir);
 
 	pgd_clear_fixmap();
 	memblock_free(pgd_phys, PAGE_SIZE);
-- 
2.17.1



* [RESEND PATCH v4 4/6] arm64/mm: Create the final page table directly in swapper_pg_dir.
From: Jun Yao @ 2018-08-22  9:54 UTC
  To: linux-arm-kernel; +Cc: catalin.marinas, will.deacon, james.morse, linux-kernel

As the initial page tables are created in init_pg_dir, we can set
up the final page tables directly in swapper_pg_dir. It then only
needs to hold the top-level page table, so we can reduce it to a
single page.

Signed-off-by: Jun Yao <yaojun8558363@gmail.com>
---
 arch/arm64/kernel/vmlinux.lds.S |  2 +-
 arch/arm64/mm/mmu.c             | 29 ++---------------------------
 2 files changed, 3 insertions(+), 28 deletions(-)

diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 61d7cee3eaa6..2446911f4262 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -237,7 +237,7 @@ SECTIONS
 	. += RESERVED_TTBR0_SIZE;
 #endif
 	swapper_pg_dir = .;
-	. += SWAPPER_DIR_SIZE;
+	. += PAGE_SIZE;
 	swapper_pg_end = .;
 
 	__pecoff_data_size = ABSOLUTE(. - __initdata_begin);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index f7e544f6f3eb..b7f9afb628ac 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -642,35 +642,10 @@ void __init set_init_mm_pgd(pgd_t *pgd)
  */
 void __init paging_init(void)
 {
-	phys_addr_t pgd_phys = early_pgtable_alloc();
-	pgd_t *pgdp = pgd_set_fixmap(pgd_phys);
-
-	map_kernel(pgdp);
-	map_mem(pgdp);
-
-	/*
-	 * We want to reuse the original swapper_pg_dir so we don't have to
-	 * communicate the new address to non-coherent secondaries in
-	 * secondary_entry, and so cpu_switch_mm can generate the address with
-	 * adrp+add rather than a load from some global variable.
-	 *
-	 * To do this we need to go via a temporary pgd.
-	 */
-	cpu_replace_ttbr1(__va(pgd_phys));
-	memcpy(swapper_pg_dir, pgdp, PGD_SIZE);
+	map_kernel(swapper_pg_dir);
+	map_mem(swapper_pg_dir);
 	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 	set_init_mm_pgd(swapper_pg_dir);
-
-	pgd_clear_fixmap();
-	memblock_free(pgd_phys, PAGE_SIZE);
-
-	/*
-	 * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
-	 * allocated with it.
-	 */
-	memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
-		      __pa_symbol(swapper_pg_end) - __pa_symbol(swapper_pg_dir)
-		      - PAGE_SIZE);
 }
 
 /*
-- 
2.17.1



* [RESEND PATCH v4 5/6] arm64/mm: Populate the swapper_pg_dir by fixmap.
From: Jun Yao @ 2018-08-22  9:54 UTC
  To: linux-arm-kernel; +Cc: catalin.marinas, will.deacon, james.morse, linux-kernel

Since we will move swapper_pg_dir to the rodata section, we need a
way to update it. The fixmap can handle this: whenever swapper_pg_dir
needs to be updated, we map it dynamically and remove the mapping
once the update is complete. In this way, we can defend against
KSMA (Kernel Space Mirror Attack), where a single rogue write into
the kernel's top-level page table gives an attacker a writable
mirror of kernel memory.
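
The pattern common to the set_pgd()/set_pud()/set_pmd() changes below,
distilled into a single illustrative helper (a sketch, not part of the
patch; the helper name is invented here):

------------------%<------------------
/* Route a write targeting swapper_pg_dir through a temporary fixmap. */
static void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgd_t *fixmap_pgdp;

	spin_lock(&swapper_pgdir_lock);
	fixmap_pgdp = pgd_set_fixmap(__pa(pgdp));	/* writable alias */
	WRITE_ONCE(*fixmap_pgdp, pgd);
	dsb(ishst);		/* make the update visible to the table walker */
	pgd_clear_fixmap();	/* tear the writable alias down again */
	spin_unlock(&swapper_pgdir_lock);
}
------------------%<------------------

Outside this short window there is no writable mapping of
swapper_pg_dir left for an attacker to abuse.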

Signed-off-by: Jun Yao <yaojun8558363@gmail.com>
---
 arch/arm64/include/asm/pgtable.h | 68 ++++++++++++++++++++++++++------
 arch/arm64/mm/mmu.c              |  2 +
 2 files changed, 59 insertions(+), 11 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 46ef21ebfe47..d5c3df99af7b 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -45,6 +45,13 @@
 #include <linux/mm_types.h>
 #include <linux/sched.h>
 
+extern pgd_t init_pg_dir[PTRS_PER_PGD];
+extern pgd_t init_pg_end[];
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern pgd_t swapper_pg_end[];
+extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
+
 extern void __pte_error(const char *file, int line, unsigned long val);
 extern void __pmd_error(const char *file, int line, unsigned long val);
 extern void __pud_error(const char *file, int line, unsigned long val);
@@ -428,8 +435,32 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				 PUD_TYPE_TABLE)
 #endif
 
+extern spinlock_t swapper_pgdir_lock;
+
+#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
+#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)
+
+static inline bool in_swapper_pgdir(void *addr)
+{
+	return ((unsigned long)addr & PAGE_MASK) ==
+		((unsigned long)swapper_pg_dir & PAGE_MASK);
+}
+
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
+#ifdef __PAGETABLE_PMD_FOLDED
+	if (in_swapper_pgdir(pmdp)) {
+		pmd_t *fixmap_pmdp;
+
+		spin_lock(&swapper_pgdir_lock);
+		fixmap_pmdp = (pmd_t *)pgd_set_fixmap(__pa(pmdp));
+		WRITE_ONCE(*fixmap_pmdp, pmd);
+		dsb(ishst);
+		pgd_clear_fixmap();
+		spin_unlock(&swapper_pgdir_lock);
+		return;
+	}
+#endif
 	WRITE_ONCE(*pmdp, pmd);
 	dsb(ishst);
 }
@@ -480,6 +511,19 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
+#ifdef __PAGETABLE_PUD_FOLDED
+	if (in_swapper_pgdir(pudp)) {
+		pud_t *fixmap_pudp;
+
+		spin_lock(&swapper_pgdir_lock);
+		fixmap_pudp = (pud_t *)pgd_set_fixmap(__pa(pudp));
+		WRITE_ONCE(*fixmap_pudp, pud);
+		dsb(ishst);
+		pgd_clear_fixmap();
+		spin_unlock(&swapper_pgdir_lock);
+		return;
+	}
+#endif
 	WRITE_ONCE(*pudp, pud);
 	dsb(ishst);
 }
@@ -532,8 +576,19 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
 
 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-	WRITE_ONCE(*pgdp, pgd);
-	dsb(ishst);
+	if (in_swapper_pgdir(pgdp)) {
+		pgd_t *fixmap_pgdp;
+
+		spin_lock(&swapper_pgdir_lock);
+		fixmap_pgdp = pgd_set_fixmap(__pa(pgdp));
+		WRITE_ONCE(*fixmap_pgdp, pgd);
+		dsb(ishst);
+		pgd_clear_fixmap();
+		spin_unlock(&swapper_pgdir_lock);
+	} else {
+		WRITE_ONCE(*pgdp, pgd);
+		dsb(ishst);
+	}
 }
 
 static inline void pgd_clear(pgd_t *pgdp)
@@ -586,8 +641,6 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
 
-#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
-#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
@@ -712,13 +765,6 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 }
 #endif
 
-extern pgd_t init_pg_dir[PTRS_PER_PGD];
-extern pgd_t init_pg_end[];
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-extern pgd_t swapper_pg_end[];
-extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
-extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
-
 /*
  * Encode and decode a swap entry:
  *	bits 0-1:	present (must be zero)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index b7f9afb628ac..691a05bbf87b 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -67,6 +67,8 @@ static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
 static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
 static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
 
+DEFINE_SPINLOCK(swapper_pgdir_lock);
+
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
 {
-- 
2.17.1



* [RESEND PATCH v4 6/6] arm64/mm: Move {idmap_pg_dir .. swapper_pg_dir} to rodata section.
From: Jun Yao @ 2018-08-22  9:54 UTC
  To: linux-arm-kernel; +Cc: catalin.marinas, will.deacon, james.morse, linux-kernel

Move idmap_pg_dir, tramp_pg_dir, reserved_ttbr0 and swapper_pg_dir
to the rodata section. idmap_pg_dir, tramp_pg_dir and reserved_ttbr0
do not change once the kernel has been initialized, so it is safe
to make them read-only; swapper_pg_dir is now only written through
the fixmap.
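
After this change, KERNEL_PG_TABLES lays the fixed translation tables
out inside the read-only data, roughly as:

	idmap_pg_dir	(IDMAP_DIR_SIZE)
	tramp_pg_dir	(PAGE_SIZE, if CONFIG_UNMAP_KERNEL_AT_EL0)
	reserved_ttbr0	(RESERVED_TTBR0_SIZE, if CONFIG_ARM64_SW_TTBR0_PAN)
	swapper_pg_dir	(PAGE_SIZE)
	swapper_pg_end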

Signed-off-by: Jun Yao <yaojun8558363@gmail.com>
---
 arch/arm64/kernel/vmlinux.lds.S | 39 ++++++++++++++++++++-------------
 1 file changed, 24 insertions(+), 15 deletions(-)

diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 2446911f4262..142528a23b44 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -64,8 +64,13 @@ jiffies = jiffies_64;
 	*(.entry.tramp.text)				\
 	. = ALIGN(PAGE_SIZE);				\
 	__entry_tramp_text_end = .;
+
+#define TRAMP_PG_TABLE					\
+	tramp_pg_dir = .;				\
+	. += PAGE_SIZE;
 #else
 #define TRAMP_TEXT
+#define TRAMP_PG_TABLE
 #endif
 
 #define INIT_PG_TABLES					\
@@ -74,6 +79,24 @@ jiffies = jiffies_64;
 	. += SWAPPER_DIR_SIZE;                          \
 	init_pg_end = .;
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+#define RESERVED_PG_TABLE				\
+	reserved_ttbr0 = .;				\
+	. += RESERVED_TTBR0_SIZE;
+#else
+#define RESERVED_PG_TABLE
+#endif
+
+#define KERNEL_PG_TABLES				\
+	. = ALIGN(PAGE_SIZE);                           \
+	idmap_pg_dir = .;				\
+	. += IDMAP_DIR_SIZE;				\
+	TRAMP_PG_TABLE					\
+	RESERVED_PG_TABLE				\
+	swapper_pg_dir = .;				\
+	. += PAGE_SIZE;					\
+	swapper_pg_end = .;
+
 /*
  * The size of the PE/COFF section that covers the kernel image, which
  * runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -143,6 +166,7 @@ SECTIONS
 	RO_DATA(PAGE_SIZE)		/* everything from this point to     */
 	EXCEPTION_TABLE(8)		/* __init_begin will be marked RO NX */
 	NOTES
+	KERNEL_PG_TABLES
 
 	. = ALIGN(SEGMENT_ALIGN);
 	__init_begin = .;
@@ -224,21 +248,6 @@ SECTIONS
 	BSS_SECTION(0, 0, 0)
 
 	. = ALIGN(PAGE_SIZE);
-	idmap_pg_dir = .;
-	. += IDMAP_DIR_SIZE;
-
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-	tramp_pg_dir = .;
-	. += PAGE_SIZE;
-#endif
-
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-	reserved_ttbr0 = .;
-	. += RESERVED_TTBR0_SIZE;
-#endif
-	swapper_pg_dir = .;
-	. += PAGE_SIZE;
-	swapper_pg_end = .;
 
 	__pecoff_data_size = ABSOLUTE(. - __initdata_begin);
 	_end = .;
-- 
2.17.1



* Re: [RESEND PATCH v4 0/6] arm64/mm: Move swapper_pg_dir to rodata
From: James Morse @ 2018-09-07  9:57 UTC
  To: Jun Yao; +Cc: linux-arm-kernel, catalin.marinas, will.deacon, linux-kernel

Hi Jun,

(I'm a bit confused about which version of this series I should be looking at.
I have a v4, and two v4-resends, all of which are different. Please only mark
something as 'resend' if it is exactly the same!)


On 22/08/18 10:54, Jun Yao wrote:
> set_init_mm_pgd() is reimplemented in assembly in order to avoid
> it being instrumented by KASAN.

There are some tidier ways of fixing this. The KASAN init code is also C code
that runs before KASAN is initialized; Kbuild is told not to let KASAN touch
it with 'KASAN_SANITIZE_filename.o := n'.

But in this case you're only calling into C code from pre-KASAN head.S, so you
could use the same trick to set init_mm.pgd. I don't think this is worth the
effort, though: we can just do the store in assembly. (More in patch 3.)


Thanks,

James


* Re: [RESEND PATCH v4 1/6] arm64/mm: Introduce the init_pg_dir.
From: James Morse @ 2018-09-07  9:57 UTC
  To: Jun Yao; +Cc: linux-arm-kernel, catalin.marinas, will.deacon, linux-kernel

Hi Jun,

On 22/08/18 10:54, Jun Yao wrote:
> To make swapper_pg_dir read-only, we will move it to the rodata
> section and force the kernel to set up the initial page tables in
> init_pg_dir. After all levels of the page tables have been generated,
> we copy only the top level into swapper_pg_dir during paging_init().

Could you add v3's
| Add init_pg_dir to vmlinux.lds.S and boiler-plate
| clearing/cleaning/invalidating it in head.S.

too. This makes it obvious that 'init_pg_dir isn't used yet' is deliberate.

Reviewed-by: James Morse <james.morse@arm.com>


Some boring nits:

> diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
> index 0bcc98dbba56..eb363a915c0e 100644
> --- a/arch/arm64/include/asm/assembler.h
> +++ b/arch/arm64/include/asm/assembler.h
> @@ -456,6 +456,35 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU

> +/*
> + * clear_pages - clear contiguous pages
> + *
> + *	start, end: page aligend virtual addresses

(Nit: aligned)


> + */
> +	.macro clear_pages, start:req, end:req

> diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
> index 605d1b60469c..61d7cee3eaa6 100644
> --- a/arch/arm64/kernel/vmlinux.lds.S
> +++ b/arch/arm64/kernel/vmlinux.lds.S
> @@ -68,6 +68,12 @@ jiffies = jiffies_64;
>  #define TRAMP_TEXT
>  #endif
>  
> +#define INIT_PG_TABLES					\

                               ^ These are tabs ...

> +	. = ALIGN(PAGE_SIZE);                           \

                               ^ ... but these are spaces.

> +	init_pg_dir = .;                                \
> +	. += SWAPPER_DIR_SIZE;                          \
> +	init_pg_end = .;

Please pick one and stick with it. The macro above, under
CONFIG_UNMAP_KERNEL_AT_EL0, uses tabs; please do the same.



Thanks,

James


* Re: [RESEND PATCH v4 2/6] arm64/mm: Pass ttbr1 as a parameter to __enable_mmu().
From: James Morse @ 2018-09-07  9:57 UTC
  To: Jun Yao; +Cc: linux-arm-kernel, catalin.marinas, will.deacon, linux-kernel

Hi Jun,

On 22/08/18 10:54, Jun Yao wrote:
> The kernel sets up the initial page table in the init_pg_dir.

(Nit: 'will set up', it doesn't until patch 3.)

> However, it will create the final page table in the swapper_pg_dir
> during the initialization process. We need to let __enable_mmu()
> know which page table to use.

> diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
> index 2c83a8c47e3f..c3e4b1886cde 100644
> --- a/arch/arm64/kernel/head.S
> +++ b/arch/arm64/kernel/head.S
> @@ -756,6 +757,7 @@ ENDPROC(__secondary_switched)
>   * Enable the MMU.
>   *
>   *  x0  = SCTLR_EL1 value for turning on the MMU.
> + *  x1  = TTBR1_EL1 value for turning on the MMU.
>   *
>   * Returns to the caller via x30/lr. This requires the caller to be covered
>   * by the .idmap.text section.
> @@ -764,15 +766,15 @@ ENDPROC(__secondary_switched)
>   * If it isn't, park the CPU
>   */
>  ENTRY(__enable_mmu)
> -	mrs	x1, ID_AA64MMFR0_EL1
> -	ubfx	x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
> -	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
> +	mrs	x5, ID_AA64MMFR0_EL1
> +	ubfx	x6, x5, #ID_AA64MMFR0_TGRAN_SHIFT, 4
> +	cmp	x6, #ID_AA64MMFR0_TGRAN_SUPPORTED
>  	b.ne	__no_granule_support
> -	update_early_cpu_boot_status 0, x1, x2
> -	adrp	x1, idmap_pg_dir
> -	adrp	x2, swapper_pg_dir
> -	phys_to_ttbr x3, x1
> -	phys_to_ttbr x4, x2
> +	update_early_cpu_boot_status 0, x5, x6
> +	adrp	x5, idmap_pg_dir
> +	mov	x6, x1
> +	phys_to_ttbr x3, x5
> +	phys_to_ttbr x4, x6
>  	msr	ttbr0_el1, x3			// load TTBR0
>  	msr	ttbr1_el1, x4			// load TTBR1
>  	isb
> @@ -791,7 +793,7 @@ ENDPROC(__enable_mmu)
>  
>  __no_granule_support:
>  	/* Indicate that this CPU can't boot and is stuck in the kernel */


> -	update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2
> +	update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x5, x6

(You don't need to change these as they are both temporary registers.)

Reviewed-by: James Morse <james.morse@arm.com>


Thanks,

James


* Re: [RESEND PATCH v4 4/6] arm64/mm: Create the final page table directly in swapper_pg_dir.
From: James Morse @ 2018-09-07  9:57 UTC
  To: Jun Yao; +Cc: linux-arm-kernel, catalin.marinas, will.deacon, linux-kernel

Hi Jun,

On 22/08/18 10:54, Jun Yao wrote:
> As the initial page tables are created in init_pg_dir, we can set
> up the final page tables directly in swapper_pg_dir. It then only
> needs to hold the top-level page table, so we can reduce it to a
> single page.

Reviewed-by: James Morse <james.morse@arm.com>


Thanks,

James


* Re: [RESEND PATCH v4 3/6] arm64/mm: Create the initial page table in the init_pg_dir.
From: James Morse @ 2018-09-07  9:57 UTC
  To: Jun Yao; +Cc: linux-arm-kernel, catalin.marinas, will.deacon, linux-kernel

Hi Jun,

On 22/08/18 10:54, Jun Yao wrote:
> Create the initial page tables in init_pg_dir. Before calling
> kasan_early_init(), update init_mm.pgd by introducing
> set_init_mm_pgd(). This ensures that pgd_offset_k() works correctly.
> When the final page tables are created, init_mm.pgd is redirected
> to swapper_pg_dir.

> diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
> index c3e4b1886cde..ede2e964592b 100644
> --- a/arch/arm64/kernel/head.S
> +++ b/arch/arm64/kernel/head.S
> @@ -402,7 +402,6 @@ __create_page_tables:
>  	adrp	x1, init_pg_end
>  	sub	x1, x1, x0
>  	bl	__inval_dcache_area
> -
>  	ret	x28
>  ENDPROC(__create_page_tables)
>  	.ltorg

Nit: spurious whitespace change.


> @@ -439,6 +438,9 @@ __primary_switched:
>  	bl	__pi_memset
>  	dsb	ishst				// Make zero page visible to PTW
>  
> +	adrp	x0, init_pg_dir
> +	bl	set_init_mm_pgd

Having a C helper just to do a store is a bit strange; calling C code before
KASAN is ready is clearly causing some pain.

Couldn't we just do the store in assembly here?
------------------%<------------------
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index ede2e964592b..7464fb31452d 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -439,7 +439,8 @@ __primary_switched:
        dsb     ishst                           // Make zero page visible to PTW

        adrp    x0, init_pg_dir
-       bl      set_init_mm_pgd
+       adr_l   x1, init_mm
+       str     x0, [x1, #MM_PGD]

 #ifdef CONFIG_KASAN
        bl      kasan_early_init
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 323aeb5f2fe6..43f52cfdfad4 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -82,6 +82,7 @@ int main(void)
   DEFINE(S_FRAME_SIZE,      sizeof(struct pt_regs));
   BLANK();
   DEFINE(MM_CONTEXT_ID,     offsetof(struct mm_struct, context.id.counter));
+  DEFINE(MM_PGD,            offsetof(struct mm_struct, pgd));
   BLANK();
   DEFINE(VMA_VM_MM,         offsetof(struct vm_area_struct, vm_mm));
   DEFINE(VMA_VM_FLAGS,      offsetof(struct vm_area_struct, vm_flags));
------------------%<------------------


> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 65f86271f02b..f7e544f6f3eb 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -623,6 +623,19 @@ static void __init map_kernel(pgd_t *pgdp)
>  	kasan_copy_shadow(pgdp);
>  }
>  
> +/*
> + * set_init_mm_pgd() just updates init_mm.pgd. The purpose of using
> + * assembly is to prevent KASAN instrumentation, as KASAN has not
> + * been initialized when this function is called.

You're hiding the store from KASAN as its shadow region hasn't been initialized yet?

I think newer versions of the compiler let KASAN check stack accesses too, and
the compiler may generate those all by itself. Hiding things like this gets us
into an arms race with the compiler.
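
If the aim is just to keep instrumentation out of this one helper, an
alternative sketch (untested; whether the kernel's __no_sanitize_address
annotation is usable this early is an assumption here) would be to keep
the plain C store and tell the compiler not to instrument the function,
rather than hiding the store in the asm below:

----------------%<----------------
/*
 * Untested sketch: __no_sanitize_address suppresses KASAN
 * instrumentation for the whole function, so the store stays plain C
 * but no shadow-memory check is emitted for it.
 */
void __init __no_sanitize_address set_init_mm_pgd(pgd_t *pgd)
{
	init_mm.pgd = pgd;
}
----------------%<----------------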


> +void __init set_init_mm_pgd(pgd_t *pgd)
> +{
> +	pgd_t **addr = &(init_mm.pgd);
> +
> +	asm volatile("str %x0, [%1]\n"
> +			: : "r" (pgd), "r" (addr) : "memory");
> +}
> +
>  /*
>   * paging_init() sets up the page tables, initialises the zone memory
>   * maps and sets up the zero page.


Thanks,

James


* Re: [RESEND PATCH v4 5/6] arm64/mm: Populate the swapper_pg_dir by fixmap.
  2018-08-22  9:54   ` Jun Yao
@ 2018-09-07  9:58     ` James Morse
  -1 siblings, 0 replies; 34+ messages in thread
From: James Morse @ 2018-09-07  9:58 UTC (permalink / raw)
  To: Jun Yao; +Cc: linux-arm-kernel, catalin.marinas, will.deacon, linux-kernel

Hi Jun,

On 22/08/18 10:54, Jun Yao wrote:
> Since we will move the swapper_pg_dir to the rodata section, we need a
> way to update it. The fixmap can handle it. When the swapper_pg_dir
> needs to be updated, we map it dynamically. The mapping will be
> removed after the update is complete. In this way, we can defend
> against KSMA (Kernel Space Mirror Attack).


> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index 46ef21ebfe47..d5c3df99af7b 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -428,8 +435,32 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
>  				 PUD_TYPE_TABLE)
>  #endif
>  
> +extern spinlock_t swapper_pgdir_lock;

Hmmm, it would be good if we could avoid exposing this lock.
Wherever this ends up needs to include spinlock.h, and we don't have to do that
in arch headers today.


>  static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
>  {
> +#ifdef __PAGETABLE_PMD_FOLDED
> +	if (in_swapper_pgdir(pmdp)) {
> +		pmd_t *fixmap_pmdp;
> +
> +		spin_lock(&swapper_pgdir_lock);
> +		fixmap_pmdp = (pmd_t *)pgd_set_fixmap(__pa(pmdp));
> +		WRITE_ONCE(*fixmap_pmdp, pmd);
> +		dsb(ishst);
> +		pgd_clear_fixmap();
> +		spin_unlock(&swapper_pgdir_lock);
> +		return;
> +	}
> +#endif

You have this pattern multiple times; it ought to be a macro. (Any reason why
the last copy for pgd is different?)

Putting all this directly into the inlined helper is noisy and risks bloating
the locations it appears in. Could we do the in_swapper_pgdir() test, and if it
passes call some out-of-line set_swapper_pgd() that lives in mm/mmu.c? Once we
know we're using the fixmap I don't think there is a benefit to inline-ing the code.

Doing this would avoid moving the extern defines and p?d_set_fixmap() helpers
around in this header and let us avoid extern-ing the lock or including
spinlock.h in here.
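
A rough sketch of that out-of-line shape (untested; set_swapper_pgd()
and keeping the lock static in mm/mmu.c are illustrative choices, not
settled details):

----------------%<----------------
/* mm/mmu.c */
static DEFINE_SPINLOCK(swapper_pgdir_lock);

void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgd_t *fixmap_pgdp;

	spin_lock(&swapper_pgdir_lock);
	fixmap_pgdp = pgd_set_fixmap(__pa(pgdp));
	WRITE_ONCE(*fixmap_pgdp, pgd);
	/*
	 * The dsb(ishst) needed to make the new entry visible to the
	 * page-table walker is provided by the fixmap's
	 * flush_tlb_kernel_range() via pgd_clear_fixmap().
	 */
	pgd_clear_fixmap();
	spin_unlock(&swapper_pgdir_lock);
}

/* asm/pgtable.h: the inline helper keeps only the test. */
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif
	WRITE_ONCE(*pmdp, pmd);
	dsb(ishst);
}
----------------%<----------------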


>  	WRITE_ONCE(*pmdp, pmd);
>  	dsb(ishst);
>  }
> @@ -480,6 +511,19 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
>  
>  static inline void set_pud(pud_t *pudp, pud_t pud)
>  {
> +#ifdef __PAGETABLE_PUD_FOLDED
> +	if (in_swapper_pgdir(pudp)) {
> +		pud_t *fixmap_pudp;
> +
> +		spin_lock(&swapper_pgdir_lock);
> +		fixmap_pudp = (pud_t *)pgd_set_fixmap(__pa(pudp));

This is a bit subtle: are you using the pgd fixmap entry because the path from
map_mem() uses the other three?

Using the pgd fix slot for a pud looks a bit strange to me, but it's arguably a
side-effect of the folding.

I see this called 68 times during boot on a 64K/42bit-VA, 65 of which appear to
be during paging_init(). What do you think of keeping paging_init()'s use of the
pgd fixmap for swapper_pg_dir, deliberately to skip the in_swapper_pgdir() test
during paging_init()?


> +		WRITE_ONCE(*fixmap_pudp, pud);

> +		dsb(ishst);
> +		pgd_clear_fixmap();

Hmm,

p?d_clear_fixmap() is done by calling __set_fixmap(FIX_P?D, 0, __pgprot(0)).

__set_fixmap() calls flush_tlb_kernel_range() if the flags are 0.

flush_tlb_kernel_range() has a dsb(ishst) before it does the maintenance (even
via flush_tlb_all()).

I think we can replace the dsb() before each p?d_clear_fixmap() call
with a comment that the flush_tlb_*() will do it for us. Something like:

|/*
| * We need dsb(ishst) here to ensure the page-table-walker sees our new entry
| * before set_p?d() returns. The fixmap's flush_tlb_kernel_range() via
| * clear_fixmap() does this for us.
| */


> +		spin_unlock(&swapper_pgdir_lock);
> +		return;
> +	}
> +#endif
>  	WRITE_ONCE(*pudp, pud);
>  	dsb(ishst);
>  }


Thanks,

James


* Re: [RESEND PATCH v4 5/6] arm64/mm: Populate the swapper_pg_dir by fixmap.
  2018-09-07  9:58     ` James Morse
@ 2018-09-10 11:41       ` Jun Yao
  -1 siblings, 0 replies; 34+ messages in thread
From: Jun Yao @ 2018-09-10 11:41 UTC (permalink / raw)
  To: James Morse; +Cc: linux-arm-kernel, catalin.marinas, will.deacon, linux-kernel

Hi James,

On Fri, Sep 07, 2018 at 10:58:22AM +0100, James Morse wrote:
> On 22/08/18 10:54, Jun Yao wrote:
> >  	WRITE_ONCE(*pmdp, pmd);
> >  	dsb(ishst);
> >  }
> > @@ -480,6 +511,19 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
> >  
> >  static inline void set_pud(pud_t *pudp, pud_t pud)
> >  {
> > +#ifdef __PAGETABLE_PUD_FOLDED
> > +	if (in_swapper_pgdir(pudp)) {
> > +		pud_t *fixmap_pudp;
> > +
> > +		spin_lock(&swapper_pgdir_lock);
> > +		fixmap_pudp = (pud_t *)pgd_set_fixmap(__pa(pudp));
> 
> This is a bit subtle: are you using the pgd fixmap entry because the path from
> map_mem() uses the other three?
> 
> Using the pgd fix slot for a pud looks a bit strange to me, but it's arguably a
> side-effect of the folding.

Yes, it's a side-effect of the folding.

When the CONFIG_PGTABLE_LEVELS == 3, the pud is folded into the pgd. It
means that the pgd is never none and it is also a pud. That's why I use
the pgd fixmap entry.

Maybe write this more clearly:

static inline void set_pud(pud_t *pudp, pud_t pud)
{
#ifdef __PAGETABLE_PUD_FOLDED
	pgd_t *pgdp = (pgd_t *)pudp;

	if (...) {
		pgd_t *fixmap_pgdp;
		pud_t *fixmap_pudp;

		spin_lock(...);
		fixmap_pgdp = pgd_set_fixmap(__pa(pgdp));
		fixmap_pudp = pud_set_fixmap_offset(fixmap_pgdp, 0UL);
		...
	}

Do you have any way to make it look more reasonable?

> I see this called 68 times during boot on a 64K/42bit-VA, 65 of which appear to
> be during paging_init(). What do you think of keeping paging_init()'s use of the
> pgd fixmap for swapper_pg_dir, deliberately to skip the in_swapper_pgdir() test
> during paging_init()?

I think the set_pud() should not be called on a 64K/42bit-VA, as only
the level 2 and level 3 page tables are in use. It means that the pmd is
folded into the pud and the pud is never none. So the set_pud() should
not be called.

I think a variable can be introduced to indicate whether paging_init()
has been completed, and decide whether or not to skip the
in_swapper_pgdir() test based on its value.

I don't know if this is reasonable. What do you think?

Thanks,

Jun


* Re: [RESEND PATCH v4 5/6] arm64/mm: Populate the swapper_pg_dir by fixmap.
  2018-09-07  9:58     ` James Morse
@ 2018-09-13 10:50       ` Jun Yao
  -1 siblings, 0 replies; 34+ messages in thread
From: Jun Yao @ 2018-09-13 10:50 UTC (permalink / raw)
  To: James Morse; +Cc: linux-arm-kernel, catalin.marinas, will.deacon, linux-kernel

Hi James,

On Fri, Sep 07, 2018 at 10:58:22AM +0100, James Morse wrote:
> On 22/08/18 10:54, Jun Yao wrote:
> >  	WRITE_ONCE(*pmdp, pmd);
> >  	dsb(ishst);
> >  }
> > @@ -480,6 +511,19 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
> >  
> >  static inline void set_pud(pud_t *pudp, pud_t pud)
> >  {
> > +#ifdef __PAGETABLE_PUD_FOLDED
> > +	if (in_swapper_pgdir(pudp)) {
> > +		pud_t *fixmap_pudp;
> > +
> > +		spin_lock(&swapper_pgdir_lock);
> > +		fixmap_pudp = (pud_t *)pgd_set_fixmap(__pa(pudp));
>
> I see this called 68 times during boot on a 64K/42bit-VA, 65 of which appear to
> be during paging_init(). What do you think of keeping paging_init()'s use of the
> pgd fixmap for swapper_pg_dir, deliberately to skip the in_swapper_pgdir() test
> during paging_init()?

I find that the __create_pgd_mapping() is used to set up the page table
during paging_init(). And there are six functions calling it with
different pgdps:

update_mapping_prot()		init_mm.pgd(swapper_pg_dir)
create_mapping_noalloc()	init_mm.pgd(swapper_pg_dir)
__map_memblock()		pgdp(swapper_pg_dir)
map_kernel_segment()		pgdp(swapper_pg_dir)
create_pgd_mapping()		!(init_mm.pgd)
map_entry_trampoline()		tramp_pg_dir

In order to skip the in_swapper_pgdir() test during paging_init(), we
need a way to determine if we are currently in paging_init(). The way I
can think of is to create a function similar to __create_pgd_mapping().
And it is used to create the page table during paging_init(). It differs
from the __create_pgd_mapping() only in that it calls
p?d_populate_without_test(). However, in this way, I'm worried that I am
reinventing the wheel.

Thanks,

Jun


* Re: [RESEND PATCH v4 5/6] arm64/mm: Populate the swapper_pg_dir by fixmap.
  2018-09-13 10:50       ` Jun Yao
@ 2018-09-14  8:38         ` James Morse
  -1 siblings, 0 replies; 34+ messages in thread
From: James Morse @ 2018-09-14  8:38 UTC (permalink / raw)
  To: linux-arm-kernel; +Cc: catalin.marinas, will.deacon, linux-kernel

Hi Jun,

On 13/09/18 11:50, Jun Yao wrote:
> On Fri, Sep 07, 2018 at 10:58:22AM +0100, James Morse wrote:
>> On 22/08/18 10:54, Jun Yao wrote:
>>>  	WRITE_ONCE(*pmdp, pmd);
>>>  	dsb(ishst);
>>>  }
>>> @@ -480,6 +511,19 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
>>>  
>>>  static inline void set_pud(pud_t *pudp, pud_t pud)
>>>  {
>>> +#ifdef __PAGETABLE_PUD_FOLDED
>>> +	if (in_swapper_pgdir(pudp)) {
>>> +		pud_t *fixmap_pudp;
>>> +
>>> +		spin_lock(&swapper_pgdir_lock);
>>> +		fixmap_pudp = (pud_t *)pgd_set_fixmap(__pa(pudp));
>>
>> I see this called 68 times during boot on a 64K/42bit-VA, 65 of which appear to
>> be during paging_init(). What do you think of keeping paging_init()'s use of the
>> pgd fixmap for swapper_pg_dir, deliberately to skip the in_swapper_pgdir() test
>> during paging_init()?
> 
> I find that the __create_pgd_mapping() is used to set up the page table
> during paging_init(). And there are six functions calling it with
> different pgdps:
> 
> update_mapping_prot()		init_mm.pgd(swapper_pg_dir)
> create_mapping_noalloc()	init_mm.pgd(swapper_pg_dir)
> __map_memblock()		pgdp(swapper_pg_dir)
> map_kernel_segment()		pgdp(swapper_pg_dir)
> create_pgd_mapping()		!(init_mm.pgd)
> map_entry_trampoline()		tramp_pg_dir
> 
> In order to skip the in_swapper_pgdir() test during paging_init(), we
> need a way to determine if we are currently in paging_init().

We don't need to know paging_init() is the caller; we just want to fool
in_swapper_pgdir() into not matching the address. Using pgd_set_fixmap() in
paging_init(), as we do today, would do this, as the value passed to
map_kernel()/map_mem() would never match swapper_pg_dir.

(this is just to stop unnecessary tlbi for every write to swapper_pg_dir, as we
don't need to do that during early boot when it isn't loaded)
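
For reference, the in_swapper_pgdir() test being fooled here is only an
address comparison; a minimal sketch, assuming the top-level table has
been reduced to a single page as patch 4/6 does:

----------------%<----------------
static bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
		((unsigned long)swapper_pg_dir & PAGE_MASK);
}
----------------%<----------------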


> The way I
> can think of is to create a function similar to __create_pgd_mapping().
> And it is used to create the page table during paging_init(). It differs
> from the __create_pgd_mapping() only in that it calls
> p?d_populate_without_test(). However, in this way, I'm worried that I am
> reinventing the wheel.

I agree duplicating the code doesn't sound good.

Something like this? (barely tested):
----------------%<----------------
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 691a05bbf87b..64ba422482cc 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -644,8 +690,12 @@ void __init set_init_mm_pgd(pgd_t *pgd)
  */
 void __init paging_init(void)
 {
-       map_kernel(swapper_pg_dir);
-       map_mem(swapper_pg_dir);
+       pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));
+
+       map_kernel(pgdp);
+       map_mem(pgdp);
+       pgd_clear_fixmap();
+
        cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
        set_init_mm_pgd(swapper_pg_dir);
 }
----------------%<----------------


This reduced the pgd-fixmap setup/teardown calls during boot from 68 to 2...


Thanks,

James


* Re: [RESEND PATCH v4 5/6] arm64/mm: Populate the swapper_pg_dir by fixmap.
  2018-09-10 11:41       ` Jun Yao
@ 2018-09-14  8:44         ` James Morse
  -1 siblings, 0 replies; 34+ messages in thread
From: James Morse @ 2018-09-14  8:44 UTC (permalink / raw)
  To: linux-arm-kernel; +Cc: catalin.marinas, will.deacon, linux-kernel

Hi Jun,

On 10/09/18 12:41, Jun Yao wrote:
> On Fri, Sep 07, 2018 at 10:58:22AM +0100, James Morse wrote:
>> On 22/08/18 10:54, Jun Yao wrote:
>>>  	WRITE_ONCE(*pmdp, pmd);
>>>  	dsb(ishst);
>>>  }
>>> @@ -480,6 +511,19 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
>>>  
>>>  static inline void set_pud(pud_t *pudp, pud_t pud)
>>>  {
>>> +#ifdef __PAGETABLE_PUD_FOLDED
>>> +	if (in_swapper_pgdir(pudp)) {
>>> +		pud_t *fixmap_pudp;
>>> +
>>> +		spin_lock(&swapper_pgdir_lock);
>>> +		fixmap_pudp = (pud_t *)pgd_set_fixmap(__pa(pudp));
>>
>> This is a bit subtle: are you using the pgd fixmap entry because the path from
>> map_mem() uses the other three?
>>
>> Using the pgd fix slot for a pud looks a bit strange to me, but it's arguably a
>> side-effect of the folding.
> 
> Yes, it's a side-effect of the folding.
> 
> When the CONFIG_PGTABLE_LEVELS == 3, the pud is folded into the pgd. It
> means that the pgd is never none and it is also a pud. That's why I use
> the pgd fixmap entry.
> 
> Maybe write this more clearly:
> 
> static inline void set_pud(pud_t *pudp, pud_t pud)
> {
> #ifdef __PAGETABLE_PUD_FOLDED
> 	pgd_t *pgdp = (pgd_t *)pudp;
> 
> 	if (...) {
> 		pgd_t *fixmap_pgdp;
> 		pud_t *fixmap_pudp;
> 
> 		spin_lock(...);
> 		fixmap_pgdp = pgd_set_fixmap(__pa(pgdp));
> 		fixmap_pudp = pud_set_fixmap_offset(fixmap_pgdp, 0UL);

Using two fixmap entries is excessive; this is behind __PAGETABLE_PUD_FOLDED, so
we should know what is going on.

(The folding confuses me every time I look at it)


> Do you have any way to make it look more reasonable?

I'm just reacting to a function with 'pud' in the name, that takes two puds as
arguments, using the pgd fixmap slot. I think it's fine to leave it like this, as
in_swapper_pgdir() has told us this is the pgd we're dealing with; it just
looks funny.


>> I see this called 68 times during boot on a 64K/42bit-VA, 65 of which appear to
>> be during paging_init(). What do you think of keeping paging_init()'s use of the
>> pgd fixmap for swapper_pg_dir, deliberately to skip the in_swapper_pgdir() test
>> during paging_init()?
> 
> I think the set_pud() should not be called on a 64K/42bit-VA, as only
> the level 2 and level 3 page tables are in use. It means that the pmd is
> folded into the pud and the pud is never none. So the set_pud() should
> not be called.

(yes, sorry, it was just the one I picked on!)


> I think a variable can be introduced to indicate whether paging_init()
> has been completed, and decide whether or not to skip the
> in_swapper_pgdir() test based on its value.
> 
> I don't know if this is reasonable. What do you think?

I think we can just trick in_swapper_pgdir(); this code only runs once, and it's
already in a very strange environment.


Thanks,

James

