From: Ard Biesheuvel <ardb@kernel.org>
To: linux-arm-kernel@lists.infradead.org
Cc: linux-hardening@vger.kernel.org, Ard Biesheuvel <ardb@kernel.org>,
	Marc Zyngier <maz@kernel.org>, Will Deacon <will@kernel.org>,
	Mark Rutland <mark.rutland@arm.com>,
	Kees Cook <keescook@chromium.org>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Mark Brown <broonie@kernel.org>,
	Anshuman Khandual <anshuman.khandual@arm.com>
Subject: [PATCH v4 07/26] arm64: head: split off idmap creation code
Date: Mon, 13 Jun 2022 16:45:31 +0200
Message-ID: <20220613144550.3760857-8-ardb@kernel.org>
In-Reply-To: <20220613144550.3760857-1-ardb@kernel.org>

Split off the creation of the ID map page tables, so that we can avoid
running it again unnecessarily when KASLR is in effect (which only
randomizes the virtual placement). This will permit us to drop some
explicit cache maintenance to the PoC, which was only needed because
the cache invalidation performed on some global variables might
otherwise clobber unrelated variables that happen to share a cacheline.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/kernel/head.S | 101 ++++++++++----------
 1 file changed, 52 insertions(+), 49 deletions(-)
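
For orientation, the flow after this change can be sketched in C
pseudocode as follows; the wrapper function names are invented for
illustration, the three callees correspond to the assembly routines in
the diff, and the sketch itself is not part of the patch:

	/* Assembly routines introduced/split out by this patch. */
	void clear_page_tables(void);
	void create_idmap(void);
	void create_kernel_mapping(void);

	/* Cold boot (primary_entry): all three steps run. */
	void primary_entry_pgtables(void)
	{
		clear_page_tables();		/* zero init_pg_dir..init_pg_end */
		create_idmap();			/* identity map used to enable the MMU */
		create_kernel_mapping();	/* map the kernel image in init_pg_dir */
	}

	/* KASLR re-entry (__primary_switch, MMU disabled again): only the
	 * virtual placement changes, so the ID map is left untouched. */
	void primary_switch_pgtables(void)
	{
		clear_page_tables();
		create_kernel_mapping();	/* recreate kernel mapping only */
	}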

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index eb54c0289c8a..1cbc52097bf9 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -84,7 +84,7 @@
 	 *  Register   Scope                      Purpose
 	 *  x21        primary_entry() .. start_kernel()        FDT pointer passed at boot in x0
 	 *  x23        primary_entry() .. start_kernel()        physical misalignment/KASLR offset
-	 *  x28        __create_page_tables()                   callee preserved temp register
+	 *  x28        clear_page_tables()                      callee preserved temp register
 	 *  x19/x20    __primary_switch()                       callee preserved temp registers
 	 *  x24        __primary_switch() .. relocate_kernel()  current RELR displacement
 	 */
@@ -94,7 +94,10 @@ SYM_CODE_START(primary_entry)
 	adrp	x23, __PHYS_OFFSET
 	and	x23, x23, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
 	bl	set_cpu_boot_mode_flag
-	bl	__create_page_tables
+	bl	clear_page_tables
+	bl	create_idmap
+	bl	create_kernel_mapping
+
 	/*
 	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
 	 * details.
@@ -122,6 +125,35 @@ SYM_CODE_START_LOCAL(preserve_boot_args)
 	b	dcache_inval_poc		// tail call
 SYM_CODE_END(preserve_boot_args)
 
+SYM_FUNC_START_LOCAL(clear_page_tables)
+	mov	x28, lr
+
+	/*
+	 * Invalidate the init page tables to avoid potential dirty cache lines
+	 * being evicted. Other page tables are allocated in rodata as part of
+	 * the kernel image, and thus are clean to the PoC per the boot
+	 * protocol.
+	 */
+	adrp	x0, init_pg_dir
+	adrp	x1, init_pg_end
+	bl	dcache_inval_poc
+
+	/*
+	 * Clear the init page tables.
+	 */
+	adrp	x0, init_pg_dir
+	adrp	x1, init_pg_end
+	sub	x1, x1, x0
+1:	stp	xzr, xzr, [x0], #16
+	stp	xzr, xzr, [x0], #16
+	stp	xzr, xzr, [x0], #16
+	stp	xzr, xzr, [x0], #16
+	subs	x1, x1, #64
+	b.ne	1b
+
+	ret	x28
+SYM_FUNC_END(clear_page_tables)
+
 /*
  * Macro to populate page table entries, these entries can be pointers to the next level
  * or last level entries pointing to physical memory.
@@ -231,44 +263,8 @@ SYM_CODE_END(preserve_boot_args)
 	populate_entries \tbl, \rtbl, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
 	.endm
 
-/*
- * Setup the initial page tables. We only setup the barest amount which is
- * required to get the kernel running. The following sections are required:
- *   - identity mapping to enable the MMU (low address, TTBR0)
- *   - first few MB of the kernel linear mapping to jump to once the MMU has
- *     been enabled
- */
-SYM_FUNC_START_LOCAL(__create_page_tables)
-	mov	x28, lr
 
-	/*
-	 * Invalidate the init page tables to avoid potential dirty cache lines
-	 * being evicted. Other page tables are allocated in rodata as part of
-	 * the kernel image, and thus are clean to the PoC per the boot
-	 * protocol.
-	 */
-	adrp	x0, init_pg_dir
-	adrp	x1, init_pg_end
-	bl	dcache_inval_poc
-
-	/*
-	 * Clear the init page tables.
-	 */
-	adrp	x0, init_pg_dir
-	adrp	x1, init_pg_end
-	sub	x1, x1, x0
-1:	stp	xzr, xzr, [x0], #16
-	stp	xzr, xzr, [x0], #16
-	stp	xzr, xzr, [x0], #16
-	stp	xzr, xzr, [x0], #16
-	subs	x1, x1, #64
-	b.ne	1b
-
-	mov	x7, SWAPPER_MM_MMUFLAGS
-
-	/*
-	 * Create the identity mapping.
-	 */
+SYM_FUNC_START_LOCAL(create_idmap)
 	adrp	x0, idmap_pg_dir
 	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)
 
@@ -319,12 +315,23 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 	 */
 #endif
 	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)
+	mov	x7, SWAPPER_MM_MMUFLAGS
 
 	map_memory x0, x1, x3, x6, x7, x3, IDMAP_PGD_ORDER, x10, x11, x12, x13, x14, EXTRA_SHIFT
 
 	/*
-	 * Map the kernel image (starting with PHYS_OFFSET).
+	 * Since the page tables have been populated with non-cacheable
+	 * accesses (MMU disabled), invalidate those tables again to
+	 * remove any speculatively loaded cache lines.
 	 */
+	dmb	sy
+
+	adrp	x0, idmap_pg_dir
+	adrp	x1, idmap_pg_end
+	b	dcache_inval_poc		// tail call
+SYM_FUNC_END(create_idmap)
+
+SYM_FUNC_START_LOCAL(create_kernel_mapping)
 	adrp	x0, init_pg_dir
 	mov_q	x5, KIMAGE_VADDR		// compile time __va(_text)
 	add	x5, x5, x23			// add KASLR displacement
@@ -332,6 +339,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 	adrp	x3, _text			// runtime __pa(_text)
 	sub	x6, x6, x3			// _end - _text
 	add	x6, x6, x5			// runtime __va(_end)
+	mov	x7, SWAPPER_MM_MMUFLAGS
 
 	map_memory x0, x1, x5, x6, x7, x3, (VA_BITS - PGDIR_SHIFT), x10, x11, x12, x13, x14
 
@@ -342,16 +350,10 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 	 */
 	dmb	sy
 
-	adrp	x0, idmap_pg_dir
-	adrp	x1, idmap_pg_end
-	bl	dcache_inval_poc
-
 	adrp	x0, init_pg_dir
 	adrp	x1, init_pg_end
-	bl	dcache_inval_poc
-
-	ret	x28
-SYM_FUNC_END(__create_page_tables)
+	b	dcache_inval_poc		// tail call
+SYM_FUNC_END(create_kernel_mapping)
 
 	/*
 	 * Initialize CPU registers with task-specific and cpu-specific context.
@@ -836,7 +838,8 @@ SYM_FUNC_START_LOCAL(__primary_switch)
 	pre_disable_mmu_workaround
 	msr	sctlr_el1, x20			// disable the MMU
 	isb
-	bl	__create_page_tables		// recreate kernel mapping
+	bl	clear_page_tables
+	bl	create_kernel_mapping		// Recreate kernel mapping
 
 	tlbi	vmalle1				// Remove any stale TLB entries
 	dsb	nsh
-- 
2.30.2
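
The cacheline-sharing hazard mentioned in the commit message can be
illustrated with a small C sketch; the variable names and layout below
are hypothetical, while dcache_inval_poc() is the existing arm64 helper
that the assembly above tail-calls:

	#include <linux/types.h>
	#include <asm/cacheflush.h>	/* dcache_inval_poc() */

	/* Two otherwise unrelated variables assumed to land in the same
	 * 64-byte cache line (hypothetical, not actual kernel symbols). */
	static u64 flag_read_with_mmu_off;
	static u64 neighbour_in_same_line;

	void cacheline_clobber_example(void)
	{
		neighbour_in_same_line = 42;	/* dirty, not yet written back */

		/*
		 * Invalidation works at cache-line granularity, so
		 * invalidating (rather than cleaning and invalidating) the
		 * line holding flag_read_with_mmu_off discards the whole
		 * line, including the pending store to
		 * neighbour_in_same_line.
		 */
		dcache_inval_poc((unsigned long)&flag_read_with_mmu_off,
				 (unsigned long)&flag_read_with_mmu_off +
				 sizeof(u64));

		/* neighbour_in_same_line may now read back stale memory. */
	}

With create_idmap() no longer rerun on the KASLR path, some of this
kind of maintenance becomes unnecessary, which is what a later patch in
this series (08/26, "arm64: kernel: drop unnecessary PoC cache
clean+invalidate") addresses.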



Thread overview: 57+ messages
2022-06-13 14:45 [PATCH v4 00/26] arm64: refactor boot flow and add support for WXN Ard Biesheuvel
2022-06-13 14:45 ` [PATCH v4 01/26] arm64: head: move kimage_vaddr variable into C file Ard Biesheuvel
2022-06-14  8:26   ` Anshuman Khandual
2022-06-13 14:45 ` [PATCH v4 02/26] arm64: mm: make vabits_actual a build time constant if possible Ard Biesheuvel
2022-06-14  8:25   ` Anshuman Khandual
2022-06-14  8:34     ` Ard Biesheuvel
2022-06-13 14:45 ` [PATCH v4 03/26] arm64: head: move assignment of idmap_t0sz to C code Ard Biesheuvel
2022-06-14  9:22   ` Anshuman Khandual
2022-06-14  9:34     ` Ard Biesheuvel
2022-06-24 12:36   ` Will Deacon
2022-06-24 12:57     ` Ard Biesheuvel
2022-06-13 14:45 ` [PATCH v4 04/26] arm64: head: drop idmap_ptrs_per_pgd Ard Biesheuvel
2022-06-15  4:07   ` Anshuman Khandual
2022-06-13 14:45 ` [PATCH v4 05/26] arm64: head: simplify page table mapping macros (slightly) Ard Biesheuvel
2022-06-13 14:45 ` [PATCH v4 06/26] arm64: head: switch to map_memory macro for the extended ID map Ard Biesheuvel
2022-06-13 14:45 ` Ard Biesheuvel [this message]
2022-06-13 14:45 ` [PATCH v4 08/26] arm64: kernel: drop unnecessary PoC cache clean+invalidate Ard Biesheuvel
2022-06-15  4:32   ` Anshuman Khandual
2022-06-13 14:45 ` [PATCH v4 09/26] arm64: head: pass ID map root table address to __enable_mmu() Ard Biesheuvel
2022-06-13 14:45 ` [PATCH v4 10/26] arm64: mm: provide idmap pointer to cpu_replace_ttbr1() Ard Biesheuvel
2022-06-13 14:45 ` [PATCH v4 11/26] arm64: head: add helper function to remap regions in early page tables Ard Biesheuvel
2022-06-13 14:45 ` [PATCH v4 12/26] arm64: head: cover entire kernel image in initial ID map Ard Biesheuvel
2022-06-13 14:45 ` [PATCH v4 13/26] arm64: head: use relative references to the RELA and RELR tables Ard Biesheuvel
2022-06-13 14:45 ` [PATCH v4 14/26] arm64: head: create a temporary FDT mapping in the initial ID map Ard Biesheuvel
2022-06-13 14:45 ` [PATCH v4 15/26] arm64: idreg-override: use early FDT mapping in " Ard Biesheuvel
2022-06-13 14:45 ` [PATCH v4 16/26] arm64: head: factor out TTBR1 assignment into a macro Ard Biesheuvel
2022-06-13 14:45 ` [PATCH v4 17/26] arm64: head: populate kernel page tables with MMU and caches on Ard Biesheuvel
2022-06-24 12:56   ` Will Deacon
2022-06-24 13:07     ` Ard Biesheuvel
2022-06-24 13:29       ` Will Deacon
2022-06-24 14:07         ` Ard Biesheuvel
2022-06-13 14:45 ` [PATCH v4 18/26] arm64: head: record CPU boot mode after enabling the MMU Ard Biesheuvel
2022-06-13 14:45 ` [PATCH v4 19/26] arm64: kaslr: defer initialization to late initcall where permitted Ard Biesheuvel
2022-06-24 13:08   ` Will Deacon
2022-06-24 13:09     ` Ard Biesheuvel
2022-06-13 14:45 ` [PATCH v4 20/26] arm64: head: avoid relocating the kernel twice for KASLR Ard Biesheuvel
2022-06-24 13:16   ` Will Deacon
2022-06-24 13:17     ` Ard Biesheuvel
2022-06-13 14:45 ` [PATCH v4 21/26] arm64: setup: drop early FDT pointer helpers Ard Biesheuvel
2022-06-13 14:45 ` [PATCH v4 22/26] arm64: mm: move ro_after_init section into the data segment Ard Biesheuvel
2022-06-13 17:00   ` Kees Cook
2022-06-13 17:16     ` Ard Biesheuvel
2022-06-13 23:38       ` Kees Cook
2022-06-16 11:31         ` Ard Biesheuvel
2022-06-16 16:18           ` Kees Cook
2022-06-16 16:31             ` Ard Biesheuvel
2022-06-13 14:45 ` [PATCH v4 23/26] arm64: head: remap the kernel text/inittext region read-only Ard Biesheuvel
2022-06-13 16:57   ` Kees Cook
2022-06-13 14:45 ` [PATCH v4 24/26] mm: add arch hook to validate mmap() prot flags Ard Biesheuvel
2022-06-13 16:37   ` Kees Cook
2022-06-13 16:44     ` Ard Biesheuvel
2022-06-13 14:45 ` [PATCH v4 25/26] arm64: mm: add support for WXN memory translation attribute Ard Biesheuvel
2022-06-13 16:51   ` Kees Cook
2022-06-13 14:45 ` [PATCH v4 26/26] arm64: kernel: move ID map out of .text mapping Ard Biesheuvel
2022-06-13 16:52   ` Kees Cook
2022-06-24 13:19 ` [PATCH v4 00/26] arm64: refactor boot flow and add support for WXN Will Deacon
2022-06-24 14:40   ` Ard Biesheuvel
