linux-mm.kvack.org archive mirror
* [PATCH v13 01/18] arm64: hyp-stub: Check the size of the HYP stub's vectors
       [not found] <20210408040537.2703241-1-pasha.tatashin@soleen.com>
@ 2021-04-08  4:05 ` Pavel Tatashin
  2021-04-08  4:05 ` [PATCH v13 02/18] arm64: hyp-stub: Move invalid vector entries into the vectors Pavel Tatashin
                   ` (14 subsequent siblings)
  15 siblings, 0 replies; 23+ messages in thread
From: Pavel Tatashin @ 2021-04-08  4:05 UTC (permalink / raw)
  To: pasha.tatashin, jmorris, sashal, ebiederm, kexec, linux-kernel,
	corbet, catalin.marinas, will, linux-arm-kernel, maz,
	james.morse, vladimir.murzin, matthias.bgg, linux-mm,
	mark.rutland, steve.capper, rfontana, tglx, selindag, tyhicks,
	kernelfans

From: James Morse <james.morse@arm.com>

Hibernate contains a set of temporary EL2 vectors used to 'park'
EL2 somewhere safe while all the memory is thrown in the air.
Making kexec do its relocations with the MMU on means they have to
be done at EL1, so EL2 has to be parked. This means yet another
set of vectors.

All these things do is HVC_SET_VECTORS and HVC_SOFT_RESTART, both
of which are implemented by the hyp-stub. Let's copy it instead
of re-inventing it.

To do this the hyp-stub's entrails need to be packed neatly inside
its 2K vectors.

Start by moving the final 2K alignment inside the end marker, and
add a build check that we didn't overflow 2K.
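
As a worked example of the check (illustration only): the .org target is
". - (__hyp_stub_vectors_end - __hyp_stub_vectors) + SZ_2K". If the
vectors occupy exactly SZ_2K bytes this reduces to ".org .", a no-op; if
they grow past 2K the target falls before the current location counter,
and moving the location counter backwards is an assembler error, so the
build fails.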

Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/kernel/hyp-stub.S | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 5eccbd62fec8..572b28646005 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -41,9 +41,13 @@ SYM_CODE_START(__hyp_stub_vectors)
 	ventry	el1_irq_invalid			// IRQ 32-bit EL1
 	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
 	ventry	el1_error_invalid		// Error 32-bit EL1
+	.align 11
+SYM_INNER_LABEL(__hyp_stub_vectors_end, SYM_L_LOCAL)
 SYM_CODE_END(__hyp_stub_vectors)
 
-	.align 11
+# Check the __hyp_stub_vectors didn't overflow
+.org . - (__hyp_stub_vectors_end - __hyp_stub_vectors) + SZ_2K
+
 
 SYM_CODE_START_LOCAL(el1_sync)
 	cmp	x0, #HVC_SET_VECTORS
-- 
2.25.1

* [PATCH v13 02/18] arm64: hyp-stub: Move invalid vector entries into the vectors
       [not found] <20210408040537.2703241-1-pasha.tatashin@soleen.com>
  2021-04-08  4:05 ` [PATCH v13 01/18] arm64: hyp-stub: Check the size of the HYP stub's vectors Pavel Tatashin
@ 2021-04-08  4:05 ` Pavel Tatashin
  2021-04-08  4:05 ` [PATCH v13 03/18] arm64: hyp-stub: Move el1_sync " Pavel Tatashin
                   ` (13 subsequent siblings)
  15 siblings, 0 replies; 23+ messages in thread
From: Pavel Tatashin @ 2021-04-08  4:05 UTC (permalink / raw)
  To: pasha.tatashin, jmorris, sashal, ebiederm, kexec, linux-kernel,
	corbet, catalin.marinas, will, linux-arm-kernel, maz,
	james.morse, vladimir.murzin, matthias.bgg, linux-mm,
	mark.rutland, steve.capper, rfontana, tglx, selindag, tyhicks,
	kernelfans

From: James Morse <james.morse@arm.com>

Most of the hyp-stub's vector entries are invalid. These are each
a unique function that branches to itself. To move these into the
vectors, merge the ventry and invalid_vector macros and give each
one a unique name.

This means we can copy the hyp-stub, as it is self-contained within
its vectors.
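
For scale (arithmetic for illustration): the vector table has 16 entries
spaced 128 bytes apart (.align 7 == 2^7 bytes), i.e. exactly the SZ_2K
the previous patch checks against. Each invalid entry is now a single
branch, so it fits its 128-byte slot with plenty of room.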

Signed-off-by: James Morse <james.morse@arm.com>

[Fixed merging issues]

Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/kernel/hyp-stub.S | 56 +++++++++++++++---------------------
 1 file changed, 23 insertions(+), 33 deletions(-)

diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 572b28646005..ff329c5c074d 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -16,31 +16,38 @@
 #include <asm/ptrace.h>
 #include <asm/virt.h>
 
+.macro invalid_vector	label
+SYM_CODE_START_LOCAL(\label)
+	.align 7
+	b	\label
+SYM_CODE_END(\label)
+.endm
+
 	.text
 	.pushsection	.hyp.text, "ax"
 
 	.align 11
 
 SYM_CODE_START(__hyp_stub_vectors)
-	ventry	el2_sync_invalid		// Synchronous EL2t
-	ventry	el2_irq_invalid			// IRQ EL2t
-	ventry	el2_fiq_invalid			// FIQ EL2t
-	ventry	el2_error_invalid		// Error EL2t
+	invalid_vector	hyp_stub_el2t_sync_invalid	// Synchronous EL2t
+	invalid_vector	hyp_stub_el2t_irq_invalid	// IRQ EL2t
+	invalid_vector	hyp_stub_el2t_fiq_invalid	// FIQ EL2t
+	invalid_vector	hyp_stub_el2t_error_invalid	// Error EL2t
 
-	ventry	el2_sync_invalid		// Synchronous EL2h
-	ventry	el2_irq_invalid			// IRQ EL2h
-	ventry	el2_fiq_invalid			// FIQ EL2h
-	ventry	el2_error_invalid		// Error EL2h
+	invalid_vector	hyp_stub_el2h_sync_invalid	// Synchronous EL2h
+	invalid_vector	hyp_stub_el2h_irq_invalid	// IRQ EL2h
+	invalid_vector	hyp_stub_el2h_fiq_invalid	// FIQ EL2h
+	invalid_vector	hyp_stub_el2h_error_invalid	// Error EL2h
 
 	ventry	el1_sync			// Synchronous 64-bit EL1
-	ventry	el1_irq_invalid			// IRQ 64-bit EL1
-	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
-	ventry	el1_error_invalid		// Error 64-bit EL1
-
-	ventry	el1_sync_invalid		// Synchronous 32-bit EL1
-	ventry	el1_irq_invalid			// IRQ 32-bit EL1
-	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
-	ventry	el1_error_invalid		// Error 32-bit EL1
+	invalid_vector	hyp_stub_el1_irq_invalid	// IRQ 64-bit EL1
+	invalid_vector	hyp_stub_el1_fiq_invalid	// FIQ 64-bit EL1
+	invalid_vector	hyp_stub_el1_error_invalid	// Error 64-bit EL1
+
+	invalid_vector	hyp_stub_32b_el1_sync_invalid	// Synchronous 32-bit EL1
+	invalid_vector	hyp_stub_32b_el1_irq_invalid	// IRQ 32-bit EL1
+	invalid_vector	hyp_stub_32b_el1_fiq_invalid	// FIQ 32-bit EL1
+	invalid_vector	hyp_stub_32b_el1_error_invalid	// Error 32-bit EL1
 	.align 11
 SYM_INNER_LABEL(__hyp_stub_vectors_end, SYM_L_LOCAL)
 SYM_CODE_END(__hyp_stub_vectors)
@@ -173,23 +180,6 @@ SYM_CODE_END(enter_vhe)
 
 	.popsection
 
-.macro invalid_vector	label
-SYM_CODE_START_LOCAL(\label)
-	b \label
-SYM_CODE_END(\label)
-.endm
-
-	invalid_vector	el2_sync_invalid
-	invalid_vector	el2_irq_invalid
-	invalid_vector	el2_fiq_invalid
-	invalid_vector	el2_error_invalid
-	invalid_vector	el1_sync_invalid
-	invalid_vector	el1_irq_invalid
-	invalid_vector	el1_fiq_invalid
-	invalid_vector	el1_error_invalid
-
-	.popsection
-
 /*
  * __hyp_set_vectors: Call this after boot to set the initial hypervisor
  * vectors as part of hypervisor installation.  On an SMP system, this should
-- 
2.25.1

* [PATCH v13 03/18] arm64: hyp-stub: Move el1_sync into the vectors
       [not found] <20210408040537.2703241-1-pasha.tatashin@soleen.com>
  2021-04-08  4:05 ` [PATCH v13 01/18] arm64: hyp-stub: Check the size of the HYP stub's vectors Pavel Tatashin
  2021-04-08  4:05 ` [PATCH v13 02/18] arm64: hyp-stub: Move invalid vector entries into the vectors Pavel Tatashin
@ 2021-04-08  4:05 ` Pavel Tatashin
  2021-04-08 10:24   ` Marc Zyngier
  2021-04-08  4:05 ` [PATCH v13 04/18] arm64: kernel: add helper for booted at EL2 and not VHE Pavel Tatashin
                   ` (12 subsequent siblings)
  15 siblings, 1 reply; 23+ messages in thread
From: Pavel Tatashin @ 2021-04-08  4:05 UTC (permalink / raw)
  To: pasha.tatashin, jmorris, sashal, ebiederm, kexec, linux-kernel,
	corbet, catalin.marinas, will, linux-arm-kernel, maz,
	james.morse, vladimir.murzin, matthias.bgg, linux-mm,
	mark.rutland, steve.capper, rfontana, tglx, selindag, tyhicks,
	kernelfans

From: James Morse <james.morse@arm.com>

The hyp-stub's el1_sync code doesn't do very much, so it can easily fit
in the vectors.

With this, all of the hyp-stub's behaviour is contained in its vectors.
This lets kexec and hibernate copy the hyp-stub when they need its
behaviour, instead of re-implementing it.
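
Below is a minimal C sketch of the dispatch that now lives in the vector
slot, condensed from the assembly in this patch for readability. The
restart_fn typedef and function name are hypothetical; the authoritative
version is the assembly itself:

	typedef void __noreturn (*restart_fn)(unsigned long, unsigned long,
					      unsigned long);

	static unsigned long el1_sync_sketch(unsigned long x0, unsigned long x1,
					     unsigned long x2, unsigned long x3,
					     unsigned long x4)
	{
		switch (x0) {
		case HVC_SET_VECTORS:
			write_sysreg(x1, vbar_el2);	/* install new vectors */
			return 0;
		case HVC_SOFT_RESTART:
			((restart_fn)x1)(x2, x3, x4);	/* jump away, no return */
		case HVC_RESET_VECTORS:
			return 0;			/* nothing to reset */
		default:
			return HVC_STUB_ERR;	/* stray kvm_call_hyp() */
		}
	}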

Signed-off-by: James Morse <james.morse@arm.com>

[Fixed merging issues]

Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/kernel/hyp-stub.S | 59 ++++++++++++++++++------------------
 1 file changed, 29 insertions(+), 30 deletions(-)

diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index ff329c5c074d..d1a73d0f74e0 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -21,6 +21,34 @@ SYM_CODE_START_LOCAL(\label)
 	.align 7
 	b	\label
 SYM_CODE_END(\label)
+.endm
+
+.macro hyp_stub_el1_sync
+SYM_CODE_START_LOCAL(hyp_stub_el1_sync)
+	.align 7
+	cmp	x0, #HVC_SET_VECTORS
+	b.ne	2f
+	msr	vbar_el2, x1
+	b	9f
+
+2:	cmp	x0, #HVC_SOFT_RESTART
+	b.ne	3f
+	mov	x0, x2
+	mov	x2, x4
+	mov	x4, x1
+	mov	x1, x3
+	br	x4				// no return
+
+3:	cmp	x0, #HVC_RESET_VECTORS
+	beq	9f				// Nothing to reset!
+
+	/* Someone called kvm_call_hyp() against the hyp-stub... */
+	mov_q	x0, HVC_STUB_ERR
+	eret
+
+9:	mov	x0, xzr
+	eret
+SYM_CODE_END(hyp_stub_el1_sync)
 .endm
 
 	.text
@@ -39,7 +67,7 @@ SYM_CODE_START(__hyp_stub_vectors)
 	invalid_vector	hyp_stub_el2h_fiq_invalid	// FIQ EL2h
 	invalid_vector	hyp_stub_el2h_error_invalid	// Error EL2h
 
-	ventry	el1_sync			// Synchronous 64-bit EL1
+	hyp_stub_el1_sync				// Synchronous 64-bit EL1
 	invalid_vector	hyp_stub_el1_irq_invalid	// IRQ 64-bit EL1
 	invalid_vector	hyp_stub_el1_fiq_invalid	// FIQ 64-bit EL1
 	invalid_vector	hyp_stub_el1_error_invalid	// Error 64-bit EL1
@@ -55,35 +83,6 @@ SYM_CODE_END(__hyp_stub_vectors)
 # Check the __hyp_stub_vectors didn't overflow
 .org . - (__hyp_stub_vectors_end - __hyp_stub_vectors) + SZ_2K
 
-
-SYM_CODE_START_LOCAL(el1_sync)
-	cmp	x0, #HVC_SET_VECTORS
-	b.ne	1f
-	msr	vbar_el2, x1
-	b	9f
-
-1:	cmp	x0, #HVC_VHE_RESTART
-	b.eq	mutate_to_vhe
-
-2:	cmp	x0, #HVC_SOFT_RESTART
-	b.ne	3f
-	mov	x0, x2
-	mov	x2, x4
-	mov	x4, x1
-	mov	x1, x3
-	br	x4				// no return
-
-3:	cmp	x0, #HVC_RESET_VECTORS
-	beq	9f				// Nothing to reset!
-
-	/* Someone called kvm_call_hyp() against the hyp-stub... */
-	mov_q	x0, HVC_STUB_ERR
-	eret
-
-9:	mov	x0, xzr
-	eret
-SYM_CODE_END(el1_sync)
-
 // nVHE? No way! Give me the real thing!
 SYM_CODE_START_LOCAL(mutate_to_vhe)
 	// Sanity check: MMU *must* be off
-- 
2.25.1

* [PATCH v13 04/18] arm64: kernel: add helper for booted at EL2 and not VHE
       [not found] <20210408040537.2703241-1-pasha.tatashin@soleen.com>
                   ` (2 preceding siblings ...)
  2021-04-08  4:05 ` [PATCH v13 03/18] arm64: hyp-stub: Move el1_sync " Pavel Tatashin
@ 2021-04-08  4:05 ` Pavel Tatashin
  2021-04-08  4:05 ` [PATCH v13 05/18] arm64: trans_pgd: hibernate: Add trans_pgd_copy_el2_vectors Pavel Tatashin
                   ` (11 subsequent siblings)
  15 siblings, 0 replies; 23+ messages in thread
From: Pavel Tatashin @ 2021-04-08  4:05 UTC (permalink / raw)
  To: pasha.tatashin, jmorris, sashal, ebiederm, kexec, linux-kernel,
	corbet, catalin.marinas, will, linux-arm-kernel, maz,
	james.morse, vladimir.murzin, matthias.bgg, linux-mm,
	mark.rutland, steve.capper, rfontana, tglx, selindag, tyhicks,
	kernelfans

Replace places that contain logic like this:
	is_hyp_mode_available() && !is_kernel_in_hyp_mode()

with a dedicated boolean function is_hyp_callable(). This will be needed
later in kexec in order to switch back to EL2 sooner.
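
As a quick reference, derived from the definition added below:

	booted at EL1            -> is_hyp_mode_available() == false -> not callable
	booted at EL2, VHE       -> is_kernel_in_hyp_mode() == true  -> not callable
	booted at EL2, non-VHE   -> true && !false                   -> callable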

Suggested-by: James Morse <james.morse@arm.com>
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/include/asm/virt.h | 5 +++++
 arch/arm64/kernel/cpu-reset.h | 3 +--
 arch/arm64/kernel/hibernate.c | 9 +++------
 arch/arm64/kernel/sdei.c      | 2 +-
 4 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 7379f35ae2c6..4216c8623538 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -128,6 +128,11 @@ static __always_inline bool is_protected_kvm_enabled(void)
 		return cpus_have_final_cap(ARM64_KVM_PROTECTED_MODE);
 }
 
+static inline bool is_hyp_callable(void)
+{
+	return is_hyp_mode_available() && !is_kernel_in_hyp_mode();
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* ! __ASM__VIRT_H */
diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h
index ed50e9587ad8..1922e7a690f8 100644
--- a/arch/arm64/kernel/cpu-reset.h
+++ b/arch/arm64/kernel/cpu-reset.h
@@ -20,8 +20,7 @@ static inline void __noreturn cpu_soft_restart(unsigned long entry,
 {
 	typeof(__cpu_soft_restart) *restart;
 
-	unsigned long el2_switch = !is_kernel_in_hyp_mode() &&
-		is_hyp_mode_available();
+	unsigned long el2_switch = is_hyp_callable();
 	restart = (void *)__pa_symbol(__cpu_soft_restart);
 
 	cpu_install_idmap();
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index b1cef371df2b..c764574a1acb 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -48,9 +48,6 @@
  */
 extern int in_suspend;
 
-/* Do we need to reset el2? */
-#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
-
 /* temporary el2 vectors in the __hibernate_exit_text section. */
 extern char hibernate_el2_vectors[];
 
@@ -125,7 +122,7 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
 	hdr->reenter_kernel	= _cpu_resume;
 
 	/* We can't use __hyp_get_vectors() because kvm may still be loaded */
-	if (el2_reset_needed())
+	if (is_hyp_callable())
 		hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
 	else
 		hdr->__hyp_stub_vectors = 0;
@@ -387,7 +384,7 @@ int swsusp_arch_suspend(void)
 		dcache_clean_range(__idmap_text_start, __idmap_text_end);
 
 		/* Clean kvm setup code to PoC? */
-		if (el2_reset_needed()) {
+		if (is_hyp_callable()) {
 			dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
 			dcache_clean_range(__hyp_text_start, __hyp_text_end);
 		}
@@ -482,7 +479,7 @@ int swsusp_arch_resume(void)
 	 *
 	 * We can skip this step if we booted at EL1, or are running with VHE.
 	 */
-	if (el2_reset_needed()) {
+	if (is_hyp_callable()) {
 		phys_addr_t el2_vectors = (phys_addr_t)hibernate_exit;
 		el2_vectors += hibernate_el2_vectors -
 			       __hibernate_exit_text_start;     /* offset */
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index 2c7ca449dd51..af0ac2f920cf 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -200,7 +200,7 @@ unsigned long sdei_arch_get_entry_point(int conduit)
 	 * dropped to EL1 because we don't support VHE, then we can't support
 	 * SDEI.
 	 */
-	if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
+	if (is_hyp_callable()) {
 		pr_err("Not supported on this hardware/boot configuration\n");
 		goto out_err;
 	}
-- 
2.25.1

* [PATCH v13 05/18] arm64: trans_pgd: hibernate: Add trans_pgd_copy_el2_vectors
       [not found] <20210408040537.2703241-1-pasha.tatashin@soleen.com>
                   ` (3 preceding siblings ...)
  2021-04-08  4:05 ` [PATCH v13 04/18] arm64: kernel: add helper for booted at EL2 and not VHE Pavel Tatashin
@ 2021-04-08  4:05 ` Pavel Tatashin
  2021-04-08  4:05 ` [PATCH v13 06/18] arm64: hibernate: abstract ttbr0 setup function Pavel Tatashin
                   ` (10 subsequent siblings)
  15 siblings, 0 replies; 23+ messages in thread
From: Pavel Tatashin @ 2021-04-08  4:05 UTC (permalink / raw)
  To: pasha.tatashin, jmorris, sashal, ebiederm, kexec, linux-kernel,
	corbet, catalin.marinas, will, linux-arm-kernel, maz,
	james.morse, vladimir.murzin, matthias.bgg, linux-mm,
	mark.rutland, steve.capper, rfontana, tglx, selindag, tyhicks,
	kernelfans

Users of trans_pgd may also need a copy of the vector table, because the
original may be overwritten along with the rest of the linear map.

Move the setup of the EL2 vectors from hibernate to trans_pgd, so it can
later be shared with kexec as well.
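
A minimal usage sketch, assuming a trans_pgd_info with a working page
allocator; this mirrors the hibernate flow in the hunk below, trimmed
for brevity:

	phys_addr_t el2_vectors;
	int rc;

	if (is_hyp_callable()) {
		rc = trans_pgd_copy_el2_vectors(&trans_info, &el2_vectors);
		if (rc)
			return rc;			/* allocation failed */
		__hyp_set_vectors(el2_vectors);		/* install the safe copy */
	}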

Suggested-by: James Morse <james.morse@arm.com>
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/include/asm/trans_pgd.h |  3 +++
 arch/arm64/include/asm/virt.h      |  3 +++
 arch/arm64/kernel/hibernate.c      | 28 ++++++++++------------------
 arch/arm64/mm/trans_pgd.c          | 20 ++++++++++++++++++++
 4 files changed, 36 insertions(+), 18 deletions(-)

diff --git a/arch/arm64/include/asm/trans_pgd.h b/arch/arm64/include/asm/trans_pgd.h
index 5d08e5adf3d5..e0760e52d36d 100644
--- a/arch/arm64/include/asm/trans_pgd.h
+++ b/arch/arm64/include/asm/trans_pgd.h
@@ -36,4 +36,7 @@ int trans_pgd_map_page(struct trans_pgd_info *info, pgd_t *trans_pgd,
 int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
 			 unsigned long *t0sz, void *page);
 
+int trans_pgd_copy_el2_vectors(struct trans_pgd_info *info,
+			       phys_addr_t *el2_vectors);
+
 #endif /* _ASM_TRANS_TABLE_H */
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 4216c8623538..bfbb66018114 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -67,6 +67,9 @@
  */
 extern u32 __boot_cpu_mode[2];
 
+extern char __hyp_stub_vectors[];
+#define ARM64_VECTOR_TABLE_LEN	SZ_2K
+
 void __hyp_set_vectors(phys_addr_t phys_vector_base);
 void __hyp_reset_vectors(void);
 
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index c764574a1acb..0b8bad8bb6eb 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -48,12 +48,6 @@
  */
 extern int in_suspend;
 
-/* temporary el2 vectors in the __hibernate_exit_text section. */
-extern char hibernate_el2_vectors[];
-
-/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
-extern char __hyp_stub_vectors[];
-
 /*
  * The logical cpu number we should resume on, initialised to a non-cpu
  * number.
@@ -428,6 +422,7 @@ int swsusp_arch_resume(void)
 	void *zero_page;
 	size_t exit_size;
 	pgd_t *tmp_pg_dir;
+	phys_addr_t el2_vectors;
 	void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
 					  void *, phys_addr_t, phys_addr_t);
 	struct trans_pgd_info trans_info = {
@@ -455,6 +450,14 @@ int swsusp_arch_resume(void)
 		return -ENOMEM;
 	}
 
+	if (is_hyp_callable()) {
+		rc = trans_pgd_copy_el2_vectors(&trans_info, &el2_vectors);
+		if (rc) {
+			pr_err("Failed to setup el2 vectors\n");
+			return rc;
+		}
+	}
+
 	exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
 	/*
 	 * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
@@ -467,25 +470,14 @@ int swsusp_arch_resume(void)
 		return rc;
 	}
 
-	/*
-	 * The hibernate exit text contains a set of el2 vectors, that will
-	 * be executed at el2 with the mmu off in order to reload hyp-stub.
-	 */
-	__flush_dcache_area(hibernate_exit, exit_size);
-
 	/*
 	 * KASLR will cause the el2 vectors to be in a different location in
 	 * the resumed kernel. Load hibernate's temporary copy into el2.
 	 *
 	 * We can skip this step if we booted at EL1, or are running with VHE.
 	 */
-	if (is_hyp_callable()) {
-		phys_addr_t el2_vectors = (phys_addr_t)hibernate_exit;
-		el2_vectors += hibernate_el2_vectors -
-			       __hibernate_exit_text_start;     /* offset */
-
+	if (is_hyp_callable())
 		__hyp_set_vectors(el2_vectors);
-	}
 
 	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
 		       resume_hdr.reenter_kernel, restore_pblist,
diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c
index 527f0a39c3da..61549451ed3a 100644
--- a/arch/arm64/mm/trans_pgd.c
+++ b/arch/arm64/mm/trans_pgd.c
@@ -322,3 +322,23 @@ int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
 
 	return 0;
 }
+
+/*
+ * Create a copy of the vector table so we can call HVC_SET_VECTORS or
+ * HVC_SOFT_RESTART from contexts where the table may be overwritten.
+ */
+int trans_pgd_copy_el2_vectors(struct trans_pgd_info *info,
+			       phys_addr_t *el2_vectors)
+{
+	void *hyp_stub = trans_alloc(info);
+
+	if (!hyp_stub)
+		return -ENOMEM;
+	*el2_vectors = virt_to_phys(hyp_stub);
+	memcpy(hyp_stub, &__hyp_stub_vectors, ARM64_VECTOR_TABLE_LEN);
+	__flush_icache_range((unsigned long)hyp_stub,
+			     (unsigned long)hyp_stub + ARM64_VECTOR_TABLE_LEN);
+	__flush_dcache_area(hyp_stub, ARM64_VECTOR_TABLE_LEN);
+
+	return 0;
+}
-- 
2.25.1

* [PATCH v13 06/18] arm64: hibernate: abstract ttbr0 setup function
       [not found] <20210408040537.2703241-1-pasha.tatashin@soleen.com>
                   ` (4 preceding siblings ...)
  2021-04-08  4:05 ` [PATCH v13 05/18] arm64: trans_pgd: hibernate: Add trans_pgd_copy_el2_vectors Pavel Tatashin
@ 2021-04-08  4:05 ` Pavel Tatashin
  2021-04-08  4:05 ` [PATCH v13 07/18] arm64: kexec: flush image and lists during kexec load time Pavel Tatashin
                   ` (9 subsequent siblings)
  15 siblings, 0 replies; 23+ messages in thread
From: Pavel Tatashin @ 2021-04-08  4:05 UTC (permalink / raw)
  To: pasha.tatashin, jmorris, sashal, ebiederm, kexec, linux-kernel,
	corbet, catalin.marinas, will, linux-arm-kernel, maz,
	james.morse, vladimir.murzin, matthias.bgg, linux-mm,
	mark.rutland, steve.capper, rfontana, tglx, selindag, tyhicks,
	kernelfans

Currently, only hibernate sets a custom ttbr0 with a safe idmapped
function. Kexec is also going to use this functionality when the
relocation code is idmapped.

Move the setup sequence to a dedicated cpu_install_ttbr0() for custom
ttbr0.
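
A one-line sketch of the kexec-side caller this helper enables (patch 15
of this series wires this up for real):

	/* install the idmapped page tables that map the relocation code */
	cpu_install_ttbr0(kimage->arch.ttbr0, kimage->arch.t0sz);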

Suggested-by: James Morse <james.morse@arm.com>
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/include/asm/mmu_context.h | 24 ++++++++++++++++++++++++
 arch/arm64/kernel/hibernate.c        | 21 +--------------------
 2 files changed, 25 insertions(+), 20 deletions(-)

diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index bd02e99b1a4c..f64d0d5e1b1f 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -115,6 +115,30 @@ static inline void cpu_install_idmap(void)
 	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
 }
 
+/*
+ * Load our new page tables. A strict BBM approach requires that we ensure that
+ * TLBs are free of any entries that may overlap with the global mappings we are
+ * about to install.
+ *
+ * For a real hibernate/resume/kexec cycle TTBR0 currently points to a zero
+ * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI runtime
+ * services), while for a userspace-driven test_resume cycle it points to
+ * userspace page tables (and we must point it at a zero page ourselves).
+ *
+ * We change T0SZ as part of installing the idmap. This is undone by
+ * cpu_uninstall_idmap() in __cpu_suspend_exit().
+ */
+static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
+{
+	cpu_set_reserved_ttbr0();
+	local_flush_tlb_all();
+	__cpu_set_tcr_t0sz(t0sz);
+
+	/* avoid cpu_switch_mm() and its SW-PAN and CNP interactions */
+	write_sysreg(ttbr0, ttbr0_el1);
+	isb();
+}
+
 /*
  * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
  * avoiding the possibility of conflicting TLB entries being allocated.
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 0b8bad8bb6eb..ded5115bcb63 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -206,26 +206,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
 	if (rc)
 		return rc;
 
-	/*
-	 * Load our new page tables. A strict BBM approach requires that we
-	 * ensure that TLBs are free of any entries that may overlap with the
-	 * global mappings we are about to install.
-	 *
-	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
-	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
-	 * runtime services), while for a userspace-driven test_resume cycle it
-	 * points to userspace page tables (and we must point it at a zero page
-	 * ourselves).
-	 *
-	 * We change T0SZ as part of installing the idmap. This is undone by
-	 * cpu_uninstall_idmap() in __cpu_suspend_exit().
-	 */
-	cpu_set_reserved_ttbr0();
-	local_flush_tlb_all();
-	__cpu_set_tcr_t0sz(t0sz);
-	write_sysreg(trans_ttbr0, ttbr0_el1);
-	isb();
-
+	cpu_install_ttbr0(trans_ttbr0, t0sz);
 	*phys_dst_addr = virt_to_phys(page);
 
 	return 0;
-- 
2.25.1

* [PATCH v13 07/18] arm64: kexec: flush image and lists during kexec load time
       [not found] <20210408040537.2703241-1-pasha.tatashin@soleen.com>
                   ` (5 preceding siblings ...)
  2021-04-08  4:05 ` [PATCH v13 06/18] arm64: hibernate: abstract ttbr0 setup function Pavel Tatashin
@ 2021-04-08  4:05 ` Pavel Tatashin
  2021-04-08  4:05 ` [PATCH v13 08/18] arm64: kexec: skip relocation code for inplace kexec Pavel Tatashin
                   ` (8 subsequent siblings)
  15 siblings, 0 replies; 23+ messages in thread
From: Pavel Tatashin @ 2021-04-08  4:05 UTC (permalink / raw)
  To: pasha.tatashin, jmorris, sashal, ebiederm, kexec, linux-kernel,
	corbet, catalin.marinas, will, linux-arm-kernel, maz,
	james.morse, vladimir.murzin, matthias.bgg, linux-mm,
	mark.rutland, steve.capper, rfontana, tglx, selindag, tyhicks,
	kernelfans

Currently, during kexec load we copy the relocation function and
flush it. However, we can also flush the kexec relocation buffers, and
if the new kernel image is already in place (i.e. crash kernel), we can
flush the new kernel image itself.
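
Where the maintenance cost lands after this change, condensed from the
diff below (illustration only):

	load time   (machine_kexec_post_load): flush reloc code, plus either
	            the in-place image or the kimage list and buffers
	reboot time (machine_kexec):           no cache maintenance left to do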

Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/kernel/machine_kexec.c | 49 +++++++++++++++----------------
 1 file changed, 23 insertions(+), 26 deletions(-)

diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 90a335c74442..3a034bc25709 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -59,23 +59,6 @@ void machine_kexec_cleanup(struct kimage *kimage)
 	/* Empty routine needed to avoid build errors. */
 }
 
-int machine_kexec_post_load(struct kimage *kimage)
-{
-	void *reloc_code = page_to_virt(kimage->control_code_page);
-
-	memcpy(reloc_code, arm64_relocate_new_kernel,
-	       arm64_relocate_new_kernel_size);
-	kimage->arch.kern_reloc = __pa(reloc_code);
-	kexec_image_info(kimage);
-
-	/* Flush the reloc_code in preparation for its execution. */
-	__flush_dcache_area(reloc_code, arm64_relocate_new_kernel_size);
-	flush_icache_range((uintptr_t)reloc_code, (uintptr_t)reloc_code +
-			   arm64_relocate_new_kernel_size);
-
-	return 0;
-}
-
 /**
  * machine_kexec_prepare - Prepare for a kexec reboot.
  *
@@ -152,6 +135,29 @@ static void kexec_segment_flush(const struct kimage *kimage)
 	}
 }
 
+int machine_kexec_post_load(struct kimage *kimage)
+{
+	void *reloc_code = page_to_virt(kimage->control_code_page);
+
+	/* If in place flush new kernel image, else flush lists and buffers */
+	if (kimage->head & IND_DONE)
+		kexec_segment_flush(kimage);
+	else
+		kexec_list_flush(kimage);
+
+	memcpy(reloc_code, arm64_relocate_new_kernel,
+	       arm64_relocate_new_kernel_size);
+	kimage->arch.kern_reloc = __pa(reloc_code);
+	kexec_image_info(kimage);
+
+	/* Flush the reloc_code in preparation for its execution. */
+	__flush_dcache_area(reloc_code, arm64_relocate_new_kernel_size);
+	flush_icache_range((uintptr_t)reloc_code, (uintptr_t)reloc_code +
+			   arm64_relocate_new_kernel_size);
+
+	return 0;
+}
+
 /**
  * machine_kexec - Do the kexec reboot.
  *
@@ -169,13 +175,6 @@ void machine_kexec(struct kimage *kimage)
 	WARN(in_kexec_crash && (stuck_cpus || smp_crash_stop_failed()),
 		"Some CPUs may be stale, kdump will be unreliable.\n");
 
-	/* Flush the kimage list and its buffers. */
-	kexec_list_flush(kimage);
-
-	/* Flush the new image if already in place. */
-	if ((kimage != kexec_crash_image) && (kimage->head & IND_DONE))
-		kexec_segment_flush(kimage);
-
 	pr_info("Bye!\n");
 
 	local_daif_mask();
@@ -250,8 +249,6 @@ void arch_kexec_protect_crashkres(void)
 {
 	int i;
 
-	kexec_segment_flush(kexec_crash_image);
-
 	for (i = 0; i < kexec_crash_image->nr_segments; i++)
 		set_memory_valid(
 			__phys_to_virt(kexec_crash_image->segment[i].mem),
-- 
2.25.1

* [PATCH v13 08/18] arm64: kexec: skip relocation code for inplace kexec
       [not found] <20210408040537.2703241-1-pasha.tatashin@soleen.com>
                   ` (6 preceding siblings ...)
  2021-04-08  4:05 ` [PATCH v13 07/18] arm64: kexec: flush image and lists during kexec load time Pavel Tatashin
@ 2021-04-08  4:05 ` Pavel Tatashin
  2021-04-08  4:05 ` [PATCH v13 09/18] arm64: kexec: Use dcache ops macros instead of open-coding Pavel Tatashin
                   ` (7 subsequent siblings)
  15 siblings, 0 replies; 23+ messages in thread
From: Pavel Tatashin @ 2021-04-08  4:05 UTC (permalink / raw)
  To: pasha.tatashin, jmorris, sashal, ebiederm, kexec, linux-kernel,
	corbet, catalin.marinas, will, linux-arm-kernel, maz,
	james.morse, vladimir.murzin, matthias.bgg, linux-mm,
	mark.rutland, steve.capper, rfontana, tglx, selindag, tyhicks,
	kernelfans

In case of kdump, or when segments are already in place, the relocation
is not needed; therefore the setup of the relocation function and the
call to it can be skipped.
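
The resulting decision in machine_kexec(), condensed from the diff below
(illustration only; arguments elided):

	if (kimage->head & IND_DONE)	/* segments already in place, e.g. kdump */
		restart(...);		/* jump straight to the new kernel */
	else
		cpu_soft_restart(kimage->arch.kern_reloc, ...);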

Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Suggested-by: James Morse <james.morse@arm.com>
---
 arch/arm64/kernel/machine_kexec.c   | 34 ++++++++++++++++++-----------
 arch/arm64/kernel/relocate_kernel.S |  3 ---
 2 files changed, 21 insertions(+), 16 deletions(-)

diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 3a034bc25709..b150b65f0b84 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -139,21 +139,23 @@ int machine_kexec_post_load(struct kimage *kimage)
 {
 	void *reloc_code = page_to_virt(kimage->control_code_page);
 
-	/* If in place flush new kernel image, else flush lists and buffers */
-	if (kimage->head & IND_DONE)
+	/* If in place, relocation is not used, only flush next kernel */
+	if (kimage->head & IND_DONE) {
 		kexec_segment_flush(kimage);
-	else
-		kexec_list_flush(kimage);
+		kexec_image_info(kimage);
+		return 0;
+	}
 
 	memcpy(reloc_code, arm64_relocate_new_kernel,
 	       arm64_relocate_new_kernel_size);
 	kimage->arch.kern_reloc = __pa(reloc_code);
-	kexec_image_info(kimage);
 
 	/* Flush the reloc_code in preparation for its execution. */
 	__flush_dcache_area(reloc_code, arm64_relocate_new_kernel_size);
 	flush_icache_range((uintptr_t)reloc_code, (uintptr_t)reloc_code +
 			   arm64_relocate_new_kernel_size);
+	kexec_list_flush(kimage);
+	kexec_image_info(kimage);
 
 	return 0;
 }
@@ -180,19 +182,25 @@ void machine_kexec(struct kimage *kimage)
 	local_daif_mask();
 
 	/*
-	 * cpu_soft_restart will shutdown the MMU, disable data caches, then
-	 * transfer control to the kern_reloc which contains a copy of
-	 * the arm64_relocate_new_kernel routine.  arm64_relocate_new_kernel
-	 * uses physical addressing to relocate the new image to its final
-	 * position and transfers control to the image entry point when the
-	 * relocation is complete.
+	 * Both restart and cpu_soft_restart will shutdown the MMU, disable data
+	 * caches. However, restart will start new kernel or purgatory directly,
+	 * cpu_soft_restart will transfer control to arm64_relocate_new_kernel
 	 * In kexec case, kimage->start points to purgatory assuming that
 	 * kernel entry and dtb address are embedded in purgatory by
 	 * userspace (kexec-tools).
 	 * In kexec_file case, the kernel starts directly without purgatory.
 	 */
-	cpu_soft_restart(kimage->arch.kern_reloc, kimage->head, kimage->start,
-			 kimage->arch.dtb_mem);
+	if (kimage->head & IND_DONE) {
+		typeof(__cpu_soft_restart) *restart;
+
+		cpu_install_idmap();
+		restart = (void *)__pa_symbol(__cpu_soft_restart);
+		restart(is_hyp_callable(), kimage->start, kimage->arch.dtb_mem,
+			0, 0);
+	} else {
+		cpu_soft_restart(kimage->arch.kern_reloc, kimage->head,
+				 kimage->start, kimage->arch.dtb_mem);
+	}
 
 	BUG(); /* Should never get here. */
 }
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index b78ea5de97a4..8058fabe0a76 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -32,8 +32,6 @@ SYM_CODE_START(arm64_relocate_new_kernel)
 	mov	x16, x0				/* x16 = kimage_head */
 	mov	x14, xzr			/* x14 = entry ptr */
 	mov	x13, xzr			/* x13 = copy dest */
-	/* Check if the new image needs relocation. */
-	tbnz	x16, IND_DONE_BIT, .Ldone
 	raw_dcache_line_size x15, x1		/* x15 = dcache line size */
 .Lloop:
 	and	x12, x16, PAGE_MASK		/* x12 = addr */
@@ -65,7 +63,6 @@ SYM_CODE_START(arm64_relocate_new_kernel)
 .Lnext:
 	ldr	x16, [x14], #8			/* entry = *ptr++ */
 	tbz	x16, IND_DONE_BIT, .Lloop	/* while (!(entry & DONE)) */
-.Ldone:
 	/* wait for writes from copy_page to finish */
 	dsb	nsh
 	ic	iallu
-- 
2.25.1

* [PATCH v13 09/18] arm64: kexec: Use dcache ops macros instead of open-coding
       [not found] <20210408040537.2703241-1-pasha.tatashin@soleen.com>
                   ` (7 preceding siblings ...)
  2021-04-08  4:05 ` [PATCH v13 08/18] arm64: kexec: skip relocation code for inplace kexec Pavel Tatashin
@ 2021-04-08  4:05 ` Pavel Tatashin
  2021-04-08  4:05 ` [PATCH v13 10/18] arm64: kexec: pass kimage as the only argument to relocation function Pavel Tatashin
                   ` (6 subsequent siblings)
  15 siblings, 0 replies; 23+ messages in thread
From: Pavel Tatashin @ 2021-04-08  4:05 UTC (permalink / raw)
  To: pasha.tatashin, jmorris, sashal, ebiederm, kexec, linux-kernel,
	corbet, catalin.marinas, will, linux-arm-kernel, maz,
	james.morse, vladimir.murzin, matthias.bgg, linux-mm,
	mark.rutland, steve.capper, rfontana, tglx, selindag, tyhicks,
	kernelfans

From: James Morse <james.morse@arm.com>

kexec does dcache maintenance when it re-writes all memory. Our
dcache_by_line_op macro depends on reading the sanitised DminLine
from memory. Kexec may have overwritten this, so it open-codes the
sequence.

dcache_by_line_op is a whole set of macros; it uses dcache_line_size,
which uses read_ctr for the sanitised DminLine. Reading the DminLine
is the first thing dcache_by_line_op does.

Rename dcache_by_line_op to dcache_by_myline_op and take DminLine as
an argument. Kexec can now use the slightly smaller macro.

This makes upcoming changes to the dcache maintenance easier on
the eye.

Code generated by the existing callers is unchanged.
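
As a worked example of the alignment step inside the macro (illustration
only): with a 64-byte line size, "sub" computes tmp2 = 64 - 1 = 0x3f and
"bic" rounds kaddr down to the start of its cache line, so the first "dc"
covers the line containing kaddr and the loop advances 64 bytes at a time
until it passes kaddr + size.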

Signed-off-by: James Morse <james.morse@arm.com>

[Fixed merging issues]

Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/include/asm/assembler.h  | 12 ++++++++----
 arch/arm64/kernel/relocate_kernel.S | 13 +++----------
 2 files changed, 11 insertions(+), 14 deletions(-)

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index ca31594d3d6c..29061b76aab6 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -371,10 +371,9 @@ alternative_else
 alternative_endif
 	.endm
 
-	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
-	dcache_line_size \tmp1, \tmp2
+	.macro dcache_by_myline_op op, domain, kaddr, size, linesz, tmp2
 	add	\size, \kaddr, \size
-	sub	\tmp2, \tmp1, #1
+	sub	\tmp2, \linesz, #1
 	bic	\kaddr, \kaddr, \tmp2
 9998:
 	.ifc	\op, cvau
@@ -394,12 +393,17 @@ alternative_endif
 	.endif
 	.endif
 	.endif
-	add	\kaddr, \kaddr, \tmp1
+	add	\kaddr, \kaddr, \linesz
 	cmp	\kaddr, \size
 	b.lo	9998b
 	dsb	\domain
 	.endm
 
+	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
+	dcache_line_size \tmp1, \tmp2
+	dcache_by_myline_op \op, \domain, \kaddr, \size, \tmp1, \tmp2
+	.endm
+
 /*
  * Macro to perform an instruction cache maintenance for the interval
  * [start, end)
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index 8058fabe0a76..718037bef560 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -41,16 +41,9 @@ SYM_CODE_START(arm64_relocate_new_kernel)
 	tbz	x16, IND_SOURCE_BIT, .Ltest_indirection
 
 	/* Invalidate dest page to PoC. */
-	mov     x2, x13
-	add     x20, x2, #PAGE_SIZE
-	sub     x1, x15, #1
-	bic     x2, x2, x1
-2:	dc      ivac, x2
-	add     x2, x2, x15
-	cmp     x2, x20
-	b.lo    2b
-	dsb     sy
-
+	mov	x2, x13
+	mov	x1, #PAGE_SIZE
+	dcache_by_myline_op ivac, sy, x2, x1, x15, x20
 	copy_page x13, x12, x1, x2, x3, x4, x5, x6, x7, x8
 	b	.Lnext
 .Ltest_indirection:
-- 
2.25.1

* [PATCH v13 10/18] arm64: kexec: pass kimage as the only argument to relocation function
       [not found] <20210408040537.2703241-1-pasha.tatashin@soleen.com>
                   ` (8 preceding siblings ...)
  2021-04-08  4:05 ` [PATCH v13 09/18] arm64: kexec: Use dcache ops macros instead of open-coding Pavel Tatashin
@ 2021-04-08  4:05 ` Pavel Tatashin
  2021-04-08  4:05 ` [PATCH v13 12/18] arm64: kexec: relocate in EL1 mode Pavel Tatashin
                   ` (5 subsequent siblings)
  15 siblings, 0 replies; 23+ messages in thread
From: Pavel Tatashin @ 2021-04-08  4:05 UTC (permalink / raw)
  To: pasha.tatashin, jmorris, sashal, ebiederm, kexec, linux-kernel,
	corbet, catalin.marinas, will, linux-arm-kernel, maz,
	james.morse, vladimir.murzin, matthias.bgg, linux-mm,
	mark.rutland, steve.capper, rfontana, tglx, selindag, tyhicks,
	kernelfans

Currently, the kexec relocation function (arm64_relocate_new_kernel)
accepts the following arguments:

head:		start of array that contains relocation information.
entry:		entry point for new kernel or purgatory.
dtb_mem:	first and only argument to entry.

The number of arguments cannot be easily expanded, because this
function is also called from HVC_SOFT_RESTART, which preserves only
three arguments. Also, arm64_relocate_new_kernel is written in
assembly and called without a stack, so there is no place to spill
extra arguments in order to free up registers.

Soon, we will need to pass more arguments: once we enable the MMU we
will need to pass information about the page tables.

Pass kimage to arm64_relocate_new_kernel, and teach it to get the
required fields from kimage.
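
How the two halves meet, condensed from the diff below: asm-offsets.c
emits the structure offsets as build-time assembler constants, so the
assembly can index the single kimage pointer it receives in x0:

	C side:   DEFINE(KIMAGE_HEAD, offsetof(struct kimage, head));
	asm side: ldr	x16, [x0, #KIMAGE_HEAD]	/* x16 = kimage->head */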

Suggested-by: James Morse <james.morse@arm.com>
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/kernel/asm-offsets.c     |  7 +++++++
 arch/arm64/kernel/machine_kexec.c   |  6 ++++--
 arch/arm64/kernel/relocate_kernel.S | 10 ++++------
 3 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index a36e2fc330d4..0c92e193f866 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -9,6 +9,7 @@
 
 #include <linux/arm_sdei.h>
 #include <linux/sched.h>
+#include <linux/kexec.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/kvm_host.h>
@@ -153,6 +154,12 @@ int main(void)
   DEFINE(PTRAUTH_USER_KEY_APGA,		offsetof(struct ptrauth_keys_user, apga));
   DEFINE(PTRAUTH_KERNEL_KEY_APIA,	offsetof(struct ptrauth_keys_kernel, apia));
   BLANK();
+#endif
+#ifdef CONFIG_KEXEC_CORE
+  DEFINE(KIMAGE_ARCH_DTB_MEM,		offsetof(struct kimage, arch.dtb_mem));
+  DEFINE(KIMAGE_HEAD,			offsetof(struct kimage, head));
+  DEFINE(KIMAGE_START,			offsetof(struct kimage, start));
+  BLANK();
 #endif
   return 0;
 }
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index b150b65f0b84..2e734e4ae12e 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -83,6 +83,8 @@ static void kexec_list_flush(struct kimage *kimage)
 {
 	kimage_entry_t *entry;
 
+	__flush_dcache_area(kimage, sizeof(*kimage));
+
 	for (entry = &kimage->head; ; entry++) {
 		unsigned int flag;
 		void *addr;
@@ -198,8 +200,8 @@ void machine_kexec(struct kimage *kimage)
 		restart(is_hyp_callable(), kimage->start, kimage->arch.dtb_mem,
 			0, 0);
 	} else {
-		cpu_soft_restart(kimage->arch.kern_reloc, kimage->head,
-				 kimage->start, kimage->arch.dtb_mem);
+		cpu_soft_restart(kimage->arch.kern_reloc, virt_to_phys(kimage),
+				 0, 0);
 	}
 
 	BUG(); /* Should never get here. */
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index 718037bef560..36b4496524c3 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -27,9 +27,7 @@
  */
 SYM_CODE_START(arm64_relocate_new_kernel)
 	/* Setup the list loop variables. */
-	mov	x18, x2				/* x18 = dtb address */
-	mov	x17, x1				/* x17 = kimage_start */
-	mov	x16, x0				/* x16 = kimage_head */
+	ldr	x16, [x0, #KIMAGE_HEAD]		/* x16 = kimage_head */
 	mov	x14, xzr			/* x14 = entry ptr */
 	mov	x13, xzr			/* x13 = copy dest */
 	raw_dcache_line_size x15, x1		/* x15 = dcache line size */
@@ -63,12 +61,12 @@ SYM_CODE_START(arm64_relocate_new_kernel)
 	isb
 
 	/* Start new image. */
-	mov	x0, x18
+	ldr	x4, [x0, #KIMAGE_START]		/* relocation start */
+	ldr	x0, [x0, #KIMAGE_ARCH_DTB_MEM]	/* dtb address */
 	mov	x1, xzr
 	mov	x2, xzr
 	mov	x3, xzr
-	br	x17
-
+	br	x4
 SYM_CODE_END(arm64_relocate_new_kernel)
 
 .align 3	/* To keep the 64-bit values below naturally aligned. */
-- 
2.25.1

* [PATCH v13 12/18] arm64: kexec: relocate in EL1 mode
       [not found] <20210408040537.2703241-1-pasha.tatashin@soleen.com>
                   ` (9 preceding siblings ...)
  2021-04-08  4:05 ` [PATCH v13 10/18] arm64: kexec: pass kimage as the only argument to relocation function Pavel Tatashin
@ 2021-04-08  4:05 ` Pavel Tatashin
  2021-04-08  4:05 ` [PATCH v13 13/18] arm64: kexec: use ld script for relocation function Pavel Tatashin
                   ` (4 subsequent siblings)
  15 siblings, 0 replies; 23+ messages in thread
From: Pavel Tatashin @ 2021-04-08  4:05 UTC (permalink / raw)
  To: pasha.tatashin, jmorris, sashal, ebiederm, kexec, linux-kernel,
	corbet, catalin.marinas, will, linux-arm-kernel, maz,
	james.morse, vladimir.murzin, matthias.bgg, linux-mm,
	mark.rutland, steve.capper, rfontana, tglx, selindag, tyhicks,
	kernelfans

Since we are going to keep the MMU enabled during relocation, we need
to stay in EL1 mode throughout the relocation.

Stay at EL1, and switch to EL2 only right before entering the new world.
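
The resulting control flow, condensed from the diff below (illustration
only):

	EL1: arm64_relocate_new_kernel() relocates all segments, then:
	     kimage->arch.el2_vectors set?
	       yes: x0 = HVC_SOFT_RESTART; hvc #0	/* enter new world from EL2 */
	       no:  br x4				/* enter new world from EL1 */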

Suggested-by: James Morse <james.morse@arm.com>
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/kernel/cpu-reset.h       |  3 +--
 arch/arm64/kernel/machine_kexec.c   |  4 ++--
 arch/arm64/kernel/relocate_kernel.S | 13 +++++++++++--
 3 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h
index 1922e7a690f8..f6d95512fec6 100644
--- a/arch/arm64/kernel/cpu-reset.h
+++ b/arch/arm64/kernel/cpu-reset.h
@@ -20,11 +20,10 @@ static inline void __noreturn cpu_soft_restart(unsigned long entry,
 {
 	typeof(__cpu_soft_restart) *restart;
 
-	unsigned long el2_switch = is_hyp_callable();
 	restart = (void *)__pa_symbol(__cpu_soft_restart);
 
 	cpu_install_idmap();
-	restart(el2_switch, entry, arg0, arg1, arg2);
+	restart(0, entry, arg0, arg1, arg2);
 	unreachable();
 }
 
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index fb03b6676fb9..d5940b7889f8 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -231,8 +231,8 @@ void machine_kexec(struct kimage *kimage)
 	} else {
 		if (is_hyp_callable())
 			__hyp_set_vectors(kimage->arch.el2_vectors);
-		cpu_soft_restart(kimage->arch.kern_reloc, virt_to_phys(kimage),
-				 0, 0);
+		cpu_soft_restart(kimage->arch.kern_reloc,
+				 virt_to_phys(kimage), 0, 0);
 	}
 
 	BUG(); /* Should never get here. */
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index 36b4496524c3..df023b82544b 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -13,6 +13,7 @@
 #include <asm/kexec.h>
 #include <asm/page.h>
 #include <asm/sysreg.h>
+#include <asm/virt.h>
 
 /*
  * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
@@ -61,12 +62,20 @@ SYM_CODE_START(arm64_relocate_new_kernel)
 	isb
 
 	/* Start new image. */
+	ldr	x1, [x0, #KIMAGE_ARCH_EL2_VECTORS]	/* el2 vectors addr */
+	cbz	x1, .Lel1
+	ldr	x1, [x0, #KIMAGE_START]		/* relocation start */
+	ldr	x2, [x0, #KIMAGE_ARCH_DTB_MEM]	/* dtb address */
+	mov	x3, xzr
+	mov	x4, xzr
+	mov     x0, #HVC_SOFT_RESTART
+	hvc	#0				/* Jumps from el2 */
+.Lel1:
 	ldr	x4, [x0, #KIMAGE_START]		/* relocation start */
 	ldr	x0, [x0, #KIMAGE_ARCH_DTB_MEM]	/* dtb address */
-	mov	x1, xzr
 	mov	x2, xzr
 	mov	x3, xzr
-	br	x4
+	br	x4				/* Jumps from el1 */
 SYM_CODE_END(arm64_relocate_new_kernel)
 
 .align 3	/* To keep the 64-bit values below naturally aligned. */
-- 
2.25.1

* [PATCH v13 13/18] arm64: kexec: use ld script for relocation function
       [not found] <20210408040537.2703241-1-pasha.tatashin@soleen.com>
                   ` (10 preceding siblings ...)
  2021-04-08  4:05 ` [PATCH v13 12/18] arm64: kexec: relocate in EL1 mode Pavel Tatashin
@ 2021-04-08  4:05 ` Pavel Tatashin
  2021-04-08  4:05 ` [PATCH v13 15/18] arm64: kexec: keep MMU enabled during kexec relocation Pavel Tatashin
                   ` (3 subsequent siblings)
  15 siblings, 0 replies; 23+ messages in thread
From: Pavel Tatashin @ 2021-04-08  4:05 UTC (permalink / raw)
  To: pasha.tatashin, jmorris, sashal, ebiederm, kexec, linux-kernel,
	corbet, catalin.marinas, will, linux-arm-kernel, maz,
	james.morse, vladimir.murzin, matthias.bgg, linux-mm,
	mark.rutland, steve.capper, rfontana, tglx, selindag, tyhicks,
	kernelfans

Currently, the relocation code declares start and end variables
which are used to compute its size.

A better way to do this is to use the linker script instead, and put the
relocation function in its own section.
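
The size guarantee this buys, worked through from the linker script hunk
below: the section starts at ALIGN(SZ_4K), and the ASSERT checks that the
code ends within SZ_4K of that 4K-aligned start, so the memcpy() of
reloc_size bytes into the single control page in machine_kexec_post_load()
can never overflow it.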

Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/include/asm/sections.h   |  1 +
 arch/arm64/kernel/machine_kexec.c   | 14 ++++++--------
 arch/arm64/kernel/relocate_kernel.S | 15 ++-------------
 arch/arm64/kernel/vmlinux.lds.S     | 19 +++++++++++++++++++
 4 files changed, 28 insertions(+), 21 deletions(-)

diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
index 2f36b16a5b5d..31e459af89f6 100644
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -20,5 +20,6 @@ extern char __exittext_begin[], __exittext_end[];
 extern char __irqentry_text_start[], __irqentry_text_end[];
 extern char __mmuoff_data_start[], __mmuoff_data_end[];
 extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
+extern char __relocate_new_kernel_start[], __relocate_new_kernel_end[];
 
 #endif /* __ASM_SECTIONS_H */
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index d5940b7889f8..f1451d807708 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -20,14 +20,11 @@
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/page.h>
+#include <asm/sections.h>
 #include <asm/trans_pgd.h>
 
 #include "cpu-reset.h"
 
-/* Global variables for the arm64_relocate_new_kernel routine. */
-extern const unsigned char arm64_relocate_new_kernel[];
-extern const unsigned long arm64_relocate_new_kernel_size;
-
 /**
  * kexec_image_info - For debugging output.
  */
@@ -157,6 +154,7 @@ static void *kexec_page_alloc(void *arg)
 int machine_kexec_post_load(struct kimage *kimage)
 {
 	void *reloc_code = page_to_virt(kimage->control_code_page);
+	long reloc_size;
 	struct trans_pgd_info info = {
 		.trans_alloc_page	= kexec_page_alloc,
 		.trans_alloc_arg	= kimage,
@@ -177,14 +175,14 @@ int machine_kexec_post_load(struct kimage *kimage)
 			return rc;
 	}
 
-	memcpy(reloc_code, arm64_relocate_new_kernel,
-	       arm64_relocate_new_kernel_size);
+	reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;
+	memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);
 	kimage->arch.kern_reloc = __pa(reloc_code);
 
 	/* Flush the reloc_code in preparation for its execution. */
-	__flush_dcache_area(reloc_code, arm64_relocate_new_kernel_size);
+	__flush_dcache_area(reloc_code, reloc_size);
 	flush_icache_range((uintptr_t)reloc_code, (uintptr_t)reloc_code +
-			   arm64_relocate_new_kernel_size);
+			   reloc_size);
 	kexec_list_flush(kimage);
 	kexec_image_info(kimage);
 
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index df023b82544b..7a600ba33ae1 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -15,6 +15,7 @@
 #include <asm/sysreg.h>
 #include <asm/virt.h>
 
+.pushsection    ".kexec_relocate.text", "ax"
 /*
  * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
  *
@@ -77,16 +78,4 @@ SYM_CODE_START(arm64_relocate_new_kernel)
 	mov	x3, xzr
 	br	x4				/* Jumps from el1 */
 SYM_CODE_END(arm64_relocate_new_kernel)
-
-.align 3	/* To keep the 64-bit values below naturally aligned. */
-
-.Lcopy_end:
-.org	KEXEC_CONTROL_PAGE_SIZE
-
-/*
- * arm64_relocate_new_kernel_size - Number of bytes to copy to the
- * control_code_page.
- */
-.globl arm64_relocate_new_kernel_size
-arm64_relocate_new_kernel_size:
-	.quad	.Lcopy_end - arm64_relocate_new_kernel
+.popsection
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 7eea7888bb02..0d9d5e6af66f 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -12,6 +12,7 @@
 #include <asm/cache.h>
 #include <asm/hyp_image.h>
 #include <asm/kernel-pgtable.h>
+#include <asm/kexec.h>
 #include <asm/memory.h>
 #include <asm/page.h>
 
@@ -92,6 +93,16 @@ jiffies = jiffies_64;
 #define HIBERNATE_TEXT
 #endif
 
+#ifdef CONFIG_KEXEC_CORE
+#define KEXEC_TEXT					\
+	. = ALIGN(SZ_4K);				\
+	__relocate_new_kernel_start = .;		\
+	*(.kexec_relocate.text)				\
+	__relocate_new_kernel_end = .;
+#else
+#define KEXEC_TEXT
+#endif
+
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 #define TRAMP_TEXT					\
 	. = ALIGN(PAGE_SIZE);				\
@@ -152,6 +163,7 @@ SECTIONS
 			HYPERVISOR_TEXT
 			IDMAP_TEXT
 			HIBERNATE_TEXT
+			KEXEC_TEXT
 			TRAMP_TEXT
 			*(.fixup)
 			*(.gnu.warning)
@@ -336,3 +348,10 @@ ASSERT(swapper_pg_dir - reserved_pg_dir == RESERVED_SWAPPER_OFFSET,
 ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET,
        "TRAMP_SWAPPER_OFFSET is wrong!")
 #endif
+
+#ifdef CONFIG_KEXEC_CORE
+/* kexec relocation code should fit into one KEXEC_CONTROL_PAGE_SIZE */
+ASSERT(__relocate_new_kernel_end - (__relocate_new_kernel_start & ~(SZ_4K - 1))
+	<= SZ_4K, "kexec relocation code is too big or misaligned")
+ASSERT(KEXEC_CONTROL_PAGE_SIZE >= SZ_4K, "KEXEC_CONTROL_PAGE_SIZE is broken")
+#endif
-- 
2.25.1

* [PATCH v13 15/18] arm64: kexec: keep MMU enabled during kexec relocation
       [not found] <20210408040537.2703241-1-pasha.tatashin@soleen.com>
                   ` (11 preceding siblings ...)
  2021-04-08  4:05 ` [PATCH v13 13/18] arm64: kexec: use ld script for relocation function Pavel Tatashin
@ 2021-04-08  4:05 ` Pavel Tatashin
  2021-04-08  4:05 ` [PATCH v13 16/18] arm64: kexec: remove the pre-kexec PoC maintenance Pavel Tatashin
                   ` (2 subsequent siblings)
  15 siblings, 0 replies; 23+ messages in thread
From: Pavel Tatashin @ 2021-04-08  4:05 UTC (permalink / raw)
  To: pasha.tatashin, jmorris, sashal, ebiederm, kexec, linux-kernel,
	corbet, catalin.marinas, will, linux-arm-kernel, maz,
	james.morse, vladimir.murzin, matthias.bgg, linux-mm,
	mark.rutland, steve.capper, rfontana, tglx, selindag, tyhicks,
	kernelfans

Now that we have the linear map page tables configured, keep the MMU
enabled to allow faster relocation of segments to their final
destination.

Cavium ThunderX2:
Kernel Image size: 38M Initramfs size: 46M Total relocation size: 84M
MMU-disabled:
relocation	7.489539915s
MMU-enabled:
relocation	0.03946095s

Broadcom Stingray:
For a moderately sized kernel + initramfs (25M), the relocation was
taking 0.382s; with the MMU enabled it now takes only 0.019s, a 20x
improvement.

The time is proportional to the size of the relocation, so with a larger
initramfs, e.g. 100M, it could take over a second.
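
Worked out from the numbers above (rough arithmetic, for illustration):
ThunderX2 moves 84M in 7.49s with the MMU off (~11 MB/s) versus 0.039s
with it on (~2.1 GB/s), roughly a 190x speedup; scaling the Stingray
MMU-off rate (25M in 0.382s) to a 100M initramfs gives ~1.5s, which is
where the "over a second" estimate comes from.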

Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/include/asm/kexec.h      |  3 +++
 arch/arm64/kernel/asm-offsets.c     |  1 +
 arch/arm64/kernel/machine_kexec.c   | 16 ++++++++++----
 arch/arm64/kernel/relocate_kernel.S | 33 +++++++++++++++++++----------
 4 files changed, 38 insertions(+), 15 deletions(-)

diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
index 59ac166daf53..5fc87b51f8a9 100644
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -97,8 +97,11 @@ struct kimage_arch {
 	phys_addr_t dtb_mem;
 	phys_addr_t kern_reloc;
 	phys_addr_t el2_vectors;
+	phys_addr_t ttbr0;
 	phys_addr_t ttbr1;
 	phys_addr_t zero_page;
+	unsigned long phys_offset;
+	unsigned long t0sz;
 	/* Core ELF header buffer */
 	void *elf_headers;
 	unsigned long elf_headers_mem;
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 609362b5aa76..ec7bb80aedc8 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -159,6 +159,7 @@ int main(void)
   DEFINE(KIMAGE_ARCH_DTB_MEM,		offsetof(struct kimage, arch.dtb_mem));
   DEFINE(KIMAGE_ARCH_EL2_VECTORS,	offsetof(struct kimage, arch.el2_vectors));
   DEFINE(KIMAGE_ARCH_ZERO_PAGE,		offsetof(struct kimage, arch.zero_page));
+  DEFINE(KIMAGE_ARCH_PHYS_OFFSET,	offsetof(struct kimage, arch.phys_offset));
   DEFINE(KIMAGE_ARCH_TTBR1,		offsetof(struct kimage, arch.ttbr1));
   DEFINE(KIMAGE_HEAD,			offsetof(struct kimage, head));
   DEFINE(KIMAGE_START,			offsetof(struct kimage, start));
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index c875ef522e53..d5c8aefc66f3 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -190,6 +190,11 @@ int machine_kexec_post_load(struct kimage *kimage)
 	reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;
 	memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);
 	kimage->arch.kern_reloc = __pa(reloc_code);
+	rc = trans_pgd_idmap_page(&info, &kimage->arch.ttbr0,
+				  &kimage->arch.t0sz, reloc_code);
+	if (rc)
+		return rc;
+	kimage->arch.phys_offset = virt_to_phys(kimage) - (long)kimage;
 
 	/* Flush the reloc_code in preparation for its execution. */
 	__flush_dcache_area(reloc_code, reloc_size);
@@ -223,9 +228,9 @@ void machine_kexec(struct kimage *kimage)
 	local_daif_mask();
 
 	/*
-	 * Both restart and cpu_soft_restart will shutdown the MMU, disable data
+	 * Both restart and kernel_reloc will shutdown the MMU, disable data
 	 * caches. However, restart will start new kernel or purgatory directly,
-	 * cpu_soft_restart will transfer control to arm64_relocate_new_kernel
+	 * kernel_reloc contains the body of arm64_relocate_new_kernel
 	 * In kexec case, kimage->start points to purgatory assuming that
 	 * kernel entry and dtb address are embedded in purgatory by
 	 * userspace (kexec-tools).
@@ -239,10 +244,13 @@ void machine_kexec(struct kimage *kimage)
 		restart(is_hyp_callable(), kimage->start, kimage->arch.dtb_mem,
 			0, 0);
 	} else {
+		void (*kernel_reloc)(struct kimage *kimage);
+
 		if (is_hyp_callable())
 			__hyp_set_vectors(kimage->arch.el2_vectors);
-		cpu_soft_restart(kimage->arch.kern_reloc,
-				 virt_to_phys(kimage), 0, 0);
+		cpu_install_ttbr0(kimage->arch.ttbr0, kimage->arch.t0sz);
+		kernel_reloc = (void *)kimage->arch.kern_reloc;
+		kernel_reloc(kimage);
 	}
 
 	BUG(); /* Should never get here. */
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index e83b6380907d..433a57b3d76e 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -4,6 +4,8 @@
  *
  * Copyright (C) Linaro.
  * Copyright (C) Huawei Futurewei Technologies.
+ * Copyright (C) 2020, Microsoft Corporation.
+ * Pavel Tatashin <pasha.tatashin@soleen.com>
  */
 
 #include <linux/kexec.h>
@@ -15,6 +17,15 @@
 #include <asm/sysreg.h>
 #include <asm/virt.h>
 
+.macro turn_off_mmu tmp1, tmp2
+	mrs	\tmp1, sctlr_el1
+	mov_q	\tmp2, SCTLR_ELx_FLAGS
+	bic	\tmp1, \tmp1, \tmp2
+	pre_disable_mmu_workaround
+	msr	sctlr_el1, \tmp1
+	isb
+.endm
+
 .pushsection    ".kexec_relocate.text", "ax"
 /*
  * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
@@ -32,22 +43,21 @@ SYM_CODE_START(arm64_relocate_new_kernel)
 	ldr	x18, [x0, #KIMAGE_ARCH_ZERO_PAGE] /* x18 = zero page for BBM */
 	ldr	x17, [x0, #KIMAGE_ARCH_TTBR1]	/* x17 = linear map copy */
 	ldr	x16, [x0, #KIMAGE_HEAD]		/* x16 = kimage_head */
-	mov	x14, xzr			/* x14 = entry ptr */
-	mov	x13, xzr			/* x13 = copy dest */
+	ldr	x22, [x0, #KIMAGE_ARCH_PHYS_OFFSET]	/* x22 = phys_offset */
 	raw_dcache_line_size x15, x1		/* x15 = dcache line size */
 	break_before_make_ttbr_switch	x18, x17, x1, x2 /* set linear map */
 .Lloop:
 	and	x12, x16, PAGE_MASK		/* x12 = addr */
-
+	sub	x12, x12, x22			/* Convert x12 to virt */
 	/* Test the entry flags. */
 .Ltest_source:
 	tbz	x16, IND_SOURCE_BIT, .Ltest_indirection
 
 	/* Invalidate dest page to PoC. */
-	mov	x2, x13
-	mov	x1, #PAGE_SIZE
-	dcache_by_myline_op ivac, sy, x2, x1, x15, x20
+	mov	x19, x13
 	copy_page x13, x12, x1, x2, x3, x4, x5, x6, x7, x8
+	mov	x1, #PAGE_SIZE
+	dcache_by_myline_op civac, sy, x19, x1, x15, x20
 	b	.Lnext
 .Ltest_indirection:
 	tbz	x16, IND_INDIRECTION_BIT, .Ltest_destination
@@ -64,19 +74,20 @@ SYM_CODE_START(arm64_relocate_new_kernel)
 	ic	iallu
 	dsb	nsh
 	isb
+	ldr	x4, [x0, #KIMAGE_START]			/* relocation start */
+	ldr	x1, [x0, #KIMAGE_ARCH_EL2_VECTORS]	/* el2 vectors */
+	ldr	x0, [x0, #KIMAGE_ARCH_DTB_MEM]		/* dtb address */
+	turn_off_mmu x12, x13
 
 	/* Start new image. */
-	ldr	x1, [x0, #KIMAGE_ARCH_EL2_VECTORS]	/* relocation start */
 	cbz	x1, .Lel1
-	ldr	x1, [x0, #KIMAGE_START]		/* relocation start */
-	ldr	x2, [x0, #KIMAGE_ARCH_DTB_MEM]	/* dtb address */
+	mov	x1, x4				/* relocation start */
+	mov	x2, x0				/* dtb address */
 	mov	x3, xzr
 	mov	x4, xzr
 	mov     x0, #HVC_SOFT_RESTART
 	hvc	#0				/* Jumps from el2 */
 .Lel1:
-	ldr	x4, [x0, #KIMAGE_START]		/* relocation start */
-	ldr	x0, [x0, #KIMAGE_ARCH_DTB_MEM]	/* dtb address */
 	mov	x2, xzr
 	mov	x3, xzr
 	br	x4				/* Jumps from el1 */
-- 
2.25.1



^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH v13 16/18] arm64: kexec: remove the pre-kexec PoC maintenance
       [not found] <20210408040537.2703241-1-pasha.tatashin@soleen.com>
                   ` (12 preceding siblings ...)
  2021-04-08  4:05 ` [PATCH v13 15/18] arm64: kexec: keep MMU enabled during kexec relocation Pavel Tatashin
@ 2021-04-08  4:05 ` Pavel Tatashin
  2021-04-08  4:05 ` [PATCH v13 17/18] arm64: kexec: Remove cpu-reset.h Pavel Tatashin
  2021-04-08  4:05 ` [PATCH v13 18/18] arm64/mm: remove useless trans_pgd_map_page() Pavel Tatashin
  15 siblings, 0 replies; 23+ messages in thread
From: Pavel Tatashin @ 2021-04-08  4:05 UTC (permalink / raw)
  To: pasha.tatashin, jmorris, sashal, ebiederm, kexec, linux-kernel,
	corbet, catalin.marinas, will, linux-arm-kernel, maz,
	james.morse, vladimir.murzin, matthias.bgg, linux-mm,
	mark.rutland, steve.capper, rfontana, tglx, selindag, tyhicks,
	kernelfans

Now that kexec does its relocations with the MMU enabled, we no longer
need to clean the relocation data to the PoC.
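
To see why the walk is redundant, here is a hedged C sketch (not code
from this series; relocate_sketch is a hypothetical stand-in for the asm
loop in arm64_relocate_new_kernel, reusing kimage list names that appear
elsewhere in this thread). With the MMU on, the copy goes through
cacheable mappings and each destination page is cleaned+invalidated to
PoC right after it is written:

	static void relocate_sketch(struct kimage *kimage)
	{
		kimage_entry_t *entry;
		void *dest = NULL;

		for (entry = &kimage->head; ; entry++) {
			unsigned int flag = *entry & IND_FLAGS;
			void *addr;

			if (flag == IND_DONE)
				return;

			addr = phys_to_virt(*entry & PAGE_MASK);
			switch (flag) {
			case IND_DESTINATION:
				dest = addr;		/* next copy target */
				break;
			case IND_INDIRECTION:
				/* continue with the next page of the list */
				entry = (kimage_entry_t *)addr - 1;
				break;
			case IND_SOURCE:
				copy_page(dest, addr);	/* cacheable copy */
				__flush_dcache_area(dest, PAGE_SIZE); /* dc civac */
				dest += PAGE_SIZE;
				break;
			}
		}
	}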

Co-developed-by: James Morse <james.morse@arm.com>
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/kernel/machine_kexec.c | 40 -------------------------------
 1 file changed, 40 deletions(-)

diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index d5c8aefc66f3..a1c9bee0cddd 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -76,45 +76,6 @@ int machine_kexec_prepare(struct kimage *kimage)
 	return 0;
 }
 
-/**
- * kexec_list_flush - Helper to flush the kimage list and source pages to PoC.
- */
-static void kexec_list_flush(struct kimage *kimage)
-{
-	kimage_entry_t *entry;
-
-	__flush_dcache_area(kimage, sizeof(*kimage));
-
-	for (entry = &kimage->head; ; entry++) {
-		unsigned int flag;
-		void *addr;
-
-		/* flush the list entries. */
-		__flush_dcache_area(entry, sizeof(kimage_entry_t));
-
-		flag = *entry & IND_FLAGS;
-		if (flag == IND_DONE)
-			break;
-
-		addr = phys_to_virt(*entry & PAGE_MASK);
-
-		switch (flag) {
-		case IND_INDIRECTION:
-			/* Set entry point just before the new list page. */
-			entry = (kimage_entry_t *)addr - 1;
-			break;
-		case IND_SOURCE:
-			/* flush the source pages. */
-			__flush_dcache_area(addr, PAGE_SIZE);
-			break;
-		case IND_DESTINATION:
-			break;
-		default:
-			BUG();
-		}
-	}
-}
-
 /**
  * kexec_segment_flush - Helper to flush the kimage segments to PoC.
  */
@@ -200,7 +161,6 @@ int machine_kexec_post_load(struct kimage *kimage)
 	__flush_dcache_area(reloc_code, reloc_size);
 	flush_icache_range((uintptr_t)reloc_code, (uintptr_t)reloc_code +
 			   reloc_size);
-	kexec_list_flush(kimage);
 	kexec_image_info(kimage);
 
 	return 0;
-- 
2.25.1



^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH v13 17/18] arm64: kexec: Remove cpu-reset.h
       [not found] <20210408040537.2703241-1-pasha.tatashin@soleen.com>
                   ` (13 preceding siblings ...)
  2021-04-08  4:05 ` [PATCH v13 16/18] arm64: kexec: remove the pre-kexec PoC maintenance Pavel Tatashin
@ 2021-04-08  4:05 ` Pavel Tatashin
  2021-04-08  4:05 ` [PATCH v13 18/18] arm64/mm: remove useless trans_pgd_map_page() Pavel Tatashin
  15 siblings, 0 replies; 23+ messages in thread
From: Pavel Tatashin @ 2021-04-08  4:05 UTC (permalink / raw)
  To: pasha.tatashin, jmorris, sashal, ebiederm, kexec, linux-kernel,
	corbet, catalin.marinas, will, linux-arm-kernel, maz,
	james.morse, vladimir.murzin, matthias.bgg, linux-mm,
	mark.rutland, steve.capper, rfontana, tglx, selindag, tyhicks,
	kernelfans

This header now contains only the cpu_soft_restart() wrapper, which is
never used directly anymore. Remove the header, and rename the
underlying __cpu_soft_restart() helper to cpu_soft_restart().
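
For context, the resulting call site in machine_kexec() (taken from the
hunk below) references the renamed symbol directly; it calls through the
physical address because the routine must run from the flat identity
mapping:

	typeof(cpu_soft_restart) *restart;

	cpu_install_idmap();
	restart = (void *)__pa_symbol(cpu_soft_restart);
	restart(is_hyp_callable(), kimage->start, kimage->arch.dtb_mem,
		0, 0);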

Suggested-by: James Morse <james.morse@arm.com>
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/include/asm/kexec.h    |  6 ++++++
 arch/arm64/kernel/cpu-reset.S     |  7 +++----
 arch/arm64/kernel/cpu-reset.h     | 30 ------------------------------
 arch/arm64/kernel/machine_kexec.c |  6 ++----
 4 files changed, 11 insertions(+), 38 deletions(-)
 delete mode 100644 arch/arm64/kernel/cpu-reset.h

diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
index 5fc87b51f8a9..ee71ae3b93ed 100644
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -90,6 +90,12 @@ static inline void crash_prepare_suspend(void) {}
 static inline void crash_post_resume(void) {}
 #endif
 
+#if defined(CONFIG_KEXEC_CORE)
+void cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
+		      unsigned long arg0, unsigned long arg1,
+		      unsigned long arg2);
+#endif
+
 #define ARCH_HAS_KIMAGE_ARCH
 
 struct kimage_arch {
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
index 37721eb6f9a1..5d47d6c92634 100644
--- a/arch/arm64/kernel/cpu-reset.S
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -16,8 +16,7 @@
 .pushsection    .idmap.text, "awx"
 
 /*
- * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
- * cpu_soft_restart.
+ * cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2)
  *
  * @el2_switch: Flag to indicate a switch to EL2 is needed.
  * @entry: Location to jump to for soft reset.
@@ -29,7 +28,7 @@
  * branch to what would be the reset vector. It must be executed with the
  * flat identity mapping.
  */
-SYM_CODE_START(__cpu_soft_restart)
+SYM_CODE_START(cpu_soft_restart)
 	/* Clear sctlr_el1 flags. */
 	mrs	x12, sctlr_el1
 	mov_q	x13, SCTLR_ELx_FLAGS
@@ -51,6 +50,6 @@ SYM_CODE_START(__cpu_soft_restart)
 	mov	x1, x3				// arg1
 	mov	x2, x4				// arg2
 	br	x8
-SYM_CODE_END(__cpu_soft_restart)
+SYM_CODE_END(cpu_soft_restart)
 
 .popsection
diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h
deleted file mode 100644
index f6d95512fec6..000000000000
--- a/arch/arm64/kernel/cpu-reset.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * CPU reset routines
- *
- * Copyright (C) 2015 Huawei Futurewei Technologies.
- */
-
-#ifndef _ARM64_CPU_RESET_H
-#define _ARM64_CPU_RESET_H
-
-#include <asm/virt.h>
-
-void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
-	unsigned long arg0, unsigned long arg1, unsigned long arg2);
-
-static inline void __noreturn cpu_soft_restart(unsigned long entry,
-					       unsigned long arg0,
-					       unsigned long arg1,
-					       unsigned long arg2)
-{
-	typeof(__cpu_soft_restart) *restart;
-
-	restart = (void *)__pa_symbol(__cpu_soft_restart);
-
-	cpu_install_idmap();
-	restart(0, entry, arg0, arg1, arg2);
-	unreachable();
-}
-
-#endif
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index a1c9bee0cddd..ef7ba93f2bd6 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -23,8 +23,6 @@
 #include <asm/sections.h>
 #include <asm/trans_pgd.h>
 
-#include "cpu-reset.h"
-
 /**
  * kexec_image_info - For debugging output.
  */
@@ -197,10 +195,10 @@ void machine_kexec(struct kimage *kimage)
 	 * In kexec_file case, the kernel starts directly without purgatory.
 	 */
 	if (kimage->head & IND_DONE) {
-		typeof(__cpu_soft_restart) *restart;
+		typeof(cpu_soft_restart) *restart;
 
 		cpu_install_idmap();
-		restart = (void *)__pa_symbol(__cpu_soft_restart);
+		restart = (void *)__pa_symbol(cpu_soft_restart);
 		restart(is_hyp_callable(), kimage->start, kimage->arch.dtb_mem,
 			0, 0);
 	} else {
-- 
2.25.1



^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH v13 18/18] arm64/mm: remove useless trans_pgd_map_page()
       [not found] <20210408040537.2703241-1-pasha.tatashin@soleen.com>
                   ` (14 preceding siblings ...)
  2021-04-08  4:05 ` [PATCH v13 17/18] arm64: kexec: Remove cpu-reset.h Pavel Tatashin
@ 2021-04-08  4:05 ` Pavel Tatashin
  15 siblings, 0 replies; 23+ messages in thread
From: Pavel Tatashin @ 2021-04-08  4:05 UTC (permalink / raw)
  To: pasha.tatashin, jmorris, sashal, ebiederm, kexec, linux-kernel,
	corbet, catalin.marinas, will, linux-arm-kernel, maz,
	james.morse, vladimir.murzin, matthias.bgg, linux-mm,
	mark.rutland, steve.capper, rfontana, tglx, selindag, tyhicks,
	kernelfans

From: Pingfan Liu <kernelfans@gmail.com>

The intent of trans_pgd_map_page() was to map a contiguous range of VA
memory to the memory that is getting relocated during kexec. However,
since we now use a copy of the linear map instead of a contiguous
range, this function is no longer needed.
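
For reference, the idmap helper is the trans_pgd entry point that kexec
still uses, as in machine_kexec_post_load() earlier in this series:

	rc = trans_pgd_idmap_page(&info, &kimage->arch.ttbr0,
				  &kimage->arch.t0sz, reloc_code);
	if (rc)
		return rc;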

Signed-off-by: Pingfan Liu <kernelfans@gmail.com>
[Changed commit message]
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/include/asm/trans_pgd.h |  5 +--
 arch/arm64/mm/trans_pgd.c          | 57 ------------------------------
 2 files changed, 1 insertion(+), 61 deletions(-)

diff --git a/arch/arm64/include/asm/trans_pgd.h b/arch/arm64/include/asm/trans_pgd.h
index e0760e52d36d..234353df2f13 100644
--- a/arch/arm64/include/asm/trans_pgd.h
+++ b/arch/arm64/include/asm/trans_pgd.h
@@ -15,7 +15,7 @@
 /*
  * trans_alloc_page
  *	- Allocator that should return exactly one zeroed page, if this
- *	  allocator fails, trans_pgd_create_copy() and trans_pgd_map_page()
+ *	  allocator fails, trans_pgd_create_copy() and trans_pgd_idmap_page()
  *	  return -ENOMEM error.
  *
  * trans_alloc_arg
@@ -30,9 +30,6 @@ struct trans_pgd_info {
 int trans_pgd_create_copy(struct trans_pgd_info *info, pgd_t **trans_pgd,
 			  unsigned long start, unsigned long end);
 
-int trans_pgd_map_page(struct trans_pgd_info *info, pgd_t *trans_pgd,
-		       void *page, unsigned long dst_addr, pgprot_t pgprot);
-
 int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
 			 unsigned long *t0sz, void *page);
 
diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c
index 61549451ed3a..e24a749013c1 100644
--- a/arch/arm64/mm/trans_pgd.c
+++ b/arch/arm64/mm/trans_pgd.c
@@ -217,63 +217,6 @@ int trans_pgd_create_copy(struct trans_pgd_info *info, pgd_t **dst_pgdp,
 	return rc;
 }
 
-/*
- * Add map entry to trans_pgd for a base-size page at PTE level.
- * info:	contains allocator and its argument
- * trans_pgd:	page table in which new map is added.
- * page:	page to be mapped.
- * dst_addr:	new VA address for the page
- * pgprot:	protection for the page.
- *
- * Returns 0 on success, and -ENOMEM on failure.
- */
-int trans_pgd_map_page(struct trans_pgd_info *info, pgd_t *trans_pgd,
-		       void *page, unsigned long dst_addr, pgprot_t pgprot)
-{
-	pgd_t *pgdp;
-	p4d_t *p4dp;
-	pud_t *pudp;
-	pmd_t *pmdp;
-	pte_t *ptep;
-
-	pgdp = pgd_offset_pgd(trans_pgd, dst_addr);
-	if (pgd_none(READ_ONCE(*pgdp))) {
-		p4dp = trans_alloc(info);
-		if (!pgdp)
-			return -ENOMEM;
-		pgd_populate(NULL, pgdp, p4dp);
-	}
-
-	p4dp = p4d_offset(pgdp, dst_addr);
-	if (p4d_none(READ_ONCE(*p4dp))) {
-		pudp = trans_alloc(info);
-		if (!pudp)
-			return -ENOMEM;
-		p4d_populate(NULL, p4dp, pudp);
-	}
-
-	pudp = pud_offset(p4dp, dst_addr);
-	if (pud_none(READ_ONCE(*pudp))) {
-		pmdp = trans_alloc(info);
-		if (!pmdp)
-			return -ENOMEM;
-		pud_populate(NULL, pudp, pmdp);
-	}
-
-	pmdp = pmd_offset(pudp, dst_addr);
-	if (pmd_none(READ_ONCE(*pmdp))) {
-		ptep = trans_alloc(info);
-		if (!ptep)
-			return -ENOMEM;
-		pmd_populate_kernel(NULL, pmdp, ptep);
-	}
-
-	ptep = pte_offset_kernel(pmdp, dst_addr);
-	set_pte(ptep, pfn_pte(virt_to_pfn(page), pgprot));
-
-	return 0;
-}
-
 /*
  * The page we want to idmap may be outside the range covered by VA_BITS that
  * can be built using the kernel's p?d_populate() helpers. As a one off, for a
-- 
2.25.1



^ permalink raw reply related	[flat|nested] 23+ messages in thread

* Re: [PATCH v13 03/18] arm64: hyp-stub: Move el1_sync into the vectors
  2021-04-08  4:05 ` [PATCH v13 03/18] arm64: hyp-stub: Move el1_sync " Pavel Tatashin
@ 2021-04-08 10:24   ` Marc Zyngier
  2021-04-08 14:45     ` Pavel Tatashin
  0 siblings, 1 reply; 23+ messages in thread
From: Marc Zyngier @ 2021-04-08 10:24 UTC (permalink / raw)
  To: Pavel Tatashin
  Cc: jmorris, sashal, ebiederm, kexec, linux-kernel, corbet,
	catalin.marinas, will, linux-arm-kernel, james.morse,
	vladimir.murzin, matthias.bgg, linux-mm, mark.rutland,
	steve.capper, rfontana, tglx, selindag, tyhicks, kernelfans

On 2021-04-08 05:05, Pavel Tatashin wrote:
> From: James Morse <james.morse@arm.com>
> 
> The hyp-stub's el1_sync code doesn't do very much, this can easily fit
> in the vectors.
> 
> With this, all of the hyp-stubs behaviour is contained in its vectors.
> This lets kexec and hibernate copy the hyp-stub when they need its
> behaviour, instead of re-implementing it.
> 
> Signed-off-by: James Morse <james.morse@arm.com>
> 
> [Fixed merging issues]

That's a pretty odd fix IMO.

> 
> Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
> ---
>  arch/arm64/kernel/hyp-stub.S | 59 ++++++++++++++++++------------------
>  1 file changed, 29 insertions(+), 30 deletions(-)
> 
> diff --git a/arch/arm64/kernel/hyp-stub.S 
> b/arch/arm64/kernel/hyp-stub.S
> index ff329c5c074d..d1a73d0f74e0 100644
> --- a/arch/arm64/kernel/hyp-stub.S
> +++ b/arch/arm64/kernel/hyp-stub.S
> @@ -21,6 +21,34 @@ SYM_CODE_START_LOCAL(\label)
>  	.align 7
>  	b	\label
>  SYM_CODE_END(\label)
> +.endm
> +
> +.macro hyp_stub_el1_sync
> +SYM_CODE_START_LOCAL(hyp_stub_el1_sync)
> +	.align 7
> +	cmp	x0, #HVC_SET_VECTORS
> +	b.ne	2f
> +	msr	vbar_el2, x1
> +	b	9f
> +
> +2:	cmp	x0, #HVC_SOFT_RESTART
> +	b.ne	3f
> +	mov	x0, x2
> +	mov	x2, x4
> +	mov	x4, x1
> +	mov	x1, x3
> +	br	x4				// no return
> +
> +3:	cmp	x0, #HVC_RESET_VECTORS
> +	beq	9f				// Nothing to reset!
> +
> +	/* Someone called kvm_call_hyp() against the hyp-stub... */
> +	mov_q	x0, HVC_STUB_ERR
> +	eret
> +
> +9:	mov	x0, xzr
> +	eret
> +SYM_CODE_END(hyp_stub_el1_sync)

You said you tested this on a TX2. I guess you don't care whether
it runs VHE or not...

         M.

>  .endm
> 
>  	.text
> @@ -39,7 +67,7 @@ SYM_CODE_START(__hyp_stub_vectors)
>  	invalid_vector	hyp_stub_el2h_fiq_invalid	// FIQ EL2h
>  	invalid_vector	hyp_stub_el2h_error_invalid	// Error EL2h
> 
> -	ventry	el1_sync			// Synchronous 64-bit EL1
> +	hyp_stub_el1_sync				// Synchronous 64-bit EL1
>  	invalid_vector	hyp_stub_el1_irq_invalid	// IRQ 64-bit EL1
>  	invalid_vector	hyp_stub_el1_fiq_invalid	// FIQ 64-bit EL1
>  	invalid_vector	hyp_stub_el1_error_invalid	// Error 64-bit EL1
> @@ -55,35 +83,6 @@ SYM_CODE_END(__hyp_stub_vectors)
>  # Check the __hyp_stub_vectors didn't overflow
>  .org . - (__hyp_stub_vectors_end - __hyp_stub_vectors) + SZ_2K
> 
> -
> -SYM_CODE_START_LOCAL(el1_sync)
> -	cmp	x0, #HVC_SET_VECTORS
> -	b.ne	1f
> -	msr	vbar_el2, x1
> -	b	9f
> -
> -1:	cmp	x0, #HVC_VHE_RESTART
> -	b.eq	mutate_to_vhe
> -
> -2:	cmp	x0, #HVC_SOFT_RESTART
> -	b.ne	3f
> -	mov	x0, x2
> -	mov	x2, x4
> -	mov	x4, x1
> -	mov	x1, x3
> -	br	x4				// no return
> -
> -3:	cmp	x0, #HVC_RESET_VECTORS
> -	beq	9f				// Nothing to reset!
> -
> -	/* Someone called kvm_call_hyp() against the hyp-stub... */
> -	mov_q	x0, HVC_STUB_ERR
> -	eret
> -
> -9:	mov	x0, xzr
> -	eret
> -SYM_CODE_END(el1_sync)
> -
>  // nVHE? No way! Give me the real thing!
>  SYM_CODE_START_LOCAL(mutate_to_vhe)
>  	// Sanity check: MMU *must* be off

-- 
Jazz is not dead. It just smells funny...


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH v13 03/18] arm64: hyp-stub: Move el1_sync into the vectors
  2021-04-08 10:24   ` Marc Zyngier
@ 2021-04-08 14:45     ` Pavel Tatashin
  2021-04-08 15:01       ` Marc Zyngier
  0 siblings, 1 reply; 23+ messages in thread
From: Pavel Tatashin @ 2021-04-08 14:45 UTC (permalink / raw)
  To: Marc Zyngier
  Cc: James Morris, Sasha Levin, Eric W. Biederman, kexec mailing list,
	LKML, Jonathan Corbet, Catalin Marinas, Will Deacon, Linux ARM,
	James Morse, Vladimir Murzin, Matthias Brugger, linux-mm,
	Mark Rutland, steve.capper, rfontana, Thomas Gleixner, Selin Dag,
	Tyler Hicks, Pingfan Liu

On Thu, Apr 8, 2021 at 6:24 AM Marc Zyngier <maz@kernel.org> wrote:
>
> On 2021-04-08 05:05, Pavel Tatashin wrote:
> > From: James Morse <james.morse@arm.com>
> >
> > The hyp-stub's el1_sync code doesn't do very much, this can easily fit
> > in the vectors.
> >
> > With this, all of the hyp-stubs behaviour is contained in its vectors.
> > This lets kexec and hibernate copy the hyp-stub when they need its
> > behaviour, instead of re-implementing it.
> >
> > Signed-off-by: James Morse <james.morse@arm.com>
> >
> > [Fixed merging issues]
>
> That's a pretty odd fix IMO.
>
> >
> > Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
> > ---
> >  arch/arm64/kernel/hyp-stub.S | 59 ++++++++++++++++++------------------
> >  1 file changed, 29 insertions(+), 30 deletions(-)
> >
> > diff --git a/arch/arm64/kernel/hyp-stub.S
> > b/arch/arm64/kernel/hyp-stub.S
> > index ff329c5c074d..d1a73d0f74e0 100644
> > --- a/arch/arm64/kernel/hyp-stub.S
> > +++ b/arch/arm64/kernel/hyp-stub.S
> > @@ -21,6 +21,34 @@ SYM_CODE_START_LOCAL(\label)
> >       .align 7
> >       b       \label
> >  SYM_CODE_END(\label)
> > +.endm
> > +
> > +.macro hyp_stub_el1_sync
> > +SYM_CODE_START_LOCAL(hyp_stub_el1_sync)
> > +     .align 7
> > +     cmp     x0, #HVC_SET_VECTORS
> > +     b.ne    2f
> > +     msr     vbar_el2, x1
> > +     b       9f
> > +
> > +2:   cmp     x0, #HVC_SOFT_RESTART
> > +     b.ne    3f
> > +     mov     x0, x2
> > +     mov     x2, x4
> > +     mov     x4, x1
> > +     mov     x1, x3
> > +     br      x4                              // no return
> > +
> > +3:   cmp     x0, #HVC_RESET_VECTORS
> > +     beq     9f                              // Nothing to reset!
> > +
> > +     /* Someone called kvm_call_hyp() against the hyp-stub... */
> > +     mov_q   x0, HVC_STUB_ERR
> > +     eret
> > +
> > +9:   mov     x0, xzr
> > +     eret
> > +SYM_CODE_END(hyp_stub_el1_sync)
>
> You said you tested this on a TX2. I guess you don't care whether
> it runs VHE or not...

Hi Marc,

Thank you for noticing this. Not sure how this mismerge happened. I
have added the missing case, and VHE is initialized correctly during
boot.
[   14.698175] kvm [1]: VHE mode initialized successfully
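
(For reference, the restored branch mirrors the el1_sync code this
series deletes:)

1:	cmp	x0, #HVC_VHE_RESTART
	b.eq	mutate_to_vhe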

During normal boot, kexec reboot, and kdump reboot. I will respin the
series and send version 14 soon.

Thanks,
Pasha

>
>          M.
>
> >  .endm
> >
> >       .text
> > @@ -39,7 +67,7 @@ SYM_CODE_START(__hyp_stub_vectors)
> >       invalid_vector  hyp_stub_el2h_fiq_invalid       // FIQ EL2h
> >       invalid_vector  hyp_stub_el2h_error_invalid     // Error EL2h
> >
> > -     ventry  el1_sync                        // Synchronous 64-bit EL1
> > +     hyp_stub_el1_sync                               // Synchronous 64-bit EL1
> >       invalid_vector  hyp_stub_el1_irq_invalid        // IRQ 64-bit EL1
> >       invalid_vector  hyp_stub_el1_fiq_invalid        // FIQ 64-bit EL1
> >       invalid_vector  hyp_stub_el1_error_invalid      // Error 64-bit EL1
> > @@ -55,35 +83,6 @@ SYM_CODE_END(__hyp_stub_vectors)
> >  # Check the __hyp_stub_vectors didn't overflow
> >  .org . - (__hyp_stub_vectors_end - __hyp_stub_vectors) + SZ_2K
> >
> > -
> > -SYM_CODE_START_LOCAL(el1_sync)
> > -     cmp     x0, #HVC_SET_VECTORS
> > -     b.ne    1f
> > -     msr     vbar_el2, x1
> > -     b       9f
> > -
> > -1:   cmp     x0, #HVC_VHE_RESTART
> > -     b.eq    mutate_to_vhe
> > -
> > -2:   cmp     x0, #HVC_SOFT_RESTART
> > -     b.ne    3f
> > -     mov     x0, x2
> > -     mov     x2, x4
> > -     mov     x4, x1
> > -     mov     x1, x3
> > -     br      x4                              // no return
> > -
> > -3:   cmp     x0, #HVC_RESET_VECTORS
> > -     beq     9f                              // Nothing to reset!
> > -
> > -     /* Someone called kvm_call_hyp() against the hyp-stub... */
> > -     mov_q   x0, HVC_STUB_ERR
> > -     eret
> > -
> > -9:   mov     x0, xzr
> > -     eret
> > -SYM_CODE_END(el1_sync)
> > -
> >  // nVHE? No way! Give me the real thing!
> >  SYM_CODE_START_LOCAL(mutate_to_vhe)
> >       // Sanity check: MMU *must* be off
>
> --
> Jazz is not dead. It just smells funny...


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH v13 03/18] arm64: hyp-stub: Move el1_sync into the vectors
  2021-04-08 14:45     ` Pavel Tatashin
@ 2021-04-08 15:01       ` Marc Zyngier
  2021-04-08 16:28         ` Pavel Tatashin
  0 siblings, 1 reply; 23+ messages in thread
From: Marc Zyngier @ 2021-04-08 15:01 UTC (permalink / raw)
  To: Pavel Tatashin
  Cc: James Morris, Sasha Levin, Eric W. Biederman, kexec mailing list,
	LKML, Jonathan Corbet, Catalin Marinas, Will Deacon, Linux ARM,
	James Morse, Vladimir Murzin, Matthias Brugger, linux-mm,
	Mark Rutland, steve.capper, rfontana, Thomas Gleixner, Selin Dag,
	Tyler Hicks, Pingfan Liu

On Thu, 08 Apr 2021 15:45:18 +0100,
Pavel Tatashin <pasha.tatashin@soleen.com> wrote:
> 
> On Thu, Apr 8, 2021 at 6:24 AM Marc Zyngier <maz@kernel.org> wrote:
> >
> > On 2021-04-08 05:05, Pavel Tatashin wrote:
> > > From: James Morse <james.morse@arm.com>
> > >
> > > The hyp-stub's el1_sync code doesn't do very much, this can easily fit
> > > in the vectors.
> > >
> > > With this, all of the hyp-stubs behaviour is contained in its vectors.
> > > This lets kexec and hibernate copy the hyp-stub when they need its
> > > behaviour, instead of re-implementing it.
> > >
> > > Signed-off-by: James Morse <james.morse@arm.com>
> > >
> > > [Fixed merging issues]
> >
> > That's a pretty odd fix IMO.
> >
> > >
> > > Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
> > > ---
> > >  arch/arm64/kernel/hyp-stub.S | 59 ++++++++++++++++++------------------
> > >  1 file changed, 29 insertions(+), 30 deletions(-)
> > >
> > > diff --git a/arch/arm64/kernel/hyp-stub.S
> > > b/arch/arm64/kernel/hyp-stub.S
> > > index ff329c5c074d..d1a73d0f74e0 100644
> > > --- a/arch/arm64/kernel/hyp-stub.S
> > > +++ b/arch/arm64/kernel/hyp-stub.S
> > > @@ -21,6 +21,34 @@ SYM_CODE_START_LOCAL(\label)
> > >       .align 7
> > >       b       \label
> > >  SYM_CODE_END(\label)
> > > +.endm
> > > +
> > > +.macro hyp_stub_el1_sync
> > > +SYM_CODE_START_LOCAL(hyp_stub_el1_sync)
> > > +     .align 7
> > > +     cmp     x0, #HVC_SET_VECTORS
> > > +     b.ne    2f
> > > +     msr     vbar_el2, x1
> > > +     b       9f
> > > +
> > > +2:   cmp     x0, #HVC_SOFT_RESTART
> > > +     b.ne    3f
> > > +     mov     x0, x2
> > > +     mov     x2, x4
> > > +     mov     x4, x1
> > > +     mov     x1, x3
> > > +     br      x4                              // no return
> > > +
> > > +3:   cmp     x0, #HVC_RESET_VECTORS
> > > +     beq     9f                              // Nothing to reset!
> > > +
> > > +     /* Someone called kvm_call_hyp() against the hyp-stub... */
> > > +     mov_q   x0, HVC_STUB_ERR
> > > +     eret
> > > +
> > > +9:   mov     x0, xzr
> > > +     eret
> > > +SYM_CODE_END(hyp_stub_el1_sync)
> >
> > You said you tested this on a TX2. I guess you don't care whether
> > it runs VHE or not...
> 
> Hi Marc,
> 
> Thank you for noticing this. Not sure how this mismerge happened. I
> have added the missing case, and VHE is initialized correctly during
> boot.
> [   14.698175] kvm [1]: VHE mode initialized successfully
> 
> During normal boot, kexec reboot, and kdump reboot. I will respin the
> series and send version 14 soon.

Please give people a chance to review this lot first. This isn't code
that is easy to digest, and immediate re-spinning does more harm than
good (this isn't targeting 5.13, I would assume).

Thanks,

	M.

-- 
Without deviation from the norm, progress is not possible.


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH v13 03/18] arm64: hyp-stub: Move el1_sync into the vectors
  2021-04-08 15:01       ` Marc Zyngier
@ 2021-04-08 16:28         ` Pavel Tatashin
  2021-04-26 18:10           ` Pavel Tatashin
  0 siblings, 1 reply; 23+ messages in thread
From: Pavel Tatashin @ 2021-04-08 16:28 UTC (permalink / raw)
  To: Marc Zyngier
  Cc: James Morris, Sasha Levin, Eric W. Biederman, kexec mailing list,
	LKML, Jonathan Corbet, Catalin Marinas, Will Deacon, Linux ARM,
	James Morse, Vladimir Murzin, Matthias Brugger, linux-mm,
	Mark Rutland, steve.capper, rfontana, Thomas Gleixner, Selin Dag,
	Tyler Hicks, Pingfan Liu

> > Thank you for noticing this. Not sure how this mismerge happened. I
> > have added the missing case, and VHE is initialized correctly during
> > boot.
> > [   14.698175] kvm [1]: VHE mode initialized successfully
> >
> > During normal boot, kexec reboot, and kdump reboot. I will respin the
> > series and send version 14 soon.
>
> Please give people a chance to review this lot first. This isn't code
> that is easy to digest, and immediate re-spinning does more harm than
> good (this isn't targeting 5.13, I would assume).
>

There are people testing this series, which is why I wanted to
respin. But I will wait for review comments before sending the next
version. In the meantime I will send a fixed version of this patch as
a reply to this thread instead.

Thanks,
Pasha


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH v13 03/18] arm64: hyp-stub: Move el1_sync into the vectors
  2021-04-08 16:28         ` Pavel Tatashin
@ 2021-04-26 18:10           ` Pavel Tatashin
  2021-04-26 18:11             ` Pavel Tatashin
  2021-04-26 18:24             ` Marc Zyngier
  0 siblings, 2 replies; 23+ messages in thread
From: Pavel Tatashin @ 2021-04-26 18:10 UTC (permalink / raw)
  To: Marc Zyngier
  Cc: James Morris, Sasha Levin, Eric W. Biederman, kexec mailing list,
	LKML, Jonathan Corbet, Catalin Marinas, Will Deacon, Linux ARM,
	James Morse, Vladimir Murzin, Matthias Brugger, linux-mm,
	Mark Rutland, steve.capper, rfontana, Thomas Gleixner, Selin Dag,
	Tyler Hicks, Pingfan Liu

Hi Marc

Are you planning to send more review comments, or should I send the new version?

Thanks,
Pasha

On Thu, Apr 8, 2021 at 12:28 PM Pavel Tatashin
<pasha.tatashin@soleen.com> wrote:
>
> > > Thank you for noticing this. Not sure how this mismerge happened. I
> > > have added the missing case, and VHE is initialized correctly during
> > > boot.
> > > [   14.698175] kvm [1]: VHE mode initialized successfully
> > >
> > > During normal boot, kexec reboot, and kdump reboot. I will respin the
> > > series and send version 14 soon.
> >
> > Please give people a chance to review this lot first. This isn't code
> > that is easy to digest, and immediate re-spinning does more harm than
> > good (this isn't targeting 5.13, I would assume).
> >
>
> There are people testing this series, which is why I wanted to
> respin. But I will wait for review comments before sending the next
> version. In the meantime I will send a fixed version of this patch as
> a reply to this thread instead.
>
> Thanks,
> Pasha


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH v13 03/18] arm64: hyp-stub: Move el1_sync into the vectors
  2021-04-26 18:10           ` Pavel Tatashin
@ 2021-04-26 18:11             ` Pavel Tatashin
  2021-04-26 18:24             ` Marc Zyngier
  1 sibling, 0 replies; 23+ messages in thread
From: Pavel Tatashin @ 2021-04-26 18:11 UTC (permalink / raw)
  To: Marc Zyngier
  Cc: James Morris, Sasha Levin, Eric W. Biederman, kexec mailing list,
	LKML, Jonathan Corbet, Catalin Marinas, Will Deacon, Linux ARM,
	James Morse, Vladimir Murzin, Matthias Brugger, linux-mm,
	Mark Rutland, steve.capper, rfontana, Thomas Gleixner, Selin Dag,
	Tyler Hicks, Pingfan Liu

> > > Please give people a chance to review this lot first. This isn't code
> > > that is easy to digest, and immediate re-spinning does more harm than
> > > good (this isn't targeting 5.13, I would assume).

Sorry for top posting; the previous e-mail was intended as a reply to the above.

Pasha


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH v13 03/18] arm64: hyp-stub: Move el1_sync into the vectors
  2021-04-26 18:10           ` Pavel Tatashin
  2021-04-26 18:11             ` Pavel Tatashin
@ 2021-04-26 18:24             ` Marc Zyngier
  1 sibling, 0 replies; 23+ messages in thread
From: Marc Zyngier @ 2021-04-26 18:24 UTC (permalink / raw)
  To: Pavel Tatashin
  Cc: James Morris, Sasha Levin, Eric W. Biederman, kexec mailing list,
	LKML, Jonathan Corbet, Catalin Marinas, Will Deacon, Linux ARM,
	James Morse, Vladimir Murzin, Matthias Brugger, linux-mm,
	Mark Rutland, steve.capper, rfontana, Thomas Gleixner, Selin Dag,
	Tyler Hicks, Pingfan Liu

On 2021-04-26 19:10, Pavel Tatashin wrote:
> Hi Marc
> 
> Are you planning to send more review comments, or should I send the new 
> version?

Yeah, I'll hopefully have a bit more bandwidth in a few days.
You shouldn't post this kind of code during the merge window anyway... 
;-)

Thanks,

         M.
-- 
Jazz is not dead. It just smells funny...


^ permalink raw reply	[flat|nested] 23+ messages in thread

end of thread, other threads:[~2021-04-26 18:24 UTC | newest]

Thread overview: 23+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <20210408040537.2703241-1-pasha.tatashin@soleen.com>
2021-04-08  4:05 ` [PATCH v13 01/18] arm64: hyp-stub: Check the size of the HYP stub's vectors Pavel Tatashin
2021-04-08  4:05 ` [PATCH v13 02/18] arm64: hyp-stub: Move invalid vector entries into the vectors Pavel Tatashin
2021-04-08  4:05 ` [PATCH v13 03/18] arm64: hyp-stub: Move el1_sync " Pavel Tatashin
2021-04-08 10:24   ` Marc Zyngier
2021-04-08 14:45     ` Pavel Tatashin
2021-04-08 15:01       ` Marc Zyngier
2021-04-08 16:28         ` Pavel Tatashin
2021-04-26 18:10           ` Pavel Tatashin
2021-04-26 18:11             ` Pavel Tatashin
2021-04-26 18:24             ` Marc Zyngier
2021-04-08  4:05 ` [PATCH v13 04/18] arm64: kernel: add helper for booted at EL2 and not VHE Pavel Tatashin
2021-04-08  4:05 ` [PATCH v13 05/18] arm64: trans_pgd: hibernate: Add trans_pgd_copy_el2_vectors Pavel Tatashin
2021-04-08  4:05 ` [PATCH v13 06/18] arm64: hibernate: abstract ttrb0 setup function Pavel Tatashin
2021-04-08  4:05 ` [PATCH v13 07/18] arm64: kexec: flush image and lists during kexec load time Pavel Tatashin
2021-04-08  4:05 ` [PATCH v13 08/18] arm64: kexec: skip relocation code for inplace kexec Pavel Tatashin
2021-04-08  4:05 ` [PATCH v13 09/18] arm64: kexec: Use dcache ops macros instead of open-coding Pavel Tatashin
2021-04-08  4:05 ` [PATCH v13 10/18] arm64: kexec: pass kimage as the only argument to relocation function Pavel Tatashin
2021-04-08  4:05 ` [PATCH v13 12/18] arm64: kexec: relocate in EL1 mode Pavel Tatashin
2021-04-08  4:05 ` [PATCH v13 13/18] arm64: kexec: use ld script for relocation function Pavel Tatashin
2021-04-08  4:05 ` [PATCH v13 15/18] arm64: kexec: keep MMU enabled during kexec relocation Pavel Tatashin
2021-04-08  4:05 ` [PATCH v13 16/18] arm64: kexec: remove the pre-kexec PoC maintenance Pavel Tatashin
2021-04-08  4:05 ` [PATCH v13 17/18] arm64: kexec: Remove cpu-reset.h Pavel Tatashin
2021-04-08  4:05 ` [PATCH v13 18/18] arm64/mm: remove useless trans_pgd_map_page() Pavel Tatashin
