From: geoff@infradead.org (Geoff Levand) To: linux-arm-kernel@lists.infradead.org Subject: [PATCH 4/8] arm64: Add EL2 switch to soft_restart Date: Fri, 03 Oct 2014 23:12:23 +0000 [thread overview] Message-ID: <0829d85d769e7c40abe83f4006e7885765d0f0ff.1412376956.git.geoff@infradead.org> (raw) In-Reply-To: <cover.1412376956.git.geoff@infradead.org> When a CPU is reset it needs to be put into the exception level it had when it entered the kernel. Update cpu_reset() to accept an argument el2_switch which signals cpu_reset() to enter the soft reset address at EL2. If el2_switch is not set the soft reset address will be entered at EL1. Update cpu_soft_restart() and soft_restart() to pass the return of is_hyp_mode_available() as the el2_switch value to cpu_reset(). Also update the comments of cpu_reset(), cpu_soft_restart() and soft_restart() to reflect this change. Signed-off-by: Geoff Levand <geoff@infradead.org> --- arch/arm64/include/asm/proc-fns.h | 4 ++-- arch/arm64/kernel/process.c | 6 ++++- arch/arm64/mm/proc.S | 47 +++++++++++++++++++++++++++++---------- 3 files changed, 42 insertions(+), 15 deletions(-) diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 9a8fd84..339394d 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h @@ -32,8 +32,8 @@ extern void cpu_cache_off(void); extern void cpu_do_idle(void); extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); -void cpu_soft_restart(phys_addr_t cpu_reset, - unsigned long addr) __attribute__((noreturn)); +void cpu_soft_restart(phys_addr_t cpu_reset, unsigned long el2_switch, + unsigned long addr) __attribute__((noreturn)); extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr); extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index bf66922..0a3414b 100644 --- 
a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -50,6 +50,7 @@ #include <asm/mmu_context.h> #include <asm/processor.h> #include <asm/stacktrace.h> +#include <asm/virt.h> #ifdef CONFIG_CC_STACKPROTECTOR #include <linux/stackprotector.h> @@ -60,7 +61,10 @@ EXPORT_SYMBOL(__stack_chk_guard); void soft_restart(unsigned long addr) { setup_mm_for_reboot(); - cpu_soft_restart(virt_to_phys(cpu_reset), addr); + + cpu_soft_restart(virt_to_phys(cpu_reset), is_hyp_mode_available(), + addr); + /* Should never get here */ BUG(); } diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 4e778b1..7467199 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -25,6 +25,7 @@ #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> +#include <asm/virt.h> #include "proc-macros.S" @@ -59,27 +60,48 @@ ENTRY(cpu_cache_off) ENDPROC(cpu_cache_off) /* - * cpu_reset(loc) + * cpu_reset(el2_switch, loc) - Helper for cpu_soft_restart. * - * Perform a soft reset of the system. Put the CPU into the same state - * as it would be if it had been reset, and branch to what would be the - * reset vector. It must be executed with the flat identity mapping. + * @cpu_reset: Physical address of the cpu_reset routine. + * @el2_switch: Flag to indicate a swich to EL2 is needed. + * @addr: Location to jump to for soft reset. * - * - loc - location to jump to for soft reset + * Put the CPU into the same state as it would be if it had been reset, and + * branch to what would be the reset vector. It must be executed with the + * flat identity mapping. */ + .align 5 + ENTRY(cpu_reset) - mrs x1, sctlr_el1 - bic x1, x1, #1 - msr sctlr_el1, x1 // disable the MMU + mrs x2, sctlr_el1 + bic x2, x2, #1 + msr sctlr_el1, x2 // disable the MMU isb - ret x0 + + cbz x0, 1f // el2_switch? 
+ mov x0, x1 + mov x1, xzr + mov x2, xzr + mov x3, xzr + hvc #HVC_CALL_FUNC // no return + +1: ret x1 ENDPROC(cpu_reset) +/* + * cpu_soft_restart(cpu_reset, el2_switch, addr) - Perform a cpu soft reset. + * + * @cpu_reset: Physical address of the cpu_reset routine. + * @el2_switch: Flag to indicate a switch to EL2 is needed, passed to cpu_reset. + * @addr: Location to jump to for soft reset, passed to cpu_reset. + * + */ + ENTRY(cpu_soft_restart) - /* Save address of cpu_reset() and reset address */ - mov x19, x0 - mov x20, x1 + mov x19, x0 // cpu_reset + mov x20, x1 // el2_switch + mov x21, x2 // addr /* Turn D-cache off */ bl cpu_cache_off @@ -88,6 +110,7 @@ ENTRY(cpu_soft_restart) bl flush_cache_all mov x0, x20 + mov x1, x21 ret x19 ENDPROC(cpu_soft_restart) -- 1.9.1
WARNING: multiple messages have this Message-ID (diff)
From: Geoff Levand <geoff@infradead.org> To: Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will.deacon@arm.com> Cc: marc.zyngier@arm.com, kexec@lists.infradead.org, linux-arm-kernel@lists.infradead.org, christoffer.dall@linaro.org Subject: [PATCH 4/8] arm64: Add EL2 switch to soft_restart Date: Fri, 03 Oct 2014 23:12:23 +0000 [thread overview] Message-ID: <0829d85d769e7c40abe83f4006e7885765d0f0ff.1412376956.git.geoff@infradead.org> (raw) In-Reply-To: <cover.1412376956.git.geoff@infradead.org> When a CPU is reset it needs to be put into the exception level it had when it entered the kernel. Update cpu_reset() to accept an argument el2_switch which signals cpu_reset() to enter the soft reset address at EL2. If el2_switch is not set the soft reset address will be entered at EL1. Update cpu_soft_restart() and soft_restart() to pass the return of is_hyp_mode_available() as the el2_switch value to cpu_reset(). Also update the comments of cpu_reset(), cpu_soft_restart() and soft_restart() to reflect this change. 
Signed-off-by: Geoff Levand <geoff@infradead.org> --- arch/arm64/include/asm/proc-fns.h | 4 ++-- arch/arm64/kernel/process.c | 6 ++++- arch/arm64/mm/proc.S | 47 +++++++++++++++++++++++++++++---------- 3 files changed, 42 insertions(+), 15 deletions(-) diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 9a8fd84..339394d 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h @@ -32,8 +32,8 @@ extern void cpu_cache_off(void); extern void cpu_do_idle(void); extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); -void cpu_soft_restart(phys_addr_t cpu_reset, - unsigned long addr) __attribute__((noreturn)); +void cpu_soft_restart(phys_addr_t cpu_reset, unsigned long el2_switch, + unsigned long addr) __attribute__((noreturn)); extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr); extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index bf66922..0a3414b 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -50,6 +50,7 @@ #include <asm/mmu_context.h> #include <asm/processor.h> #include <asm/stacktrace.h> +#include <asm/virt.h> #ifdef CONFIG_CC_STACKPROTECTOR #include <linux/stackprotector.h> @@ -60,7 +61,10 @@ EXPORT_SYMBOL(__stack_chk_guard); void soft_restart(unsigned long addr) { setup_mm_for_reboot(); - cpu_soft_restart(virt_to_phys(cpu_reset), addr); + + cpu_soft_restart(virt_to_phys(cpu_reset), is_hyp_mode_available(), + addr); + /* Should never get here */ BUG(); } diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 4e778b1..7467199 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -25,6 +25,7 @@ #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> +#include <asm/virt.h> #include "proc-macros.S" @@ -59,27 +60,48 @@ ENTRY(cpu_cache_off) ENDPROC(cpu_cache_off) 
/* - * cpu_reset(loc) + * cpu_reset(el2_switch, loc) - Helper for cpu_soft_restart. * - * Perform a soft reset of the system. Put the CPU into the same state - * as it would be if it had been reset, and branch to what would be the - * reset vector. It must be executed with the flat identity mapping. + * @cpu_reset: Physical address of the cpu_reset routine. + * @el2_switch: Flag to indicate a switch to EL2 is needed. + * @addr: Location to jump to for soft reset. * - * - loc - location to jump to for soft reset + * Put the CPU into the same state as it would be if it had been reset, and + * branch to what would be the reset vector. It must be executed with the + * flat identity mapping. */ + .align 5 + ENTRY(cpu_reset) - mrs x1, sctlr_el1 - bic x1, x1, #1 - msr sctlr_el1, x1 // disable the MMU + mrs x2, sctlr_el1 + bic x2, x2, #1 + msr sctlr_el1, x2 // disable the MMU isb - ret x0 + + cbz x0, 1f // el2_switch? + mov x0, x1 + mov x1, xzr + mov x2, xzr + mov x3, xzr + hvc #HVC_CALL_FUNC // no return + +1: ret x1 ENDPROC(cpu_reset) +/* + * cpu_soft_restart(cpu_reset, el2_switch, addr) - Perform a cpu soft reset. + * + * @cpu_reset: Physical address of the cpu_reset routine. + * @el2_switch: Flag to indicate a switch to EL2 is needed, passed to cpu_reset. + * @addr: Location to jump to for soft reset, passed to cpu_reset. + * + */ + ENTRY(cpu_soft_restart) - /* Save address of cpu_reset() and reset address */ - mov x19, x0 - mov x20, x1 + mov x19, x0 // cpu_reset + mov x20, x1 // el2_switch + mov x21, x2 // addr /* Turn D-cache off */ bl cpu_cache_off @@ -88,6 +110,7 @@ ENTRY(cpu_soft_restart) bl flush_cache_all mov x0, x20 + mov x1, x21 ret x19 ENDPROC(cpu_soft_restart) -- 1.9.1 _______________________________________________ kexec mailing list kexec@lists.infradead.org http://lists.infradead.org/mailman/listinfo/kexec
next prev parent reply other threads:[~2014-10-03 23:12 UTC|newest] Thread overview: 32+ messages / expand[flat|nested] mbox.gz Atom feed top 2014-10-03 23:12 [PATCH 0/8] arm64 kexec kernel patches V4 Geoff Levand 2014-10-03 23:12 ` Geoff Levand 2014-10-03 23:12 ` [PATCH 2/8] arm64: Convert hcalls to use ISS field Geoff Levand 2014-10-03 23:12 ` Geoff Levand 2014-10-03 23:12 ` Geoff Levand [this message] 2014-10-03 23:12 ` [PATCH 4/8] arm64: Add EL2 switch to soft_restart Geoff Levand 2014-10-03 23:12 ` [PATCH 3/8] arm64: Add new hcall HVC_CALL_FUNC Geoff Levand 2014-10-03 23:12 ` Geoff Levand 2014-10-03 23:12 ` [PATCH 1/8] arm64/kvm: Fix assembler compatibility of macros Geoff Levand 2014-10-03 23:12 ` Geoff Levand 2014-10-03 23:12 ` [PATCH 7/8] arm64/kexec: Enable kexec in the arm64 defconfig Geoff Levand 2014-10-03 23:12 ` Geoff Levand 2014-10-03 23:12 ` [PATCH 6/8] arm64/kexec: Add core kexec support Geoff Levand 2014-10-03 23:12 ` Geoff Levand 2014-10-03 23:12 ` [PATCH 5/8] arm64: Move proc-macros.S to include/asm Geoff Levand 2014-10-03 23:12 ` Geoff Levand 2014-10-03 23:12 ` [PATCH 8/8] arm64/kexec: Add pr_devel output Geoff Levand 2014-10-03 23:12 ` Geoff Levand 2015-01-17 0:23 [PATCH 0/8] arm64 kexec kernel patches V7 Geoff Levand 2015-01-17 0:23 ` [PATCH 4/8] arm64: Add EL2 switch to soft_restart Geoff Levand 2015-01-17 0:23 ` Geoff Levand 2015-01-26 19:02 ` Mark Rutland 2015-01-26 19:02 ` Mark Rutland 2015-01-26 21:48 ` Geoff Levand 2015-01-26 21:48 ` Geoff Levand 2015-01-27 16:46 ` Mark Rutland 2015-01-27 16:46 ` Mark Rutland 2015-01-27 18:34 ` Geoff Levand 2015-01-27 18:34 ` Geoff Levand 2015-01-27 17:57 ` Catalin Marinas 2015-01-27 17:57 ` Catalin Marinas 2015-01-30 21:47 ` Geoff Levand 2015-01-30 21:47 ` Geoff Levand
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=0829d85d769e7c40abe83f4006e7885765d0f0ff.1412376956.git.geoff@infradead.org \ --to=geoff@infradead.org \ --cc=linux-arm-kernel@lists.infradead.org \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.