* [PATCH 5/6] arm64/kexec: Add core kexec support
From: Geoff Levand @ 2015-03-19 20:35 UTC
  To: linux-arm-kernel

Add three new files, kexec.h, machine_kexec.c and relocate_kernel.S, to the
arm64 architecture that add support for the kexec re-boot mechanism
(CONFIG_KEXEC) on arm64 platforms.

With the addition of arm64 kexec support, shutdown code paths through the
kernel are executed that previously were not.  To avoid system instability due
to problems in the current arm64 KVM kernel implementation, add a Kconfig
dependency on !KEXEC to the arm64 KVM menu item.
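
For context (not part of the patch itself), a minimal user-space sketch of the
load-then-reboot sequence this enables is shown below.  The helper name,
segment layout and addresses are hypothetical; in practice the patched
kexec-tools mentioned in the cover letter builds the segment list.

  #include <linux/kexec.h>     /* struct kexec_segment, KEXEC_ARCH_ARM64 (added below) */
  #include <linux/reboot.h>    /* LINUX_REBOOT_CMD_KEXEC */
  #include <sys/reboot.h>      /* reboot() */
  #include <sys/syscall.h>
  #include <unistd.h>

  /* Hypothetical helper: image_buf holds a kernel image already read into
   * memory; load_addr and entry must be valid for the running system. */
  static int load_and_kexec(void *image_buf, size_t image_size,
                            unsigned long load_addr, unsigned long entry)
  {
          struct kexec_segment seg = {
                  .buf   = image_buf,
                  .bufsz = image_size,
                  .mem   = (void *)load_addr,
                  .memsz = (image_size + 4095) & ~4095UL,  /* page aligned */
          };

          if (syscall(SYS_kexec_load, entry, 1UL, &seg, KEXEC_ARCH_ARM64))
                  return -1;

          return reboot(LINUX_REBOOT_CMD_KEXEC);  /* does not return on success */
  }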

Signed-off-by: Geoff Levand <geoff@infradead.org>
---
 arch/arm64/Kconfig                  |   9 +++
 arch/arm64/include/asm/kexec.h      |  48 ++++++++++++
 arch/arm64/kernel/Makefile          |   1 +
 arch/arm64/kernel/machine_kexec.c   | 125 ++++++++++++++++++++++++++++++
 arch/arm64/kernel/relocate_kernel.S | 149 ++++++++++++++++++++++++++++++++++++
 arch/arm64/kvm/Kconfig              |   1 +
 include/uapi/linux/kexec.h          |   1 +
 7 files changed, 334 insertions(+)
 create mode 100644 arch/arm64/include/asm/kexec.h
 create mode 100644 arch/arm64/kernel/machine_kexec.c
 create mode 100644 arch/arm64/kernel/relocate_kernel.S

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1b8e973..5a606d1 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -528,6 +528,15 @@ config SECCOMP
 	  and the task is only allowed to execute a few safe syscalls
 	  defined by each seccomp mode.
 
+config KEXEC
+	depends on (!SMP || PM_SLEEP_SMP)
+	bool "kexec system call"
+	---help---
+	  kexec is a system call that implements the ability to shutdown your
+	  current kernel, and to start another kernel.  It is like a reboot
+	  but it is independent of the system firmware.   And like a reboot
+	  you can start any kernel with it, not just Linux.
+
 config XEN_DOM0
 	def_bool y
 	depends on XEN
diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
new file mode 100644
index 0000000..3530ff5
--- /dev/null
+++ b/arch/arm64/include/asm/kexec.h
@@ -0,0 +1,48 @@
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ * Copyright (C) Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(_ARM64_KEXEC_H)
+#define _ARM64_KEXEC_H
+
+/* Maximum physical address we can use pages from */
+
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can reach in physical address mode */
+
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can use for the control code buffer */
+
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
+
+#define KEXEC_CONTROL_PAGE_SIZE	4096
+
+#define KEXEC_ARCH KEXEC_ARCH_ARM64
+
+#if !defined(__ASSEMBLY__)
+
+/**
+ * crash_setup_regs() - save registers for the panic kernel
+ *
+ * @newregs: registers are saved here
+ * @oldregs: registers to be saved (may be %NULL)
+ */
+
+static inline void crash_setup_regs(struct pt_regs *newregs,
+				    struct pt_regs *oldregs)
+{
+	/* Empty routine needed to avoid build errors. */
+}
+
+#endif /* !defined(__ASSEMBLY__) */
+
+#endif
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 5ee07ee..da9a7ee 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -35,6 +35,7 @@ arm64-obj-$(CONFIG_KGDB)		+= kgdb.o
 arm64-obj-$(CONFIG_EFI)			+= efi.o efi-stub.o efi-entry.o
 arm64-obj-$(CONFIG_PCI)			+= pci.o
 arm64-obj-$(CONFIG_ARMV8_DEPRECATED)	+= armv8_deprecated.o
+arm64-obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
 
 obj-y					+= $(arm64-obj-y) vdso/
 obj-m					+= $(arm64-obj-m)
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
new file mode 100644
index 0000000..f1387d0
--- /dev/null
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -0,0 +1,125 @@
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ * Copyright (C) Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kexec.h>
+#include <linux/of_fdt.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/system_misc.h>
+
+/* Global variables for the relocate_kernel routine. */
+extern const unsigned char relocate_new_kernel[];
+extern const unsigned long relocate_new_kernel_size;
+extern unsigned long arm64_kexec_dtb_addr;
+extern unsigned long arm64_kexec_kimage_head;
+extern unsigned long arm64_kexec_kimage_start;
+
+void machine_kexec_cleanup(struct kimage *image)
+{
+	/* Empty routine needed to avoid build errors. */
+}
+
+/**
+ * machine_kexec_prepare - Prepare for a kexec reboot.
+ *
+ * Called from the core kexec code when a kernel image is loaded.
+ */
+int machine_kexec_prepare(struct kimage *image)
+{
+	arm64_kexec_kimage_start = image->start;
+	return 0;
+}
+
+/**
+ * kexec_list_flush - Helper to flush the kimage list to PoC.
+ */
+static void kexec_list_flush(unsigned long kimage_head)
+{
+	void *dest;
+	unsigned long *entry;
+
+	for (entry = &kimage_head, dest = NULL; ; entry++) {
+		unsigned int flag = *entry &
+			(IND_DESTINATION | IND_INDIRECTION | IND_DONE |
+			IND_SOURCE);
+		void *addr = phys_to_virt(*entry & PAGE_MASK);
+
+		switch (flag) {
+		case IND_INDIRECTION:
+			entry = (unsigned long *)addr - 1;
+			__flush_dcache_area(addr, PAGE_SIZE);
+			break;
+		case IND_DESTINATION:
+			dest = addr;
+			break;
+		case IND_SOURCE:
+			__flush_dcache_area(addr, PAGE_SIZE);
+			dest += PAGE_SIZE;
+			break;
+		case IND_DONE:
+			return;
+		default:
+			BUG();
+		}
+	}
+}
+
+/**
+ * machine_kexec - Do the kexec reboot.
+ *
+ * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
+ */
+void machine_kexec(struct kimage *image)
+{
+	phys_addr_t reboot_code_buffer_phys;
+	void *reboot_code_buffer;
+
+	BUG_ON(num_online_cpus() > 1);
+
+	arm64_kexec_kimage_head = image->head;
+
+	reboot_code_buffer_phys = page_to_phys(image->control_code_page);
+	reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);
+
+	/*
+	 * Copy relocate_new_kernel to the reboot_code_buffer for use
+	 * after the kernel is shut down.
+	 */
+	memcpy(reboot_code_buffer, relocate_new_kernel,
+		relocate_new_kernel_size);
+
+	/* Flush the reboot_code_buffer in preparation for its execution. */
+	__flush_dcache_area(reboot_code_buffer, relocate_new_kernel_size);
+
+	/* Flush the kimage list. */
+	kexec_list_flush(image->head);
+
+	pr_info("Bye!\n");
+
+	/* Disable all DAIF exceptions. */
+	asm volatile ("msr daifset, #0xf" : : : "memory");
+
+	/*
+	 * soft_restart() will shutdown the MMU, disable data caches, then
+	 * transfer control to the reboot_code_buffer which contains a copy of
+	 * the relocate_new_kernel routine.  relocate_new_kernel will use
+	 * physical addressing to relocate the new kernel to its final position
+	 * and then will transfer control to the entry point of the new kernel.
+	 */
+	soft_restart(reboot_code_buffer_phys);
+}
+
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+	/* Empty routine needed to avoid build errors. */
+}
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
new file mode 100644
index 0000000..166d960
--- /dev/null
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -0,0 +1,149 @@
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ * Copyright (C) Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kexec.h>
+
+#include <asm/assembler.h>
+#include <asm/kexec.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+
+
+/*
+ * relocate_new_kernel - Put a 2nd stage kernel image in place and boot it.
+ *
+ * The memory that the old kernel occupies may be overwritten when copying the
+ * new image to its final location.  To ensure that the relocate_new_kernel
+ * routine which does that copy is not overwritten, all code and data needed
+ * by relocate_new_kernel must be between the symbols relocate_new_kernel and
+ * relocate_new_kernel_end.  The machine_kexec() routine will copy
+ * relocate_new_kernel to the kexec control_code_page, a special page which
+ * has been set up to be preserved during the copy operation.
+ */
+.globl relocate_new_kernel
+relocate_new_kernel:
+
+	/* Setup the list loop variables. */
+	ldr	x18, arm64_kexec_kimage_head	/* x18 = list entry */
+	dcache_line_size x17, x0		/* x17 = dcache line size */
+	mov	x16, xzr			/* x16 = segment start */
+	mov	x15, xzr			/* x15 = entry ptr */
+	mov	x14, xzr			/* x14 = copy dest */
+
+	/* Check if the new image needs relocation. */
+	cbz	x18, .Ldone
+	tbnz	x18, IND_DONE_BIT, .Ldone
+
+.Lloop:
+	and	x13, x18, PAGE_MASK		/* x13 = addr */
+
+	/* Test the entry flags. */
+.Ltest_source:
+	tbz	x18, IND_SOURCE_BIT, .Ltest_indirection
+
+	mov x20, x14				/*  x20 = copy dest */
+	mov x21, x13				/*  x21 = copy src */
+
+	/* Invalidate dest page to PoC. */
+	mov	x0, x20
+	add	x19, x0, #PAGE_SIZE
+	sub	x1, x17, #1
+	bic	x0, x0, x1
+1:	dc	ivac, x0
+	add	x0, x0, x17
+	cmp	x0, x19
+	b.lo	1b
+	dsb	sy
+
+	/* Copy page. */
+1:	ldp	x22, x23, [x21]
+	ldp	x24, x25, [x21, #16]
+	ldp	x26, x27, [x21, #32]
+	ldp	x28, x29, [x21, #48]
+	add	x21, x21, #64
+	stnp	x22, x23, [x20]
+	stnp	x24, x25, [x20, #16]
+	stnp	x26, x27, [x20, #32]
+	stnp	x28, x29, [x20, #48]
+	add	x20, x20, #64
+	tst	x21, #(PAGE_SIZE - 1)
+	b.ne	1b
+
+	/* dest += PAGE_SIZE */
+	add	x14, x14, PAGE_SIZE
+	b	.Lnext
+
+.Ltest_indirection:
+	tbz	x18, IND_INDIRECTION_BIT, .Ltest_destination
+
+	/* ptr = addr */
+	mov	x15, x13
+	b	.Lnext
+
+.Ltest_destination:
+	tbz	x18, IND_DESTINATION_BIT, .Lnext
+
+	mov	x16, x13
+
+	/* dest = addr */
+	mov	x14, x13
+
+.Lnext:
+	/* entry = *ptr++ */
+	ldr	x18, [x15], #8
+
+	/* while (!(entry & DONE)) */
+	tbz	x18, IND_DONE_BIT, .Lloop
+
+.Ldone:
+	dsb	sy
+	isb
+	ic	ialluis
+	dsb	sy
+	isb
+
+	/* Start new image. */
+	ldr	x4, arm64_kexec_kimage_start
+	mov	x0, xzr
+	mov	x1, xzr
+	mov	x2, xzr
+	mov	x3, xzr
+	br	x4
+
+.align 3	/* To keep the 64-bit values below naturally aligned. */
+
+/* The machine_kexec routines set these variables. */
+
+/*
+ * arm64_kexec_kimage_start - Copy of image->start, the entry point of the new
+ * image.
+ */
+.globl arm64_kexec_kimage_start
+arm64_kexec_kimage_start:
+	.quad	0x0
+
+/*
+ * arm64_kexec_kimage_head - Copy of image->head, the list of kimage entries.
+ */
+.globl arm64_kexec_kimage_head
+arm64_kexec_kimage_head:
+	.quad	0x0
+
+.Lrelocate_new_kernel_end:
+
+/*
+ * relocate_new_kernel_size - Number of bytes to copy to the control_code_page.
+ */
+.globl relocate_new_kernel_size
+relocate_new_kernel_size:
+	.quad .Lrelocate_new_kernel_end - relocate_new_kernel
+
+.org	KEXEC_CONTROL_PAGE_SIZE
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index f5590c8..30ae7a7 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -18,6 +18,7 @@ if VIRTUALIZATION
 
 config KVM
 	bool "Kernel-based Virtual Machine (KVM) support"
+	depends on !KEXEC
 	select MMU_NOTIFIER
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h
index 99048e5..ccec467 100644
--- a/include/uapi/linux/kexec.h
+++ b/include/uapi/linux/kexec.h
@@ -39,6 +39,7 @@
 #define KEXEC_ARCH_SH      (42 << 16)
 #define KEXEC_ARCH_MIPS_LE (10 << 16)
 #define KEXEC_ARCH_MIPS    ( 8 << 16)
+#define KEXEC_ARCH_ARM64   (183 << 16)
 
 /* The artificial cap on the number of segments passed to kexec_load. */
 #define KEXEC_SEGMENT_MAX 16
-- 
2.1.0

* [PATCH 2/6] arm64: Convert hcalls to use HVC immediate value
From: Geoff Levand @ 2015-03-19 20:35 UTC
  To: linux-arm-kernel

The existing arm64 hcall implementations are limited in that they only allow
for two distinct hcalls, selected by whether the x0 register is zero or
non-zero.  Also, the API of the hyp-stub exception vector routines and the KVM
exception vector routines differ; hyp-stub uses a non-zero value in x0 to
implement __hyp_set_vectors, whereas KVM uses it to implement kvm_call_hyp.

To allow additional hcalls to be defined and to make the arm64 hcall API more
consistent across exception vector routines, change the hcall implementations
to use the 16-bit immediate value of the HVC instruction to specify the hcall
type.

Define three new preprocessor macros HVC_CALL_HYP, HVC_GET_VECTORS, and
HVC_SET_VECTORS to be used as hcall type specifiers and convert the
existing __hyp_get_vectors(), __hyp_set_vectors() and kvm_call_hyp() routines
to use these new macros when executing an HVC call.  Also, change the
corresponding hyp-stub and KVM el1_sync exception vector routines to use these
new macros.
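
As a rough illustration only (the real changes below are to the el1_sync
assembly handlers), the EL2-side dispatch now works along these lines.  This
is a hedged C sketch, meaningful only at EL2, using the same constants the
assembly below uses:

  #include <asm/kvm_arm.h>   /* ESR_ELx_* definitions, as in the hyp-stub change below */
  #include <asm/virt.h>      /* HVC_GET_VECTORS, HVC_SET_VECTORS (added below) */

  /* Sketch of the immediate-based hcall dispatch.  The trapped ESR_EL2 value
   * is passed in; regs[0] stands in for the trapping context's x0. */
  static void el1_sync_dispatch(unsigned long esr, unsigned long *regs)
  {
          unsigned int ec = esr >> ESR_ELx_EC_SHIFT;
          unsigned long iss = esr & ESR_ELx_ISS_MASK;

          if (ec != ESR_ELx_EC_HVC64)
                  return;                         /* not an HVC trap */

          switch (iss) {
          case HVC_GET_VECTORS:
                  /* Return vbar_el2 in x0. */
                  asm volatile("mrs %0, vbar_el2" : "=r" (regs[0]));
                  break;
          case HVC_SET_VECTORS:
                  /* New vector table address arrives in x0. */
                  asm volatile("msr vbar_el2, %0" : : "r" (regs[0]));
                  break;
          default:                                /* e.g. HVC_CALL_HYP */
                  break;
          }
  }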

Signed-off-by: Geoff Levand <geoff@infradead.org>
---
 arch/arm64/include/asm/virt.h | 27 +++++++++++++++++++++++++++
 arch/arm64/kernel/hyp-stub.S  | 32 +++++++++++++++++++++-----------
 arch/arm64/kvm/hyp.S          | 16 +++++++++-------
 3 files changed, 57 insertions(+), 18 deletions(-)

diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 7a5df52..eb10368 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -18,6 +18,33 @@
 #ifndef __ASM__VIRT_H
 #define __ASM__VIRT_H
 
+/*
+ * The arm64 hcall implementation uses the ISS field of the ESR_EL2 register to
+ * specify the hcall type.  The exception handlers are allowed to use registers
+ * x17 and x18 in their implementation.  Any routine issuing an hcall must not
+ * expect these registers to be preserved.
+ */
+
+/*
+ * HVC_CALL_HYP - Execute a hyp routine.
+ */
+
+#define HVC_CALL_HYP 0
+
+/*
+ * HVC_GET_VECTORS - Return the value of the vbar_el2 register.
+ */
+
+#define HVC_GET_VECTORS 1
+
+/*
+ * HVC_SET_VECTORS - Set the value of the vbar_el2 register.
+ *
+ * @x0: Physical address of the new vector table.
+ */
+
+#define HVC_SET_VECTORS 2
+
 #define BOOT_CPU_MODE_EL1	(0xe11)
 #define BOOT_CPU_MODE_EL2	(0xe12)
 
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index a272f33..017ab519 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -22,6 +22,7 @@
 #include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/assembler.h>
+#include <asm/kvm_arm.h>
 #include <asm/ptrace.h>
 #include <asm/virt.h>
 
@@ -53,14 +54,22 @@ ENDPROC(__hyp_stub_vectors)
 	.align 11
 
 el1_sync:
-	mrs	x1, esr_el2
-	lsr	x1, x1, #26
-	cmp	x1, #0x16
+	mrs	x18, esr_el2
+	lsr	x17, x18, #ESR_ELx_EC_SHIFT
+	and	x18, x18, #ESR_ELx_ISS_MASK
+
+	cmp	x17, #ESR_ELx_EC_HVC64
 	b.ne	2f				// Not an HVC trap
-	cbz	x0, 1f
-	msr	vbar_el2, x0			// Set vbar_el2
+
+	cmp	x18, #HVC_GET_VECTORS
+	b.ne	1f
+	mrs	x0, vbar_el2
 	b	2f
-1:	mrs	x0, vbar_el2			// Return vbar_el2
+
+1:	cmp	x18, #HVC_SET_VECTORS
+	b.ne	2f
+	msr	vbar_el2, x0
+
 2:	eret
 ENDPROC(el1_sync)
 
@@ -100,11 +109,12 @@ ENDPROC(\label)
  * initialisation entry point.
  */
 
-ENTRY(__hyp_get_vectors)
-	mov	x0, xzr
-	// fall through
 ENTRY(__hyp_set_vectors)
-	hvc	#0
+	hvc	#HVC_SET_VECTORS
 	ret
-ENDPROC(__hyp_get_vectors)
 ENDPROC(__hyp_set_vectors)
+
+ENTRY(__hyp_get_vectors)
+	hvc	#HVC_GET_VECTORS
+	ret
+ENDPROC(__hyp_get_vectors)
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 5befd01..fd085ec 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -27,6 +27,7 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
 #include <asm/memory.h>
+#include <asm/virt.h>
 
 #define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
 #define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
@@ -1129,12 +1130,9 @@ __hyp_panic_str:
  * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
  * passed in r0 and r1.
  *
- * A function pointer with a value of 0 has a special meaning, and is
- * used to implement __hyp_get_vectors in the same way as in
- * arch/arm64/kernel/hyp_stub.S.
  */
 ENTRY(kvm_call_hyp)
-	hvc	#0
+	hvc	#HVC_CALL_HYP
 	ret
 ENDPROC(kvm_call_hyp)
 
@@ -1165,6 +1163,7 @@ el1_sync:					// Guest trapped into EL2
 
 	mrs	x1, esr_el2
 	lsr	x2, x1, #ESR_ELx_EC_SHIFT
+	and	x0, x1, #ESR_ELx_ISS_MASK
 
 	cmp	x2, #ESR_ELx_EC_HVC64
 	b.ne	el1_trap
@@ -1173,15 +1172,18 @@ el1_sync:					// Guest trapped into EL2
 	cbnz	x3, el1_trap			// called HVC
 
 	/* Here, we're pretty sure the host called HVC. */
+	mov	x18, x0
 	pop	x2, x3
 	pop	x0, x1
 
-	/* Check for __hyp_get_vectors */
-	cbnz	x0, 1f
+	cmp	x18, #HVC_GET_VECTORS
+	b.ne	1f
 	mrs	x0, vbar_el2
 	b	2f
 
-1:	push	lr, xzr
+1:	/* Default to HVC_CALL_HYP. */
+
+	push	lr, xzr
 
 	/*
 	 * Compute the function address in EL2, and shuffle the parameters.
-- 
2.1.0

* [PATCH 1/6] arm64: Fold proc-macros.S into assembler.h
From: Geoff Levand @ 2015-03-19 20:35 UTC
  To: linux-arm-kernel

To allow the assembler macros defined in arch/arm64/mm/proc-macros.S to be
used outside the mm code, move the contents of proc-macros.S to
asm/assembler.h.  Also, delete proc-macros.S and fix up all references to it.
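
For reference (a sketch for illustration, not part of the patch), the value the
dcache_line_size macro computes can be written in C as:

  /* CTR_EL0.DminLine (bits [19:16]) holds log2 of the number of 4-byte words
   * in the smallest D-cache line, so the line size in bytes is 4 << DminLine.
   * This mirrors the ubfm/mov/lsl sequence in the macro moved below. */
  static inline unsigned int dcache_line_size_bytes(unsigned long ctr_el0)
  {
          unsigned int dminline = (ctr_el0 >> 16) & 0xf;

          return 4U << dminline;
  }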

Signed-off-by: Geoff Levand <geoff@infradead.org>
---
 arch/arm64/include/asm/assembler.h | 37 +++++++++++++++++++++++++-
 arch/arm64/mm/cache.S              |  2 --
 arch/arm64/mm/proc-macros.S        | 54 --------------------------------------
 arch/arm64/mm/proc.S               |  2 --
 4 files changed, 36 insertions(+), 59 deletions(-)
 delete mode 100644 arch/arm64/mm/proc-macros.S

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 750bac4..47962f6be 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -1,5 +1,5 @@
 /*
- * Based on arch/arm/include/asm/assembler.h
+ * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
  *
  * Copyright (C) 1996-2000 Russell King
  * Copyright (C) 2012 ARM Ltd.
@@ -23,6 +23,7 @@
 #ifndef __ASM_ASSEMBLER_H
 #define __ASM_ASSEMBLER_H
 
+#include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 
@@ -159,4 +160,38 @@ lr	.req	x30		// link register
 	orr	\rd, \lbits, \hbits, lsl #32
 	.endm
 
+/*
+ * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
+ */
+	.macro	vma_vm_mm, rd, rn
+	ldr	\rd, [\rn, #VMA_VM_MM]
+	.endm
+
+/*
+ * mmid - get context id from mm pointer (mm->context.id)
+ */
+	.macro	mmid, rd, rn
+	ldr	\rd, [\rn, #MM_CONTEXT_ID]
+	.endm
+
+/*
+ * dcache_line_size - get the minimum D-cache line size from the CTR register.
+ */
+	.macro	dcache_line_size, reg, tmp
+	mrs	\tmp, ctr_el0			// read CTR
+	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
+	mov	\reg, #4			// bytes per word
+	lsl	\reg, \reg, \tmp		// actual cache line size
+	.endm
+
+/*
+ * icache_line_size - get the minimum I-cache line size from the CTR register.
+ */
+	.macro	icache_line_size, reg, tmp
+	mrs	\tmp, ctr_el0			// read CTR
+	and	\tmp, \tmp, #0xf		// cache line size encoding
+	mov	\reg, #4			// bytes per word
+	lsl	\reg, \reg, \tmp		// actual cache line size
+	.endm
+
 #endif	/* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 2560e1e..2d7a67c 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -24,8 +24,6 @@
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 
-#include "proc-macros.S"
-
 /*
  *	__flush_dcache_all()
  *
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
deleted file mode 100644
index 005d29e..0000000
--- a/arch/arm64/mm/proc-macros.S
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Based on arch/arm/mm/proc-macros.S
- *
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-
-/*
- * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
- */
-	.macro	vma_vm_mm, rd, rn
-	ldr	\rd, [\rn, #VMA_VM_MM]
-	.endm
-
-/*
- * mmid - get context id from mm pointer (mm->context.id)
- */
-	.macro	mmid, rd, rn
-	ldr	\rd, [\rn, #MM_CONTEXT_ID]
-	.endm
-
-/*
- * dcache_line_size - get the minimum D-cache line size from the CTR register.
- */
-	.macro	dcache_line_size, reg, tmp
-	mrs	\tmp, ctr_el0			// read CTR
-	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
-	mov	\reg, #4			// bytes per word
-	lsl	\reg, \reg, \tmp		// actual cache line size
-	.endm
-
-/*
- * icache_line_size - get the minimum I-cache line size from the CTR register.
- */
-	.macro	icache_line_size, reg, tmp
-	mrs	\tmp, ctr_el0			// read CTR
-	and	\tmp, \tmp, #0xf		// cache line size encoding
-	mov	\reg, #4			// bytes per word
-	lsl	\reg, \reg, \tmp		// actual cache line size
-	.endm
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 28eebfb..fe69f6e 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -26,8 +26,6 @@
 #include <asm/pgtable-hwdef.h>
 #include <asm/pgtable.h>
 
-#include "proc-macros.S"
-
 #ifdef CONFIG_ARM64_64K_PAGES
 #define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
 #else
-- 
2.1.0

* [PATCH 4/6] arm64: Add EL2 switch to soft_restart
From: Geoff Levand @ 2015-03-19 20:35 UTC
  To: linux-arm-kernel

When a CPU is reset, it needs to be put into the exception level it had when
it entered the kernel.  Update cpu_reset() to accept an argument which signals
whether the soft reset address needs to be entered at EL1 or EL2.

This implementation updates cpu_soft_restart() and soft_restart() to pass the
return value of is_hyp_mode_available() as the switch argument to cpu_reset().
Also, update the comments of cpu_reset(), cpu_soft_restart() and soft_restart()
to reflect this change.

Signed-off-by: Geoff Levand <geoff@infradead.org>
---
 arch/arm64/include/asm/proc-fns.h |  4 ++--
 arch/arm64/kernel/process.c       |  6 ++++-
 arch/arm64/mm/proc.S              | 47 +++++++++++++++++++++++++++++----------
 3 files changed, 42 insertions(+), 15 deletions(-)

diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 9a8fd84..339394d 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -32,8 +32,8 @@ extern void cpu_cache_off(void);
 extern void cpu_do_idle(void);
 extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
 extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
-void cpu_soft_restart(phys_addr_t cpu_reset,
-		unsigned long addr) __attribute__((noreturn));
+void cpu_soft_restart(phys_addr_t cpu_reset, unsigned long el2_switch,
+		      unsigned long addr) __attribute__((noreturn));
 extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
 extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
 
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index c6b1f3b..d894d3e 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -51,6 +51,7 @@
 #include <asm/mmu_context.h>
 #include <asm/processor.h>
 #include <asm/stacktrace.h>
+#include <asm/virt.h>
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 #include <linux/stackprotector.h>
@@ -61,7 +62,10 @@ EXPORT_SYMBOL(__stack_chk_guard);
 void soft_restart(unsigned long addr)
 {
 	setup_mm_for_reboot();
-	cpu_soft_restart(virt_to_phys(cpu_reset), addr);
+
+	cpu_soft_restart(virt_to_phys(cpu_reset),
+		is_hyp_mode_available(), addr);
+
 	/* Should never get here */
 	BUG();
 }
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index fe69f6e..4fe4b7d 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -25,6 +25,7 @@
 #include <asm/hwcap.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/pgtable.h>
+#include <asm/virt.h>
 
 #ifdef CONFIG_ARM64_64K_PAGES
 #define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
@@ -57,27 +58,48 @@ ENTRY(cpu_cache_off)
 ENDPROC(cpu_cache_off)
 
 /*
- *	cpu_reset(loc)
+ * cpu_reset(el2_switch, loc) - Helper for cpu_soft_restart.
  *
- *	Perform a soft reset of the system.  Put the CPU into the same state
- *	as it would be if it had been reset, and branch to what would be the
- *	reset vector. It must be executed with the flat identity mapping.
+ * @cpu_reset: Physical address of the cpu_reset routine.
+ * @el2_switch: Flag to indicate a switch to EL2 is needed.
+ * @addr: Location to jump to for soft reset.
  *
- *	- loc   - location to jump to for soft reset
+ * Put the CPU into the same state as it would be if it had been reset, and
+ * branch to what would be the reset vector. It must be executed with the
+ * flat identity mapping.
  */
+
 	.align	5
+
 ENTRY(cpu_reset)
-	mrs	x1, sctlr_el1
-	bic	x1, x1, #1
-	msr	sctlr_el1, x1			// disable the MMU
+	mrs	x2, sctlr_el1
+	bic	x2, x2, #1
+	msr	sctlr_el1, x2			// disable the MMU
 	isb
-	ret	x0
+
+	cbz	x0, 1f				// el2_switch?
+	mov	x0, x1
+	mov	x1, xzr
+	mov	x2, xzr
+	mov	x3, xzr
+	hvc	#HVC_CALL_FUNC			// no return
+
+1:	ret	x1
 ENDPROC(cpu_reset)
 
+/*
+ * cpu_soft_restart(cpu_reset, el2_switch, addr) - Perform a cpu soft reset.
+ *
+ * @cpu_reset: Physical address of the cpu_reset routine.
+ * @el2_switch: Flag to indicate a switch to EL2 is needed, passed to cpu_reset.
+ * @addr: Location to jump to for soft reset, passed to cpu_reset.
+ *
+ */
+
 ENTRY(cpu_soft_restart)
-	/* Save address of cpu_reset() and reset address */
-	mov	x19, x0
-	mov	x20, x1
+	mov	x19, x0				// cpu_reset
+	mov	x20, x1				// el2_switch
+	mov	x21, x2				// addr
 
 	/* Turn D-cache off */
 	bl	cpu_cache_off
@@ -86,6 +108,7 @@ ENTRY(cpu_soft_restart)
 	bl	flush_cache_all
 
 	mov	x0, x20
+	mov	x1, x21
 	ret	x19
 ENDPROC(cpu_soft_restart)
 
-- 
2.1.0

* [PATCH 0/6] arm64 kexec kernel patches V8
From: Geoff Levand @ 2015-03-19 20:35 UTC
  To: linux-arm-kernel

Hi All,

This series adds the core support for kexec re-boots on arm64.  This v8 of the
series is mainly just a rebase to Linux-4.0-rc3, and a few very minor changes
requested for v7.

To load a second stage kernel and execute a kexec re-boot on arm64, my patches
to kexec-tools [2], which have not yet been merged upstream, are needed.

I have tested with the ARM VE fast model, the ARM Base model and the ARM
Foundation model with various kernel config options for both the first and
second stage kernels.  Kexec on EFI systems works correctly.  With the ACPI
kernel patches from [3] applied, kexec on ACPI systems seems to work correctly.
More ACPI + kexec testing is needed.

Patch 1 here moves the macros from proc-macros.S to asm/assembler.h so that the
dcache_line_size macro it defines can be used by kexec's relocate kernel
routine.

Patches 2-4 rework the arm64 hcall mechanism to give the arm64 soft_restart()
routine the ability to switch exception levels from EL1 to EL2 for kernels that
were entered in EL2.

Patches 5-6 add the actual kexec support.

Please consider all patches for inclusion.

[1]  https://git.kernel.org/cgit/linux/kernel/git/geoff/linux-kexec.git
[2]  https://git.kernel.org/cgit/linux/kernel/git/geoff/kexec-tools.git
[3]  http://git.linaro.org/leg/acpi/acpi.git #acpi-topic-juno-fvp

Several things are known to have problems on kexec re-boot:

spin-table
----------

PROBLEM: The spin-table enable method does not implement all the methods needed
for CPU hot-plug, so the first stage kernel cannot be shut down properly.

WORK-AROUND: Upgrade to system firmware that provides PSCI enable method
support, OR build the first stage kernel with CONFIG_SMP=n, OR pass 'maxcpus=1'
on the first stage kernel command line.

FIX: Upgrade system firmware to provide PSCI enable method support or add
missing spin-table support to the kernel.

KVM
---

PROBLEM: KVM acquires hypervisor resources on startup, but does not free those
resources on shutdown, so the first stage kernel cannot be shut down properly
when using kexec.

WORK-AROUND: Build the first stage kernel with CONFIG_KVM=n, or apply KVM bug
fix patches from [1].

FIX: Takahiro Akashi has preliminary patches to fix the KVM shutdown problem.  I
have those in my master branch at [1].  KVM + kexec works properly with that
branch.  Patches needed:

 arm64: kvm: add a cpu tear-down function
 arm64: kexec: fix kvm issue
 arm64/kvm: Remove !KEXEC Kconfig dependency
 arm64/kexec: Enable kexec in the arm64 defconfig

/memreserve/
------------

PROBLEM: Device tree /memreserve/ entries are not available in
/proc/device-tree.  For systems that have /memreserve/ entries and use
/proc/device-tree during kexec, the second stage kernel will use the reserved
regions and the system will become unstable.

WORK-AROUND: Enable the kernel config option CONFIG_SYSFS=y to expose a binary
device tree to user space at /sys/firmware/fdt that includes /memreserve/
entries OR pass a user specified DTB using the kexec --dtb option.

FIX: This is expected behavior.  To maximize user support, rework device tree
definitions to not use /memreserve/ entries.

-Geoff

The following changes since commit 06e5801b8cb3fc057d88cb4dc03c0b64b2744cda:

  Linux 4.0-rc4 (2015-03-15 17:38:20 -0700)

are available in the git repository at:

  git://git.kernel.org/pub/scm/linux/kernel/git/geoff/linux-kexec.git kexec-v8

for you to fetch changes up to 9d94104463ae6e3472526fb69e8111201c4a1fa7:

  arm64/kexec: Add pr_devel output (2015-03-19 12:21:43 -0700)

----------------------------------------------------------------
Geoff Levand (6):
      arm64: Fold proc-macros.S into assembler.h
      arm64: Convert hcalls to use HVC immediate value
      arm64: Add new hcall HVC_CALL_FUNC
      arm64: Add EL2 switch to soft_restart
      arm64/kexec: Add core kexec support
      arm64/kexec: Add pr_devel output

 arch/arm64/Kconfig                  |   9 ++
 arch/arm64/include/asm/assembler.h  |  37 ++++++-
 arch/arm64/include/asm/kexec.h      |  48 +++++++++
 arch/arm64/include/asm/proc-fns.h   |   4 +-
 arch/arm64/include/asm/virt.h       |  40 ++++++++
 arch/arm64/kernel/Makefile          |   1 +
 arch/arm64/kernel/hyp-stub.S        |  43 +++++---
 arch/arm64/kernel/machine_kexec.c   | 189 ++++++++++++++++++++++++++++++++++++
 arch/arm64/kernel/process.c         |   6 +-
 arch/arm64/kernel/relocate_kernel.S | 149 ++++++++++++++++++++++++++++
 arch/arm64/kvm/Kconfig              |   1 +
 arch/arm64/kvm/hyp.S                |  16 +--
 arch/arm64/mm/cache.S               |   2 -
 arch/arm64/mm/proc-macros.S         |  54 -----------
 arch/arm64/mm/proc.S                |  49 +++++++---
 include/uapi/linux/kexec.h          |   1 +
 16 files changed, 557 insertions(+), 92 deletions(-)
 create mode 100644 arch/arm64/include/asm/kexec.h
 create mode 100644 arch/arm64/kernel/machine_kexec.c
 create mode 100644 arch/arm64/kernel/relocate_kernel.S
 delete mode 100644 arch/arm64/mm/proc-macros.S

-- 
2.1.0

* [PATCH 3/6] arm64: Add new hcall HVC_CALL_FUNC
From: Geoff Levand @ 2015-03-19 20:35 UTC
  To: linux-arm-kernel

Add the new hcall HVC_CALL_FUNC that allows execution of a function at EL2.
During CPU reset the CPU must be brought to the exception level it had on
entry to the kernel.  The HVC_CALL_FUNC hcall will provide the mechanism
needed for this exception level switch.

To allow the HVC_CALL_FUNC exception vector to work without a stack, which is
needed to support an hcall at CPU reset, this implementation uses register x18
to store the link register across the caller-provided function.  This dictates
that the caller-provided function must preserve the contents of register x18.
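
As an illustration of the resulting calling convention (a hedged sketch, not
code from this series), a caller running at EL1 on a system that still has the
hyp-stub vectors installed could issue the hcall roughly as follows;
hvc_call_func() is a hypothetical helper:

  #include <asm/virt.h>   /* HVC_CALL_FUNC (added below) */

  /* Run func, a physical address of EL2-safe code, at EL2 with up to three
   * arguments.  x17/x18 are listed as clobbered because the hcall handlers
   * may use them, and func itself must preserve x18, as described above. */
  static unsigned long hvc_call_func(unsigned long func, unsigned long a0,
                                     unsigned long a1, unsigned long a2)
  {
          register unsigned long x0 asm("x0") = func;
          register unsigned long x1 asm("x1") = a0;
          register unsigned long x2 asm("x2") = a1;
          register unsigned long x3 asm("x3") = a2;

          asm volatile("hvc %[imm]"
                       : "+r" (x0), "+r" (x1), "+r" (x2), "+r" (x3)
                       : [imm] "i" (HVC_CALL_FUNC)
                       : "x17", "x18", "memory");

          return x0;      /* whatever the EL2 function left in x0 */
  }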

Signed-off-by: Geoff Levand <geoff@infradead.org>
---
 arch/arm64/include/asm/virt.h | 13 +++++++++++++
 arch/arm64/kernel/hyp-stub.S  | 13 ++++++++++++-
 2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index eb10368..3070096 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -45,6 +45,19 @@
 
 #define HVC_SET_VECTORS 2
 
+/*
+ * HVC_CALL_FUNC - Execute a function at EL2.
+ *
+ * @x0: Physical address of the function to be executed.
+ * @x1: Passed as the first argument to the function.
+ * @x2: Passed as the second argument to the function.
+ * @x3: Passed as the third argument to the function.
+ *
+ * The called function must preserve the contents of register x18.
+ */
+
+#define HVC_CALL_FUNC 3
+
 #define BOOT_CPU_MODE_EL1	(0xe11)
 #define BOOT_CPU_MODE_EL2	(0xe12)
 
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 017ab519..e8febe9 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -67,8 +67,19 @@ el1_sync:
 	b	2f
 
 1:	cmp	x18, #HVC_SET_VECTORS
-	b.ne	2f
+	b.ne	1f
 	msr	vbar_el2, x0
+	b	2f
+
+1:	cmp	x18, #HVC_CALL_FUNC
+	b.ne	2f
+	mov	x18, lr
+	mov	lr, x0
+	mov	x0, x1
+	mov	x1, x2
+	mov	x2, x3
+	blr	lr
+	mov	lr, x18
 
 2:	eret
 ENDPROC(el1_sync)
-- 
2.1.0

* [PATCH 3/6] arm64: Add new hcall HVC_CALL_FUNC
@ 2015-03-19 20:35   ` Geoff Levand
  0 siblings, 0 replies; 36+ messages in thread
From: Geoff Levand @ 2015-03-19 20:35 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon; +Cc: kexec, linux-arm-kernel

Add the new hcall HVC_CALL_FUNC that allows execution of a function at EL2.
During CPU reset the CPU must be brought to the exception level it had on
entry to the kernel.  The HVC_CALL_FUNC hcall will provide the mechanism
needed for this exception level switch.

To allow the HVC_CALL_FUNC exception vector to work without a stack, which is
needed to support an hcall at CPU reset, this implementation uses register x18
to store the link register across the caller provided function.  This dictates
that the caller provided function must preserve the contents of register x18.
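
As a hedged illustration only, nothing below is part of this patch: a C-level
sketch of how a caller might issue this hcall using GCC extended inline asm.
The kernel itself does this from assembly; the wrapper name is hypothetical,
while the hcall number and register usage follow the description above.

/* Hypothetical sketch: issue HVC_CALL_FUNC (immediate #3) from C.
 * x0 = physical address of the function to run at EL2, x1-x3 = its
 * arguments.  The stub may use x17/x18, so they are listed as clobbers,
 * and the called function must itself preserve x18 as noted above. */
static inline void hvc_call_func_sketch(unsigned long func_paddr,
					unsigned long arg0,
					unsigned long arg1,
					unsigned long arg2)
{
	register unsigned long x0 asm("x0") = func_paddr;
	register unsigned long x1 asm("x1") = arg0;
	register unsigned long x2 asm("x2") = arg1;
	register unsigned long x3 asm("x3") = arg2;

	asm volatile("hvc #3"
		     : "+r" (x0), "+r" (x1), "+r" (x2), "+r" (x3)
		     :
		     : "x17", "x18", "memory");
}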

Signed-off-by: Geoff Levand <geoff@infradead.org>
---
 arch/arm64/include/asm/virt.h | 13 +++++++++++++
 arch/arm64/kernel/hyp-stub.S  | 13 ++++++++++++-
 2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index eb10368..3070096 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -45,6 +45,19 @@
 
 #define HVC_SET_VECTORS 2
 
+/*
+ * HVC_CALL_FUNC - Execute a function at EL2.
+ *
+ * @x0: Physical address of the function to be executed.
+ * @x1: Passed as the first argument to the function.
+ * @x2: Passed as the second argument to the function.
+ * @x3: Passed as the third argument to the function.
+ *
+ * The called function must preserve the contents of register x18.
+ */
+
+#define HVC_CALL_FUNC 3
+
 #define BOOT_CPU_MODE_EL1	(0xe11)
 #define BOOT_CPU_MODE_EL2	(0xe12)
 
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 017ab519..e8febe9 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -67,8 +67,19 @@ el1_sync:
 	b	2f
 
 1:	cmp	x18, #HVC_SET_VECTORS
-	b.ne	2f
+	b.ne	1f
 	msr	vbar_el2, x0
+	b	2f
+
+1:	cmp	x18, #HVC_CALL_FUNC
+	b.ne	2f
+	mov	x18, lr
+	mov	lr, x0
+	mov	x0, x1
+	mov	x1, x2
+	mov	x2, x3
+	blr	lr
+	mov	lr, x18
 
 2:	eret
 ENDPROC(el1_sync)
-- 
2.1.0



_______________________________________________
kexec mailing list
kexec@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/kexec

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 2/6] arm64: Convert hcalls to use HVC immediate value
@ 2015-03-19 20:35   ` Geoff Levand
  0 siblings, 0 replies; 36+ messages in thread
From: Geoff Levand @ 2015-03-19 20:35 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon
  Cc: marc.zyngier, kexec, linux-arm-kernel, christoffer.dall

The existing arm64 hcall implementations are limited in that they only allow
for two distinct hcalls; with the x0 register either zero or not zero.  Also,
the API of the hyp-stub exception vector routines and the KVM exception vector
routines differ; hyp-stub uses a non-zero value in x0 to implement
__hyp_set_vectors, whereas KVM uses it to implement kvm_call_hyp.

To allow for additional hcalls to be defined and to make the arm64 hcall API
more consistent across exception vector routines, change the hcall
implementations to use the 16 bit immediate value of the HVC instruction to
specify the hcall type.

Define three new preprocessor macros HVC_CALL_HYP, HVC_GET_VECTORS, and
HVC_SET_VECTORS to be used as hcall type specifiers and convert the
existing __hyp_get_vectors(), __hyp_set_vectors() and kvm_call_hyp() routines
to use these new macros when executing an HVC call.  Also, change the
corresponding hyp-stub and KVM el1_sync exception vector routines to use these
new macros.
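
As an editorial sketch, not part of the patch: the dispatch the reworked
el1_sync handlers below implement, written out in C for clarity.  The constant
values mirror the asm/esr.h and asm/virt.h definitions used in the diff; the
real code is assembly and keeps its inputs in registers.

#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_HVC64	0x16		/* matches the old '#0x16' compare */
#define ESR_ELx_ISS_MASK	0x01ffffff	/* ISS is ESR_EL2 bits [24:0] */

#define HVC_CALL_HYP		0
#define HVC_GET_VECTORS		1
#define HVC_SET_VECTORS		2

/* Sketch only: what el1_sync now decides based on the HVC immediate,
 * which the CPU delivers in the ISS field of esr_el2. */
static void el1_sync_dispatch_sketch(unsigned long esr_el2, unsigned long *x0)
{
	unsigned int ec  = (esr_el2 >> ESR_ELx_EC_SHIFT) & 0x3f;
	unsigned int iss = esr_el2 & ESR_ELx_ISS_MASK;

	(void)x0;	/* used only in the commented register accesses */

	if (ec != ESR_ELx_EC_HVC64)
		return;				/* not an HVC trap */

	switch (iss) {
	case HVC_GET_VECTORS:
		/* *x0 = vbar_el2; return the vector base to the caller */
		break;
	case HVC_SET_VECTORS:
		/* vbar_el2 = *x0; install the caller's vector table */
		break;
	default:
		/* HVC_CALL_HYP and friends: handled by the KVM vector */
		break;
	}
}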

Signed-off-by: Geoff Levand <geoff@infradead.org>
---
 arch/arm64/include/asm/virt.h | 27 +++++++++++++++++++++++++++
 arch/arm64/kernel/hyp-stub.S  | 32 +++++++++++++++++++++-----------
 arch/arm64/kvm/hyp.S          | 16 +++++++++-------
 3 files changed, 57 insertions(+), 18 deletions(-)

diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 7a5df52..eb10368 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -18,6 +18,33 @@
 #ifndef __ASM__VIRT_H
 #define __ASM__VIRT_H
 
+/*
+ * The arm64 hcall implementation uses the ISS field of the ESR_EL2 register to
+ * specify the hcall type.  The exception handlers are allowed to use registers
+ * x17 and x18 in their implementation.  Any routine issuing an hcall must not
+ * expect these registers to be preserved.
+ */
+
+/*
+ * HVC_CALL_HYP - Execute a hyp routine.
+ */
+
+#define HVC_CALL_HYP 0
+
+/*
+ * HVC_GET_VECTORS - Return the value of the vbar_el2 register.
+ */
+
+#define HVC_GET_VECTORS 1
+
+/*
+ * HVC_SET_VECTORS - Set the value of the vbar_el2 register.
+ *
+ * @x0: Physical address of the new vector table.
+ */
+
+#define HVC_SET_VECTORS 2
+
 #define BOOT_CPU_MODE_EL1	(0xe11)
 #define BOOT_CPU_MODE_EL2	(0xe12)
 
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index a272f33..017ab519 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -22,6 +22,7 @@
 #include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/assembler.h>
+#include <asm/kvm_arm.h>
 #include <asm/ptrace.h>
 #include <asm/virt.h>
 
@@ -53,14 +54,22 @@ ENDPROC(__hyp_stub_vectors)
 	.align 11
 
 el1_sync:
-	mrs	x1, esr_el2
-	lsr	x1, x1, #26
-	cmp	x1, #0x16
+	mrs	x18, esr_el2
+	lsr	x17, x18, #ESR_ELx_EC_SHIFT
+	and	x18, x18, #ESR_ELx_ISS_MASK
+
+	cmp	x17, #ESR_ELx_EC_HVC64
 	b.ne	2f				// Not an HVC trap
-	cbz	x0, 1f
-	msr	vbar_el2, x0			// Set vbar_el2
+
+	cmp	x18, #HVC_GET_VECTORS
+	b.ne	1f
+	mrs	x0, vbar_el2
 	b	2f
-1:	mrs	x0, vbar_el2			// Return vbar_el2
+
+1:	cmp	x18, #HVC_SET_VECTORS
+	b.ne	2f
+	msr	vbar_el2, x0
+
 2:	eret
 ENDPROC(el1_sync)
 
@@ -100,11 +109,12 @@ ENDPROC(\label)
  * initialisation entry point.
  */
 
-ENTRY(__hyp_get_vectors)
-	mov	x0, xzr
-	// fall through
 ENTRY(__hyp_set_vectors)
-	hvc	#0
+	hvc	#HVC_SET_VECTORS
 	ret
-ENDPROC(__hyp_get_vectors)
 ENDPROC(__hyp_set_vectors)
+
+ENTRY(__hyp_get_vectors)
+	hvc	#HVC_GET_VECTORS
+	ret
+ENDPROC(__hyp_get_vectors)
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 5befd01..fd085ec 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -27,6 +27,7 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
 #include <asm/memory.h>
+#include <asm/virt.h>
 
 #define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
 #define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
@@ -1129,12 +1130,9 @@ __hyp_panic_str:
  * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
  * passed in r0 and r1.
  *
- * A function pointer with a value of 0 has a special meaning, and is
- * used to implement __hyp_get_vectors in the same way as in
- * arch/arm64/kernel/hyp_stub.S.
  */
 ENTRY(kvm_call_hyp)
-	hvc	#0
+	hvc	#HVC_CALL_HYP
 	ret
 ENDPROC(kvm_call_hyp)
 
@@ -1165,6 +1163,7 @@ el1_sync:					// Guest trapped into EL2
 
 	mrs	x1, esr_el2
 	lsr	x2, x1, #ESR_ELx_EC_SHIFT
+	and	x0, x1, #ESR_ELx_ISS_MASK
 
 	cmp	x2, #ESR_ELx_EC_HVC64
 	b.ne	el1_trap
@@ -1173,15 +1172,18 @@ el1_sync:					// Guest trapped into EL2
 	cbnz	x3, el1_trap			// called HVC
 
 	/* Here, we're pretty sure the host called HVC. */
+	mov	x18, x0
 	pop	x2, x3
 	pop	x0, x1
 
-	/* Check for __hyp_get_vectors */
-	cbnz	x0, 1f
+	cmp	x18, #HVC_GET_VECTORS
+	b.ne	1f
 	mrs	x0, vbar_el2
 	b	2f
 
-1:	push	lr, xzr
+1:	/* Default to HVC_CALL_HYP. */
+
+	push	lr, xzr
 
 	/*
 	 * Compute the function address in EL2, and shuffle the parameters.
-- 
2.1.0



_______________________________________________
kexec mailing list
kexec@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/kexec

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 1/6] arm64: Fold proc-macros.S into assembler.h
@ 2015-03-19 20:35   ` Geoff Levand
  0 siblings, 0 replies; 36+ messages in thread
From: Geoff Levand @ 2015-03-19 20:35 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon; +Cc: kexec, linux-arm-kernel

To allow the assembler macros defined in arch/arm64/mm/proc-macros.S to be used
outside the mm code, move the contents of proc-macros.S to asm/assembler.h.
Also, delete proc-macros.S and fix up all references to it.
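
As an editorial aside, not part of the patch: what one of the moved macros,
dcache_line_size, computes, shown as a small C sketch for readers following the
kexec use of it later in the series.  The helper name is made up and the
register read is via inline asm.

/* Sketch: CTR_EL0 bits [19:16] (DminLine) give the smallest D-cache line
 * as a log2 count of 4-byte words, so the size in bytes is 4 << DminLine,
 * which is exactly what the assembler macro produces. */
static inline unsigned long dcache_line_size_sketch(void)
{
	unsigned long ctr;

	asm volatile("mrs %0, ctr_el0" : "=r" (ctr));
	return 4UL << ((ctr >> 16) & 0xf);
}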

Signed-off-by: Geoff Levand <geoff@infradead.org>
---
 arch/arm64/include/asm/assembler.h | 37 +++++++++++++++++++++++++-
 arch/arm64/mm/cache.S              |  2 --
 arch/arm64/mm/proc-macros.S        | 54 --------------------------------------
 arch/arm64/mm/proc.S               |  2 --
 4 files changed, 36 insertions(+), 59 deletions(-)
 delete mode 100644 arch/arm64/mm/proc-macros.S

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 750bac4..47962f6be 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -1,5 +1,5 @@
 /*
- * Based on arch/arm/include/asm/assembler.h
+ * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
  *
  * Copyright (C) 1996-2000 Russell King
  * Copyright (C) 2012 ARM Ltd.
@@ -23,6 +23,7 @@
 #ifndef __ASM_ASSEMBLER_H
 #define __ASM_ASSEMBLER_H
 
+#include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 
@@ -159,4 +160,38 @@ lr	.req	x30		// link register
 	orr	\rd, \lbits, \hbits, lsl #32
 	.endm
 
+/*
+ * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
+ */
+	.macro	vma_vm_mm, rd, rn
+	ldr	\rd, [\rn, #VMA_VM_MM]
+	.endm
+
+/*
+ * mmid - get context id from mm pointer (mm->context.id)
+ */
+	.macro	mmid, rd, rn
+	ldr	\rd, [\rn, #MM_CONTEXT_ID]
+	.endm
+
+/*
+ * dcache_line_size - get the minimum D-cache line size from the CTR register.
+ */
+	.macro	dcache_line_size, reg, tmp
+	mrs	\tmp, ctr_el0			// read CTR
+	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
+	mov	\reg, #4			// bytes per word
+	lsl	\reg, \reg, \tmp		// actual cache line size
+	.endm
+
+/*
+ * icache_line_size - get the minimum I-cache line size from the CTR register.
+ */
+	.macro	icache_line_size, reg, tmp
+	mrs	\tmp, ctr_el0			// read CTR
+	and	\tmp, \tmp, #0xf		// cache line size encoding
+	mov	\reg, #4			// bytes per word
+	lsl	\reg, \reg, \tmp		// actual cache line size
+	.endm
+
 #endif	/* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 2560e1e..2d7a67c 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -24,8 +24,6 @@
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 
-#include "proc-macros.S"
-
 /*
  *	__flush_dcache_all()
  *
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
deleted file mode 100644
index 005d29e..0000000
--- a/arch/arm64/mm/proc-macros.S
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Based on arch/arm/mm/proc-macros.S
- *
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-
-/*
- * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
- */
-	.macro	vma_vm_mm, rd, rn
-	ldr	\rd, [\rn, #VMA_VM_MM]
-	.endm
-
-/*
- * mmid - get context id from mm pointer (mm->context.id)
- */
-	.macro	mmid, rd, rn
-	ldr	\rd, [\rn, #MM_CONTEXT_ID]
-	.endm
-
-/*
- * dcache_line_size - get the minimum D-cache line size from the CTR register.
- */
-	.macro	dcache_line_size, reg, tmp
-	mrs	\tmp, ctr_el0			// read CTR
-	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
-	mov	\reg, #4			// bytes per word
-	lsl	\reg, \reg, \tmp		// actual cache line size
-	.endm
-
-/*
- * icache_line_size - get the minimum I-cache line size from the CTR register.
- */
-	.macro	icache_line_size, reg, tmp
-	mrs	\tmp, ctr_el0			// read CTR
-	and	\tmp, \tmp, #0xf		// cache line size encoding
-	mov	\reg, #4			// bytes per word
-	lsl	\reg, \reg, \tmp		// actual cache line size
-	.endm
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 28eebfb..fe69f6e 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -26,8 +26,6 @@
 #include <asm/pgtable-hwdef.h>
 #include <asm/pgtable.h>
 
-#include "proc-macros.S"
-
 #ifdef CONFIG_ARM64_64K_PAGES
 #define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
 #else
-- 
2.1.0



_______________________________________________
kexec mailing list
kexec@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/kexec

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 6/6] arm64/kexec: Add pr_devel output
  2015-03-19 20:35 ` Geoff Levand
@ 2015-03-19 20:35   ` Geoff Levand
  -1 siblings, 0 replies; 36+ messages in thread
From: Geoff Levand @ 2015-03-19 20:35 UTC (permalink / raw)
  To: linux-arm-kernel

To aid in debugging kexec problems or when adding new functionality to kexec,
add a new routine kexec_image_info() and several inline pr_devel statements.
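
One note on actually seeing this output, stated here as the editor's assumption
rather than something the patch spells out: pr_devel() follows the usual
include/linux/printk.h convention and compiles away unless DEBUG is defined for
the file, roughly:

/* Paraphrase of the standard pr_devel() definition; without DEBUG the
 * calls below become no-ops. */
#ifdef DEBUG
#define pr_devel(fmt, ...) \
	printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_devel(fmt, ...) \
	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif

So a typical way to enable the messages would be a kbuild line such as
CFLAGS_machine_kexec.o := -DDEBUG (again an assumption, not part of this
patch), which also satisfies the #if !defined(DEBUG) early return in
_kexec_image_info() below.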

Signed-off-by: Geoff Levand <geoff@infradead.org>
---
 arch/arm64/kernel/machine_kexec.c | 64 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)

diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index f1387d0..7aa2fa2 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -24,6 +24,48 @@ extern unsigned long arm64_kexec_dtb_addr;
 extern unsigned long arm64_kexec_kimage_head;
 extern unsigned long arm64_kexec_kimage_start;
 
+/**
+ * kexec_is_dtb - Helper routine to check the device tree header signature.
+ */
+static bool kexec_is_dtb(const void *dtb)
+{
+	__be32 magic;
+
+	return get_user(magic, (__be32 *)dtb) ? false :
+		(be32_to_cpu(magic) == OF_DT_HEADER);
+}
+
+/**
+ * kexec_image_info - For debugging output.
+ */
+#define kexec_image_info(_i) _kexec_image_info(__func__, __LINE__, _i)
+static void _kexec_image_info(const char *func, int line,
+	const struct kimage *image)
+{
+	unsigned long i;
+
+#if !defined(DEBUG)
+	return;
+#endif
+	pr_devel("%s:%d:\n", func, line);
+	pr_devel("  kexec image info:\n");
+	pr_devel("    type:        %d\n", image->type);
+	pr_devel("    start:       %lx\n", image->start);
+	pr_devel("    head:        %lx\n", image->head);
+	pr_devel("    nr_segments: %lu\n", image->nr_segments);
+
+	for (i = 0; i < image->nr_segments; i++) {
+		pr_devel("      segment[%lu]: %016lx - %016lx, %lx bytes, %lu pages%s\n",
+			i,
+			image->segment[i].mem,
+			image->segment[i].mem + image->segment[i].memsz,
+			image->segment[i].memsz,
+			image->segment[i].memsz /  PAGE_SIZE,
+			(kexec_is_dtb(image->segment[i].buf) ?
+				", dtb segment" : ""));
+	}
+}
+
 void machine_kexec_cleanup(struct kimage *image)
 {
 	/* Empty routine needed to avoid build errors. */
@@ -37,6 +79,7 @@ void machine_kexec_cleanup(struct kimage *image)
 int machine_kexec_prepare(struct kimage *image)
 {
 	arm64_kexec_kimage_start = image->start;
+	kexec_image_info(image);
 	return 0;
 }
 
@@ -91,6 +134,27 @@ void machine_kexec(struct kimage *image)
 	reboot_code_buffer_phys = page_to_phys(image->control_code_page);
 	reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);
 
+	kexec_image_info(image);
+
+	pr_devel("%s:%d: control_code_page:        %p\n", __func__, __LINE__,
+		image->control_code_page);
+	pr_devel("%s:%d: reboot_code_buffer_phys:  %pa\n", __func__, __LINE__,
+		&reboot_code_buffer_phys);
+	pr_devel("%s:%d: reboot_code_buffer:       %p\n", __func__, __LINE__,
+		reboot_code_buffer);
+	pr_devel("%s:%d: relocate_new_kernel:      %p\n", __func__, __LINE__,
+		relocate_new_kernel);
+	pr_devel("%s:%d: relocate_new_kernel_size: 0x%lx(%lu) bytes\n",
+		__func__, __LINE__, relocate_new_kernel_size,
+		relocate_new_kernel_size);
+
+	pr_devel("%s:%d: kexec_dtb_addr:           %lx\n", __func__, __LINE__,
+		arm64_kexec_dtb_addr);
+	pr_devel("%s:%d: kexec_kimage_head:        %lx\n", __func__, __LINE__,
+		arm64_kexec_kimage_head);
+	pr_devel("%s:%d: kexec_kimage_start:       %lx\n", __func__, __LINE__,
+		arm64_kexec_kimage_start);
+
 	/*
 	 * Copy relocate_new_kernel to the reboot_code_buffer for use
 	 * after the kernel is shut down.
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 6/6] arm64/kexec: Add pr_devel output
@ 2015-03-19 20:35   ` Geoff Levand
  0 siblings, 0 replies; 36+ messages in thread
From: Geoff Levand @ 2015-03-19 20:35 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon; +Cc: kexec, linux-arm-kernel

To aid in debugging kexec problems or when adding new functionality to kexec,
add a new routine kexec_image_info() and several inline pr_devel statements.

Signed-off-by: Geoff Levand <geoff@infradead.org>
---
 arch/arm64/kernel/machine_kexec.c | 64 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)

diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index f1387d0..7aa2fa2 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -24,6 +24,48 @@ extern unsigned long arm64_kexec_dtb_addr;
 extern unsigned long arm64_kexec_kimage_head;
 extern unsigned long arm64_kexec_kimage_start;
 
+/**
+ * kexec_is_dtb - Helper routine to check the device tree header signature.
+ */
+static bool kexec_is_dtb(const void *dtb)
+{
+	__be32 magic;
+
+	return get_user(magic, (__be32 *)dtb) ? false :
+		(be32_to_cpu(magic) == OF_DT_HEADER);
+}
+
+/**
+ * kexec_image_info - For debugging output.
+ */
+#define kexec_image_info(_i) _kexec_image_info(__func__, __LINE__, _i)
+static void _kexec_image_info(const char *func, int line,
+	const struct kimage *image)
+{
+	unsigned long i;
+
+#if !defined(DEBUG)
+	return;
+#endif
+	pr_devel("%s:%d:\n", func, line);
+	pr_devel("  kexec image info:\n");
+	pr_devel("    type:        %d\n", image->type);
+	pr_devel("    start:       %lx\n", image->start);
+	pr_devel("    head:        %lx\n", image->head);
+	pr_devel("    nr_segments: %lu\n", image->nr_segments);
+
+	for (i = 0; i < image->nr_segments; i++) {
+		pr_devel("      segment[%lu]: %016lx - %016lx, %lx bytes, %lu pages%s\n",
+			i,
+			image->segment[i].mem,
+			image->segment[i].mem + image->segment[i].memsz,
+			image->segment[i].memsz,
+			image->segment[i].memsz /  PAGE_SIZE,
+			(kexec_is_dtb(image->segment[i].buf) ?
+				", dtb segment" : ""));
+	}
+}
+
 void machine_kexec_cleanup(struct kimage *image)
 {
 	/* Empty routine needed to avoid build errors. */
@@ -37,6 +79,7 @@ void machine_kexec_cleanup(struct kimage *image)
 int machine_kexec_prepare(struct kimage *image)
 {
 	arm64_kexec_kimage_start = image->start;
+	kexec_image_info(image);
 	return 0;
 }
 
@@ -91,6 +134,27 @@ void machine_kexec(struct kimage *image)
 	reboot_code_buffer_phys = page_to_phys(image->control_code_page);
 	reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);
 
+	kexec_image_info(image);
+
+	pr_devel("%s:%d: control_code_page:        %p\n", __func__, __LINE__,
+		image->control_code_page);
+	pr_devel("%s:%d: reboot_code_buffer_phys:  %pa\n", __func__, __LINE__,
+		&reboot_code_buffer_phys);
+	pr_devel("%s:%d: reboot_code_buffer:       %p\n", __func__, __LINE__,
+		reboot_code_buffer);
+	pr_devel("%s:%d: relocate_new_kernel:      %p\n", __func__, __LINE__,
+		relocate_new_kernel);
+	pr_devel("%s:%d: relocate_new_kernel_size: 0x%lx(%lu) bytes\n",
+		__func__, __LINE__, relocate_new_kernel_size,
+		relocate_new_kernel_size);
+
+	pr_devel("%s:%d: kexec_dtb_addr:           %lx\n", __func__, __LINE__,
+		arm64_kexec_dtb_addr);
+	pr_devel("%s:%d: kexec_kimage_head:        %lx\n", __func__, __LINE__,
+		arm64_kexec_kimage_head);
+	pr_devel("%s:%d: kexec_kimage_start:       %lx\n", __func__, __LINE__,
+		arm64_kexec_kimage_start);
+
 	/*
 	 * Copy relocate_new_kernel to the reboot_code_buffer for use
 	 * after the kernel is shut down.
-- 
2.1.0


_______________________________________________
kexec mailing list
kexec@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/kexec

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 0/6] arm64 kexec kernel patches V8
  2015-03-19 20:35 ` Geoff Levand
@ 2015-03-20 19:48   ` Mark Rutland
  -1 siblings, 0 replies; 36+ messages in thread
From: Mark Rutland @ 2015-03-20 19:48 UTC (permalink / raw)
  To: linux-arm-kernel

On Thu, Mar 19, 2015 at 08:35:27PM +0000, Geoff Levand wrote:
> Hi All,

Hi Geoff,

As a heads-up, I'm not going to have the chance to review this until
after the FW summit next week.

[...]

> KVM
> ---
> 
> PROBLEM: KVM acquires hypervisor resources on startup, but does not free those
> resources on shutdown, so the first stage kernel cannot be shutdown properly
> when using kexec.
> 
> WORK-AROUND: Build the first stage kernel with CONFIG_KVM=n, or apply KVM bug
> fix patches from [1].
> 
> FIX: Takahiro Akashi has preliminary patches to fix the KVM shutdown problem.  I
> have those in my master branch at [1].  KVM + kexec works properly with that
> branch.  Patches needed:
> 
>  arm64: kvm: add a cpu tear-down function
>  arm64: kexec: fix kvm issue
>  arm64/kvm: Remove !KEXEC Kconfig dependency
>  arm64/kexec: Enable kexec in the arm64 defconfig

These (or some descendents thereof) are going to be a prerequisite for
kexec.

> /memreserve/
> ----------
> 
> PROBLEM: Device tree /memreserve/ entries are not available in
> /proc/device-tree.  For systems that have /memreserve/ entries and use
> /proc/device-tree during kexec, the second stage kernel will use the reserved
> regions and the system will become unstable.
> 
> WORK-AROUND: Enable the kernel config option CONFIG_SYSFS=y to expose a binary
> device tree to user space at /sys/firmware/fdt that includes /memreserve/
> entries OR pass a user specified DTB using the kexec --dtb option.
> 
> FIX: This is expected behavior.  To maximize user support, rework device tree
> definitions to not use /memreserve/ entries.

I'd say that requiring CONFIG_SYSFS is the real fix here. That's a
kernel change that maximises compatibility with DTBs (which we don't
really have control over).

I hope that the tools warn somehow if they cannot see the fdt.

Mark.

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 0/6] arm64 kexec kernel patches V8
@ 2015-03-20 19:48   ` Mark Rutland
  0 siblings, 0 replies; 36+ messages in thread
From: Mark Rutland @ 2015-03-20 19:48 UTC (permalink / raw)
  To: Geoff Levand
  Cc: linux-arm-kernel, Ard Biesheuvel, Marc Zyngier, Catalin Marinas,
	Will Deacon, hanjun.guo, kexec, christoffer.dall

On Thu, Mar 19, 2015 at 08:35:27PM +0000, Geoff Levand wrote:
> Hi All,

Hi Geoff,

As a heads-up, I'm not going to have the chance to review this until
after the FW summit next week.

[...]

> KVM
> ---
> 
> PROBLEM: KVM acquires hypervisor resources on startup, but does not free those
> resources on shutdown, so the first stage kernel cannot be shutdown properly
> when using kexec.
> 
> WORK-AROUND: Build the first stage kernel with CONFIG_KVM=n, or apply KVM bug
> fix patches from [1].
> 
> FIX: Takahiro Akashi has preliminary patches to fix the KVM shutdown problem.  I
> have those in my master branch at [1].  KVM + kexec works properly with that
> branch.  Patches needed:
> 
>  arm64: kvm: add a cpu tear-down function
>  arm64: kexec: fix kvm issue
>  arm64/kvm: Remove !KEXEC Kconfig dependency
>  arm64/kexec: Enable kexec in the arm64 defconfig

These (or some descendents thereof) are going to be a prerequisite for
kexec.

> /memreserve/
> ----------
> 
> PROBLEM: Device tree /memreserve/ entries are not available in
> /proc/device-tree.  For systems that have /memreserve/ entries and use
> /proc/device-tree during kexec, the second stage kernel will use the reserved
> regions and the system will become unstable.
> 
> WORK-AROUND: Enable the kernel config option CONFIG_SYSFS=y to expose a binary
> device tree to user space at /sys/firmware/fdt that includes /memreserve/
> entries OR pass a user specified DTB using the kexec --dtb option.
> 
> FIX: This is expected behavior.  To maximize user support, rework device tree
> definitions to not use /memreserve/ entries.

I'd say that requiring CONFIG_SYSFS is the real fix here. That's a
kernel change that maximises compatibility with DTBs (which we don't
really have control over).

I hope that the tools warn somehow if they cannot see the fdt.

Mark.

_______________________________________________
kexec mailing list
kexec@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/kexec

^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH 0/6] arm64 kexec kernel patches V8
  2015-03-19 20:35 ` Geoff Levand
@ 2015-04-03 16:48   ` Geoff Levand
  -1 siblings, 0 replies; 36+ messages in thread
From: Geoff Levand @ 2015-04-03 16:48 UTC (permalink / raw)
  To: linux-arm-kernel

Hi,

I rebased my series to the latest arm64 for-next/core (v4.0-rc4)
and pushed it out to:

  git://git.kernel.org/pub/scm/linux/kernel/git/geoff/linux-kexec.git kexec-v9

-Geoff

On Thu, 2015-03-19 at 20:35 +0000, Geoff Levand wrote:
> Hi All,
> 
> This series adds the core support for kexec re-boots on arm64.  This v8 of the
> series is mainly just a rebase to Linux-4.0-rc3, and a few very minor changes
> requested for v7.
> 
> To load a second stage kernel and execute a kexec re-boot on arm64 my patches to
> kexec-tools [2], which have not yet been merged upstream, are needed.
> 
> I have tested with the ARM VE fast model, the ARM Base model and the ARM
> Foundation model with various kernel config options for both the first and
> second stage kernels.  Kexec on EFI systems works correctly.  With the ACPI
> kernel patches from [3] applied, kexec on ACPI systems seems to work correctly.
> More ACPI + kexec testing is needed.
> 
> Patch 1 here moves the macros from proc-macros.S to asm/assembler.h so that the
> dcache_line_size macro it defines can be used by kexec's relocate kernel
> routine.
> 
> Patches 2-4 rework the arm64 hcall mechanism to give the arm64 soft_restart()
> routine the ability to switch exception levels from EL1 to EL2 for kernels that
> were entered in EL2.
> 
> Patches 5-6 add the actual kexec support.
> 
> Please consider all patches for inclusion.
> 
> [1]  https://git.kernel.org/cgit/linux/kernel/git/geoff/linux-kexec.git
> [2]  https://git.kernel.org/cgit/linux/kernel/git/geoff/kexec-tools.git
> [3]  http://git.linaro.org/leg/acpi/acpi.git #acpi-topic-juno-fvp
> 
> Several things are known to have problems on kexec re-boot:
> 
> spin-table
> ----------
> 
> PROBLEM: The spin-table enable method does not implement all the methods needed
> for CPU hot-plug, so the first stage kernel cannot be shutdown properly.
> 
> WORK-AROUND: Upgrade to system firmware that provides PSCI enable method
> support, OR build the first stage kernel with CONFIG_SMP=n, OR pass 'maxcpus=1'
> on the first stage kernel command line.
> 
> FIX: Upgrade system firmware to provide PSCI enable method support or add
> missing spin-table support to the kernel.
> 
> KVM
> ---
> 
> PROBLEM: KVM acquires hypervisor resources on startup, but does not free those
> resources on shutdown, so the first stage kernel cannot be shutdown properly
> when using kexec.
> 
> WORK-AROUND: Build the first stage kernel with CONFIG_KVM=n, or apply KVM bug
> fix patches from [1].
> 
> FIX: Takahiro Akashi has preliminary patches to fix the KVM shutdown problem.  I
> have those in my master branch at [1].  KVM + kexec works properly with that
> branch.  Patches needed:
> 
>  arm64: kvm: add a cpu tear-down function
>  arm64: kexec: fix kvm issue
>  arm64/kvm: Remove !KEXEC Kconfig dependency
>  arm64/kexec: Enable kexec in the arm64 defconfig
> 
> /memreserve/
> ----------
> 
> PROBLEM: Device tree /memreserve/ entries are not available in
> /proc/device-tree.  For systems that have /memreserve/ entries and use
> /proc/device-tree during kexec, the second stage kernel will use the reserved
> regions and the system will become unstable.
> 
> WORK-AROUND: Enable the kernel config option CONFIG_SYSFS=y to expose a binary
> device tree to user space at /sys/firmware/fdt that includes /memreserve/
> entries OR pass a user specified DTB using the kexec --dtb option.
> 
> FIX: This is expected behavior.  To maximize user support, rework device tree
> definitions to not use /memreserve/ entries.
> 
> -Geoff

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 0/6] arm64 kexec kernel patches V8
@ 2015-04-03 16:48   ` Geoff Levand
  0 siblings, 0 replies; 36+ messages in thread
From: Geoff Levand @ 2015-04-03 16:48 UTC (permalink / raw)
  To: Catalin Marinas
  Cc: Ard Biesheuvel, marc.zyngier, Will Deacon, kexec,
	christoffer.dall, linux-arm-kernel, Hanjun Guo

Hi,

I rebased my series to the latest arm64 for-next/core (v4.0-rc4)
and pushed it out to:

  git://git.kernel.org/pub/scm/linux/kernel/git/geoff/linux-kexec.git kexec-v9

-Geoff

On Thu, 2015-03-19 at 20:35 +0000, Geoff Levand wrote:
> Hi All,
> 
> This series adds the core support for kexec re-boots on arm64.  This v8 of the
> series is mainly just a rebase to Linux-4.0-rc3, and a few very minor changes
> requested for v7.
> 
> To load a second stage kernel and execute a kexec re-boot on arm64 my patches to
> kexec-tools [2], which have not yet been merged upstream, are needed.
> 
> I have tested with the ARM VE fast model, the ARM Base model and the ARM
> Foundation model with various kernel config options for both the first and
> second stage kernels.  Kexec on EFI systems works correctly.  With the ACPI
> kernel patches from [3] applied, kexec on ACPI systems seems to work correctly.
> More ACPI + kexec testing is needed.
> 
> Patch 1 here moves the macros from proc-macros.S to asm/assembler.h so that the
> dcache_line_size macro it defines can be used by kexec's relocate kernel
> routine.
> 
> Patches 2-4 rework the arm64 hcall mechanism to give the arm64 soft_restart()
> routine the ability to switch exception levels from EL1 to EL2 for kernels that
> were entered in EL2.
> 
> Patches 5-6 add the actual kexec support.
> 
> Please consider all patches for inclusion.
> 
> [1]  https://git.kernel.org/cgit/linux/kernel/git/geoff/linux-kexec.git
> [2]  https://git.kernel.org/cgit/linux/kernel/git/geoff/kexec-tools.git
> [3]  http://git.linaro.org/leg/acpi/acpi.git #acpi-topic-juno-fvp
> 
> Several things are known to have problems on kexec re-boot:
> 
> spin-table
> ----------
> 
> PROBLEM: The spin-table enable method does not implement all the methods needed
> for CPU hot-plug, so the first stage kernel cannot be shutdown properly.
> 
> WORK-AROUND: Upgrade to system firmware that provides PSCI enable method
> support, OR build the first stage kernel with CONFIG_SMP=n, OR pass 'maxcpus=1'
> on the first stage kernel command line.
> 
> FIX: Upgrade system firmware to provide PSCI enable method support or add
> missing spin-table support to the kernel.
> 
> KVM
> ---
> 
> PROBLEM: KVM acquires hypervisor resources on startup, but does not free those
> resources on shutdown, so the first stage kernel cannot be shutdown properly
> when using kexec.
> 
> WORK-AROUND: Build the first stage kernel with CONFIG_KVM=n, or apply KVM bug
> fix patches from [1].
> 
> FIX: Takahiro Akashi has preliminary patches to fix the KVM shutdown problem.  I
> have those in my master branch at [1].  KVM + kexec works properly with that
> branch.  Patches needed:
> 
>  arm64: kvm: add a cpu tear-down function
>  arm64: kexec: fix kvm issue
>  arm64/kvm: Remove !KEXEC Kconfig dependency
>  arm64/kexec: Enable kexec in the arm64 defconfig
> 
> /memreserve/
> ----------
> 
> PROBLEM: Device tree /memreserve/ entries are not available in
> /proc/device-tree.  For systems that have /memreserve/ entries and use
> /proc/device-tree during kexec, the second stage kernel will use the reserved
> regions and the system will become unstable.
> 
> WORK-AROUND: Enable the kernel config option CONFIG_SYSFS=y to expose a binary
> device tree to user space at /sys/firmware/fdt that includes /memreserve/
> entries OR pass a user specified DTB using the kexec --dtb option.
> 
> FIX: This is expected behavior.  To maximize user support, rework device tree
> definitions to not use /memreserve/ entries.
> 
> -Geoff



_______________________________________________
kexec mailing list
kexec@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/kexec

^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH 5/6] arm64/kexec: Add core kexec support
  2015-03-19 20:35   ` Geoff Levand
@ 2015-04-07 16:38     ` Suzuki K. Poulose
  -1 siblings, 0 replies; 36+ messages in thread
From: Suzuki K. Poulose @ 2015-04-07 16:38 UTC (permalink / raw)
  To: linux-arm-kernel

On 19/03/15 20:35, Geoff Levand wrote:
> Add three new files, kexec.h, machine_kexec.c and relocate_kernel.S to the
> arm64 architecture that add support for the kexec re-boot mechanism
> (CONFIG_KEXEC) on arm64 platforms.
>
> With the addition of arm64 kexec support shutdown code paths through the kernel
> are executed that previously were not.  To avoid system instability do to
> problems in the current arm64 KVM kernel implementation add a Kconfig dependency
> on !KEXEC to the arm64 KVM menu item.
>
> Signed-off-by: Geoff Levand <geoff@infradead.org>
> ---
>   arch/arm64/Kconfig                  |   9 +++
>   arch/arm64/include/asm/kexec.h      |  48 ++++++++++++
>   arch/arm64/kernel/Makefile          |   1 +
>   arch/arm64/kernel/machine_kexec.c   | 125 ++++++++++++++++++++++++++++++
>   arch/arm64/kernel/relocate_kernel.S | 149 ++++++++++++++++++++++++++++++++++++
>   arch/arm64/kvm/Kconfig              |   1 +
>   include/uapi/linux/kexec.h          |   1 +
>   7 files changed, 334 insertions(+)
>   create mode 100644 arch/arm64/include/asm/kexec.h
>   create mode 100644 arch/arm64/kernel/machine_kexec.c
>   create mode 100644 arch/arm64/kernel/relocate_kernel.S
>
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 1b8e973..5a606d1 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -528,6 +528,15 @@ config SECCOMP
>            and the task is only allowed to execute a few safe syscalls
>            defined by each seccomp mode.
>
> +config KEXEC
> +       depends on (!SMP || PM_SLEEP_SMP)
> +       bool "kexec system call"
> +       ---help---
> +         kexec is a system call that implements the ability to shutdown your
> +         current kernel, and to start another kernel.  It is like a reboot
> +         but it is independent of the system firmware.   And like a reboot
> +         you can start any kernel with it, not just Linux.
> +
>   config XEN_DOM0
>          def_bool y
>          depends on XEN
> diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
> new file mode 100644
> index 0000000..3530ff5
> --- /dev/null
> +++ b/arch/arm64/include/asm/kexec.h
> @@ -0,0 +1,48 @@
> +/*
> + * kexec for arm64
> + *
> + * Copyright (C) Linaro.
> + * Copyright (C) Futurewei Technologies.
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + */
> +
> +#if !defined(_ARM64_KEXEC_H)
> +#define _ARM64_KEXEC_H
> +
> +/* Maximum physical address we can use pages from */
> +
> +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
> +
> +/* Maximum address we can reach in physical address mode */
> +
> +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
> +
> +/* Maximum address we can use for the control code buffer */
> +
> +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
> +
> +#define KEXEC_CONTROL_PAGE_SIZE        4096
> +
> +#define KEXEC_ARCH KEXEC_ARCH_ARM64
> +
> +#if !defined(__ASSEMBLY__)
> +
> +/**
> + * crash_setup_regs() - save registers for the panic kernel
> + *
> + * @newregs: registers are saved here
> + * @oldregs: registers to be saved (may be %NULL)
> + */
> +
> +static inline void crash_setup_regs(struct pt_regs *newregs,
> +                                   struct pt_regs *oldregs)
> +{
> +       /* Empty routine needed to avoid build errors. */
> +}
> +
> +#endif /* !defined(__ASSEMBLY__) */
> +
> +#endif
> diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
> index 5ee07ee..da9a7ee 100644
> --- a/arch/arm64/kernel/Makefile
> +++ b/arch/arm64/kernel/Makefile
> @@ -35,6 +35,7 @@ arm64-obj-$(CONFIG_KGDB)              += kgdb.o
>   arm64-obj-$(CONFIG_EFI)                        += efi.o efi-stub.o efi-entry.o
>   arm64-obj-$(CONFIG_PCI)                        += pci.o
>   arm64-obj-$(CONFIG_ARMV8_DEPRECATED)   += armv8_deprecated.o
> +arm64-obj-$(CONFIG_KEXEC)              += machine_kexec.o relocate_kernel.o
>
>   obj-y                                  += $(arm64-obj-y) vdso/
>   obj-m                                  += $(arm64-obj-m)
> diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
> new file mode 100644
> index 0000000..f1387d0
> --- /dev/null
> +++ b/arch/arm64/kernel/machine_kexec.c
> @@ -0,0 +1,125 @@
> +/*
> + * kexec for arm64
> + *
> + * Copyright (C) Linaro.
> + * Copyright (C) Futurewei Technologies.
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + */
> +
> +#include <linux/kexec.h>
> +#include <linux/of_fdt.h>
> +#include <linux/slab.h>
> +#include <linux/uaccess.h>
> +
> +#include <asm/cacheflush.h>
> +#include <asm/system_misc.h>
> +
> +/* Global variables for the relocate_kernel routine. */
> +extern const unsigned char relocate_new_kernel[];
> +extern const unsigned long relocate_new_kernel_size;
> +extern unsigned long arm64_kexec_dtb_addr;
> +extern unsigned long arm64_kexec_kimage_head;
> +extern unsigned long arm64_kexec_kimage_start;
> +
> +void machine_kexec_cleanup(struct kimage *image)
> +{
> +       /* Empty routine needed to avoid build errors. */
> +}
> +
> +/**
> + * machine_kexec_prepare - Prepare for a kexec reboot.
> + *
> + * Called from the core kexec code when a kernel image is loaded.
> + */
> +int machine_kexec_prepare(struct kimage *image)
> +{
> +       arm64_kexec_kimage_start = image->start;
> +       return 0;
> +}
> +
> +/**
> + * kexec_list_flush - Helper to flush the kimage list to PoC.
> + */
> +static void kexec_list_flush(unsigned long kimage_head)
> +{
> +       void *dest;

What is the use of dest ?
> +       unsigned long *entry;
> +
> +       for (entry = &kimage_head, dest = NULL; ; entry++) {
> +               unsigned int flag = *entry &
> +                       (IND_DESTINATION | IND_INDIRECTION | IND_DONE |
> +                       IND_SOURCE);
You could instead do :

	flag = *entry & IND_FLAGS;

> +               void *addr = phys_to_virt(*entry & PAGE_MASK);
> +
> +               switch (flag) {
> +               case IND_INDIRECTION:
> +                       entry = (unsigned long *)addr - 1;
> +                       __flush_dcache_area(addr, PAGE_SIZE);
> +                       break;
> +               case IND_DESTINATION:
> +                       dest = addr;
> +                       break;
> +               case IND_SOURCE:
> +                       __flush_dcache_area(addr, PAGE_SIZE);
> +                       dest += PAGE_SIZE;
> +                       break;
> +               case IND_DONE:
> +                       return;
> +               default:
> +                       BUG();
> +               }
> +       }
> +}

Thanks
Suzuki

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 5/6] arm64/kexec: Add core kexec support
@ 2015-04-07 16:38     ` Suzuki K. Poulose
  0 siblings, 0 replies; 36+ messages in thread
From: Suzuki K. Poulose @ 2015-04-07 16:38 UTC (permalink / raw)
  To: Geoff Levand, Catalin Marinas, Will Deacon
  Cc: Marc Zyngier, kexec, christoffer.dall, linux-arm-kernel

On 19/03/15 20:35, Geoff Levand wrote:
> Add three new files, kexec.h, machine_kexec.c and relocate_kernel.S to the
> arm64 architecture that add support for the kexec re-boot mechanism
> (CONFIG_KEXEC) on arm64 platforms.
>
> With the addition of arm64 kexec support shutdown code paths through the kernel
> are executed that previously were not.  To avoid system instability do to
> problems in the current arm64 KVM kernel implementation add a Kconfig dependency
> on !KEXEC to the arm64 KVM menu item.
>
> Signed-off-by: Geoff Levand <geoff@infradead.org>
> ---
>   arch/arm64/Kconfig                  |   9 +++
>   arch/arm64/include/asm/kexec.h      |  48 ++++++++++++
>   arch/arm64/kernel/Makefile          |   1 +
>   arch/arm64/kernel/machine_kexec.c   | 125 ++++++++++++++++++++++++++++++
>   arch/arm64/kernel/relocate_kernel.S | 149 ++++++++++++++++++++++++++++++++++++
>   arch/arm64/kvm/Kconfig              |   1 +
>   include/uapi/linux/kexec.h          |   1 +
>   7 files changed, 334 insertions(+)
>   create mode 100644 arch/arm64/include/asm/kexec.h
>   create mode 100644 arch/arm64/kernel/machine_kexec.c
>   create mode 100644 arch/arm64/kernel/relocate_kernel.S
>
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 1b8e973..5a606d1 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -528,6 +528,15 @@ config SECCOMP
>            and the task is only allowed to execute a few safe syscalls
>            defined by each seccomp mode.
>
> +config KEXEC
> +       depends on (!SMP || PM_SLEEP_SMP)
> +       bool "kexec system call"
> +       ---help---
> +         kexec is a system call that implements the ability to shutdown your
> +         current kernel, and to start another kernel.  It is like a reboot
> +         but it is independent of the system firmware.   And like a reboot
> +         you can start any kernel with it, not just Linux.
> +
>   config XEN_DOM0
>          def_bool y
>          depends on XEN
> diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
> new file mode 100644
> index 0000000..3530ff5
> --- /dev/null
> +++ b/arch/arm64/include/asm/kexec.h
> @@ -0,0 +1,48 @@
> +/*
> + * kexec for arm64
> + *
> + * Copyright (C) Linaro.
> + * Copyright (C) Futurewei Technologies.
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + */
> +
> +#if !defined(_ARM64_KEXEC_H)
> +#define _ARM64_KEXEC_H
> +
> +/* Maximum physical address we can use pages from */
> +
> +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
> +
> +/* Maximum address we can reach in physical address mode */
> +
> +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
> +
> +/* Maximum address we can use for the control code buffer */
> +
> +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
> +
> +#define KEXEC_CONTROL_PAGE_SIZE        4096
> +
> +#define KEXEC_ARCH KEXEC_ARCH_ARM64
> +
> +#if !defined(__ASSEMBLY__)
> +
> +/**
> + * crash_setup_regs() - save registers for the panic kernel
> + *
> + * @newregs: registers are saved here
> + * @oldregs: registers to be saved (may be %NULL)
> + */
> +
> +static inline void crash_setup_regs(struct pt_regs *newregs,
> +                                   struct pt_regs *oldregs)
> +{
> +       /* Empty routine needed to avoid build errors. */
> +}
> +
> +#endif /* !defined(__ASSEMBLY__) */
> +
> +#endif
> diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
> index 5ee07ee..da9a7ee 100644
> --- a/arch/arm64/kernel/Makefile
> +++ b/arch/arm64/kernel/Makefile
> @@ -35,6 +35,7 @@ arm64-obj-$(CONFIG_KGDB)              += kgdb.o
>   arm64-obj-$(CONFIG_EFI)                        += efi.o efi-stub.o efi-entry.o
>   arm64-obj-$(CONFIG_PCI)                        += pci.o
>   arm64-obj-$(CONFIG_ARMV8_DEPRECATED)   += armv8_deprecated.o
> +arm64-obj-$(CONFIG_KEXEC)              += machine_kexec.o relocate_kernel.o
>
>   obj-y                                  += $(arm64-obj-y) vdso/
>   obj-m                                  += $(arm64-obj-m)
> diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
> new file mode 100644
> index 0000000..f1387d0
> --- /dev/null
> +++ b/arch/arm64/kernel/machine_kexec.c
> @@ -0,0 +1,125 @@
> +/*
> + * kexec for arm64
> + *
> + * Copyright (C) Linaro.
> + * Copyright (C) Futurewei Technologies.
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + */
> +
> +#include <linux/kexec.h>
> +#include <linux/of_fdt.h>
> +#include <linux/slab.h>
> +#include <linux/uaccess.h>
> +
> +#include <asm/cacheflush.h>
> +#include <asm/system_misc.h>
> +
> +/* Global variables for the relocate_kernel routine. */
> +extern const unsigned char relocate_new_kernel[];
> +extern const unsigned long relocate_new_kernel_size;
> +extern unsigned long arm64_kexec_dtb_addr;
> +extern unsigned long arm64_kexec_kimage_head;
> +extern unsigned long arm64_kexec_kimage_start;
> +
> +void machine_kexec_cleanup(struct kimage *image)
> +{
> +       /* Empty routine needed to avoid build errors. */
> +}
> +
> +/**
> + * machine_kexec_prepare - Prepare for a kexec reboot.
> + *
> + * Called from the core kexec code when a kernel image is loaded.
> + */
> +int machine_kexec_prepare(struct kimage *image)
> +{
> +       arm64_kexec_kimage_start = image->start;
> +       return 0;
> +}
> +
> +/**
> + * kexec_list_flush - Helper to flush the kimage list to PoC.
> + */
> +static void kexec_list_flush(unsigned long kimage_head)
> +{
> +       void *dest;

What is the use of dest ?
> +       unsigned long *entry;
> +
> +       for (entry = &kimage_head, dest = NULL; ; entry++) {
> +               unsigned int flag = *entry &
> +                       (IND_DESTINATION | IND_INDIRECTION | IND_DONE |
> +                       IND_SOURCE);
You could instead do :

	flag = *entry & IND_FLAGS;

> +               void *addr = phys_to_virt(*entry & PAGE_MASK);
> +
> +               switch (flag) {
> +               case IND_INDIRECTION:
> +                       entry = (unsigned long *)addr - 1;
> +                       __flush_dcache_area(addr, PAGE_SIZE);
> +                       break;
> +               case IND_DESTINATION:
> +                       dest = addr;
> +                       break;
> +               case IND_SOURCE:
> +                       __flush_dcache_area(addr, PAGE_SIZE);
> +                       dest += PAGE_SIZE;
> +                       break;
> +               case IND_DONE:
> +                       return;
> +               default:
> +                       BUG();
> +               }
> +       }
> +}

Thanks
Suzuki


_______________________________________________
kexec mailing list
kexec@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/kexec

^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH 5/6] arm64/kexec: Add core kexec support
  2015-04-07 16:38     ` Suzuki K. Poulose
@ 2015-04-07 22:48       ` Geoff Levand
  -1 siblings, 0 replies; 36+ messages in thread
From: Geoff Levand @ 2015-04-07 22:48 UTC (permalink / raw)
  To: linux-arm-kernel

Hi Suzuki,

On Tue, 2015-04-07 at 17:38 +0100, Suzuki K. Poulose wrote:
> On 19/03/15 20:35, Geoff Levand wrote:
> > +static void kexec_list_flush(unsigned long kimage_head)
> > +{
> > +       void *dest;
> 
> What is the use of dest ?

dest is left over from when I had a generic list walk routine.  I'll
remove it.
 
> > +       unsigned long *entry;
> > +
> > +       for (entry = &kimage_head, dest = NULL; ; entry++) {
> > +               unsigned int flag = *entry &
> > +                       (IND_DESTINATION | IND_INDIRECTION | IND_DONE |
> > +                       IND_SOURCE);
> You could instead do :
> 
> 	flag = *entry & IND_FLAGS;

Yes, now that the patch to define IND_FLAGS has been merged.
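
For readers following this exchange, the kimage indicator flags being discussed
are, to the best of the editor's recollection of include/linux/kexec.h at the
time (so treat as approximate), defined as follows, with IND_FLAGS simply the
union of the individual bits:

/* kimage entry indicator bits and the combined IND_FLAGS mask referred
 * to in the review above. */
#define IND_DESTINATION_BIT	0
#define IND_INDIRECTION_BIT	1
#define IND_DONE_BIT		2
#define IND_SOURCE_BIT		3

#define IND_DESTINATION		(1 << IND_DESTINATION_BIT)
#define IND_INDIRECTION		(1 << IND_INDIRECTION_BIT)
#define IND_DONE		(1 << IND_DONE_BIT)
#define IND_SOURCE		(1 << IND_SOURCE_BIT)

#define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE)

The _BIT forms are what relocate_kernel.S tests with tbz/tbnz in the V2 patch
further down the thread.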

-Geoff

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 5/6] arm64/kexec: Add core kexec support
@ 2015-04-07 22:48       ` Geoff Levand
  0 siblings, 0 replies; 36+ messages in thread
From: Geoff Levand @ 2015-04-07 22:48 UTC (permalink / raw)
  To: Suzuki K. Poulose
  Cc: Marc Zyngier, Catalin Marinas, Will Deacon, linux-arm-kernel,
	kexec, christoffer.dall

Hi Suzuki,

On Tue, 2015-04-07 at 17:38 +0100, Suzuki K. Poulose wrote:
> On 19/03/15 20:35, Geoff Levand wrote:
> > +static void kexec_list_flush(unsigned long kimage_head)
> > +{
> > +       void *dest;
> 
> What is the use of dest ?

dest is left over from when I had a generic list walk routine.  I'll
remove it.
 
> > +       unsigned long *entry;
> > +
> > +       for (entry = &kimage_head, dest = NULL; ; entry++) {
> > +               unsigned int flag = *entry &
> > +                       (IND_DESTINATION | IND_INDIRECTION | IND_DONE |
> > +                       IND_SOURCE);
> You could instead do :
> 
> 	flag = *entry & IND_FLAGS;

Yes, now that the patch to define IND_FLAGS has been merged.

-Geoff


_______________________________________________
kexec mailing list
kexec@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/kexec

^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH V2 5/6] arm64/kexec: Add core kexec support
  2015-03-19 20:35   ` Geoff Levand
@ 2015-04-07 23:01     ` Geoff Levand
  -1 siblings, 0 replies; 36+ messages in thread
From: Geoff Levand @ 2015-04-07 23:01 UTC (permalink / raw)
  To: linux-arm-kernel

Add three new files, kexec.h, machine_kexec.c and relocate_kernel.S to the
arm64 architecture that add support for the kexec re-boot mechanism
(CONFIG_KEXEC) on arm64 platforms.

With the addition of arm64 kexec support, shutdown code paths through the kernel
are executed that previously were not.  To avoid system instability due to
problems in the current arm64 KVM kernel implementation, add a Kconfig dependency
on !KEXEC to the arm64 KVM menu item.

Signed-off-by: Geoff Levand <geoff@infradead.org>
---
I pushed this V2 out to my kexec-v9 branch:

 git://git.kernel.org/pub/scm/linux/kernel/git/geoff/linux-kexec.git kexec-v9

-Geoff

 arch/arm64/Kconfig                  |   9 +++
 arch/arm64/include/asm/kexec.h      |  48 ++++++++++++
 arch/arm64/kernel/Makefile          |   1 +
 arch/arm64/kernel/machine_kexec.c   | 120 +++++++++++++++++++++++++++++
 arch/arm64/kernel/relocate_kernel.S | 149 ++++++++++++++++++++++++++++++++++++
 arch/arm64/kvm/Kconfig              |   1 +
 include/uapi/linux/kexec.h          |   1 +
 7 files changed, 329 insertions(+)
 create mode 100644 arch/arm64/include/asm/kexec.h
 create mode 100644 arch/arm64/kernel/machine_kexec.c
 create mode 100644 arch/arm64/kernel/relocate_kernel.S

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 23d51be..5716edf 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -553,6 +553,15 @@ config SECCOMP
 	  and the task is only allowed to execute a few safe syscalls
 	  defined by each seccomp mode.
 
+config KEXEC
+	depends on (!SMP || PM_SLEEP_SMP)
+	bool "kexec system call"
+	---help---
+	  kexec is a system call that implements the ability to shutdown your
+	  current kernel, and to start another kernel.  It is like a reboot
+	  but it is independent of the system firmware.   And like a reboot
+	  you can start any kernel with it, not just Linux.
+
 config XEN_DOM0
 	def_bool y
 	depends on XEN
diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
new file mode 100644
index 0000000..3530ff5
--- /dev/null
+++ b/arch/arm64/include/asm/kexec.h
@@ -0,0 +1,48 @@
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ * Copyright (C) Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(_ARM64_KEXEC_H)
+#define _ARM64_KEXEC_H
+
+/* Maximum physical address we can use pages from */
+
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can reach in physical address mode */
+
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can use for the control code buffer */
+
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
+
+#define KEXEC_CONTROL_PAGE_SIZE	4096
+
+#define KEXEC_ARCH KEXEC_ARCH_ARM64
+
+#if !defined(__ASSEMBLY__)
+
+/**
+ * crash_setup_regs() - save registers for the panic kernel
+ *
+ * @newregs: registers are saved here
+ * @oldregs: registers to be saved (may be %NULL)
+ */
+
+static inline void crash_setup_regs(struct pt_regs *newregs,
+				    struct pt_regs *oldregs)
+{
+	/* Empty routine needed to avoid build errors. */
+}
+
+#endif /* !defined(__ASSEMBLY__) */
+
+#endif
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index b12e15b..ac3c2e2 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -35,6 +35,7 @@ arm64-obj-$(CONFIG_KGDB)		+= kgdb.o
 arm64-obj-$(CONFIG_EFI)			+= efi.o efi-stub.o efi-entry.o
 arm64-obj-$(CONFIG_PCI)			+= pci.o
 arm64-obj-$(CONFIG_ARMV8_DEPRECATED)	+= armv8_deprecated.o
+arm64-obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
 
 obj-y					+= $(arm64-obj-y) vdso/
 obj-m					+= $(arm64-obj-m)
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
new file mode 100644
index 0000000..82efd4b
--- /dev/null
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -0,0 +1,120 @@
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ * Copyright (C) Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kexec.h>
+#include <linux/of_fdt.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/system_misc.h>
+
+/* Global variables for the relocate_kernel routine. */
+extern const unsigned char relocate_new_kernel[];
+extern const unsigned long relocate_new_kernel_size;
+extern unsigned long arm64_kexec_dtb_addr;
+extern unsigned long arm64_kexec_kimage_head;
+extern unsigned long arm64_kexec_kimage_start;
+
+void machine_kexec_cleanup(struct kimage *image)
+{
+	/* Empty routine needed to avoid build errors. */
+}
+
+/**
+ * machine_kexec_prepare - Prepare for a kexec reboot.
+ *
+ * Called from the core kexec code when a kernel image is loaded.
+ */
+int machine_kexec_prepare(struct kimage *image)
+{
+	arm64_kexec_kimage_start = image->start;
+	return 0;
+}
+
+/**
+ * kexec_list_flush - Helper to flush the kimage list to PoC.
+ */
+static void kexec_list_flush(unsigned long kimage_head)
+{
+	unsigned long *entry;
+
+	for (entry = &kimage_head; ; entry++) {
+		unsigned int flag = *entry & IND_FLAGS;
+		void *addr = phys_to_virt(*entry & PAGE_MASK);
+
+		switch (flag) {
+		case IND_INDIRECTION:
+			entry = (unsigned long *)addr - 1;
+			__flush_dcache_area(addr, PAGE_SIZE);
+			break;
+		case IND_DESTINATION:
+			break;
+		case IND_SOURCE:
+			__flush_dcache_area(addr, PAGE_SIZE);
+			break;
+		case IND_DONE:
+			return;
+		default:
+			BUG();
+		}
+	}
+}
+
+/**
+ * machine_kexec - Do the kexec reboot.
+ *
+ * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
+ */
+void machine_kexec(struct kimage *image)
+{
+	phys_addr_t reboot_code_buffer_phys;
+	void *reboot_code_buffer;
+
+	BUG_ON(num_online_cpus() > 1);
+
+	arm64_kexec_kimage_head = image->head;
+
+	reboot_code_buffer_phys = page_to_phys(image->control_code_page);
+	reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);
+
+	/*
+	 * Copy relocate_new_kernel to the reboot_code_buffer for use
+	 * after the kernel is shut down.
+	 */
+	memcpy(reboot_code_buffer, relocate_new_kernel,
+		relocate_new_kernel_size);
+
+	/* Flush the reboot_code_buffer in preparation for its execution. */
+	__flush_dcache_area(reboot_code_buffer, relocate_new_kernel_size);
+
+	/* Flush the kimage list. */
+	kexec_list_flush(image->head);
+
+	pr_info("Bye!\n");
+
+	/* Disable all DAIF exceptions. */
+	asm volatile ("msr daifset, #0xf" : : : "memory");
+
+	/*
+	 * soft_restart() will shutdown the MMU, disable data caches, then
+	 * transfer control to the reboot_code_buffer which contains a copy of
+	 * the relocate_new_kernel routine.  relocate_new_kernel will use
+	 * physical addressing to relocate the new kernel to its final position
+	 * and then will transfer control to the entry point of the new kernel.
+	 */
+	soft_restart(reboot_code_buffer_phys);
+}
+
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+	/* Empty routine needed to avoid build errors. */
+}
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
new file mode 100644
index 0000000..166d960
--- /dev/null
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -0,0 +1,149 @@
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ * Copyright (C) Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kexec.h>
+
+#include <asm/assembler.h>
+#include <asm/kexec.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+
+
+/*
+ * relocate_new_kernel - Put a 2nd stage kernel image in place and boot it.
+ *
+ * The memory that the old kernel occupies may be overwritten when copying the
+ * new image to its final location.  To ensure that the relocate_new_kernel
+ * routine which does that copy is not overwritten, all code and data needed
+ * by relocate_new_kernel must be between the symbols relocate_new_kernel and
+ * relocate_new_kernel_end.  The machine_kexec() routine will copy
+ * relocate_new_kernel to the kexec control_code_page, a special page which
+ * has been set up to be preserved during the copy operation.
+ */
+.globl relocate_new_kernel
+relocate_new_kernel:
+
+	/* Setup the list loop variables. */
+	ldr	x18, arm64_kexec_kimage_head	/* x18 = list entry */
+	dcache_line_size x17, x0		/* x17 = dcache line size */
+	mov	x16, xzr			/* x16 = segment start */
+	mov	x15, xzr			/* x15 = entry ptr */
+	mov	x14, xzr			/* x14 = copy dest */
+
+	/* Check if the new image needs relocation. */
+	cbz	x18, .Ldone
+	tbnz	x18, IND_DONE_BIT, .Ldone
+
+.Lloop:
+	and	x13, x18, PAGE_MASK		/* x13 = addr */
+
+	/* Test the entry flags. */
+.Ltest_source:
+	tbz	x18, IND_SOURCE_BIT, .Ltest_indirection
+
+	mov x20, x14				/*  x20 = copy dest */
+	mov x21, x13				/*  x21 = copy src */
+
+	/* Invalidate dest page to PoC. */
+	mov	x0, x20
+	add	x19, x0, #PAGE_SIZE
+	sub	x1, x17, #1
+	bic	x0, x0, x1
+1:	dc	ivac, x0
+	add	x0, x0, x17
+	cmp	x0, x19
+	b.lo	1b
+	dsb	sy
+
+	/* Copy page. */
+1:	ldp	x22, x23, [x21]
+	ldp	x24, x25, [x21, #16]
+	ldp	x26, x27, [x21, #32]
+	ldp	x28, x29, [x21, #48]
+	add	x21, x21, #64
+	stnp	x22, x23, [x20]
+	stnp	x24, x25, [x20, #16]
+	stnp	x26, x27, [x20, #32]
+	stnp	x28, x29, [x20, #48]
+	add	x20, x20, #64
+	tst	x21, #(PAGE_SIZE - 1)
+	b.ne	1b
+
+	/* dest += PAGE_SIZE */
+	add	x14, x14, PAGE_SIZE
+	b	.Lnext
+
+.Ltest_indirection:
+	tbz	x18, IND_INDIRECTION_BIT, .Ltest_destination
+
+	/* ptr = addr */
+	mov	x15, x13
+	b	.Lnext
+
+.Ltest_destination:
+	tbz	x18, IND_DESTINATION_BIT, .Lnext
+
+	mov	x16, x13
+
+	/* dest = addr */
+	mov	x14, x13
+
+.Lnext:
+	/* entry = *ptr++ */
+	ldr	x18, [x15], #8
+
+	/* while (!(entry & DONE)) */
+	tbz	x18, IND_DONE_BIT, .Lloop
+
+.Ldone:
+	dsb	sy
+	isb
+	ic	ialluis
+	dsb	sy
+	isb
+
+	/* Start new image. */
+	ldr	x4, arm64_kexec_kimage_start
+	mov	x0, xzr
+	mov	x1, xzr
+	mov	x2, xzr
+	mov	x3, xzr
+	br	x4
+
+.align 3	/* To keep the 64-bit values below naturally aligned. */
+
+/* The machine_kexec routines set these variables. */
+
+/*
+ * arm64_kexec_kimage_start - Copy of image->start, the entry point of the new
+ * image.
+ */
+.globl arm64_kexec_kimage_start
+arm64_kexec_kimage_start:
+	.quad	0x0
+
+/*
+ * arm64_kexec_kimage_head - Copy of image->head, the list of kimage entries.
+ */
+.globl arm64_kexec_kimage_head
+arm64_kexec_kimage_head:
+	.quad	0x0
+
+.Lrelocate_new_kernel_end:
+
+/*
+ * relocate_new_kernel_size - Number of bytes to copy to the control_code_page.
+ */
+.globl relocate_new_kernel_size
+relocate_new_kernel_size:
+	.quad .Lrelocate_new_kernel_end - relocate_new_kernel
+
+.org	KEXEC_CONTROL_PAGE_SIZE
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index f5590c8..30ae7a7 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -18,6 +18,7 @@ if VIRTUALIZATION
 
 config KVM
 	bool "Kernel-based Virtual Machine (KVM) support"
+	depends on !KEXEC
 	select MMU_NOTIFIER
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h
index 99048e5..ccec467 100644
--- a/include/uapi/linux/kexec.h
+++ b/include/uapi/linux/kexec.h
@@ -39,6 +39,7 @@
 #define KEXEC_ARCH_SH      (42 << 16)
 #define KEXEC_ARCH_MIPS_LE (10 << 16)
 #define KEXEC_ARCH_MIPS    ( 8 << 16)
+#define KEXEC_ARCH_ARM64   (183 << 16)
 
 /* The artificial cap on the number of segments passed to kexec_load. */
 #define KEXEC_SEGMENT_MAX 16
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 36+ messages in thread
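
For context on the kimage list handling in the patch above: kexec_list_flush() and relocate_new_kernel both walk the kimage entry list, which encodes commands in the low bits of page-aligned physical addresses.  The user-space sketch below only illustrates that walk; the IND_* values mirror include/linux/kexec.h, and ordinary pointers stand in for physical addresses, an assumption made purely so the example runs outside the kernel.

/* Toy walk of a kimage entry list, modelled on kexec_list_flush() above. */
#include <stdio.h>
#include <stdint.h>

#define IND_DESTINATION	0x1	/* following source pages are copied to this address */
#define IND_INDIRECTION	0x2	/* continue walking at this page of entries */
#define IND_DONE	0x4	/* end of the list */
#define IND_SOURCE	0x8	/* copy one page from this address */
#define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE)

static void walk(const uintptr_t *entry)
{
	for (;; entry++) {
		uintptr_t e = *entry;
		uintptr_t addr = e & ~(uintptr_t)IND_FLAGS;

		switch (e & IND_FLAGS) {
		case IND_DESTINATION:
			printf("dest    %#lx\n", (unsigned long)addr);
			break;
		case IND_SOURCE:
			printf("source  %#lx\n", (unsigned long)addr);
			break;
		case IND_INDIRECTION:
			/* Continue with the entries held in the indirection page. */
			entry = (const uintptr_t *)addr - 1;
			break;
		case IND_DONE:
			return;
		}
	}
}

int main(void)
{
	/* One destination followed by two source pages, then DONE. */
	uintptr_t list[] = {
		0x80000000UL | IND_DESTINATION,
		0x90000000UL | IND_SOURCE,
		0x90001000UL | IND_SOURCE,
		IND_DONE,
	};

	walk(list);
	return 0;
}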

* [PATCH V2 5/6] arm64/kexec: Add core kexec support
@ 2015-04-07 23:01     ` Geoff Levand
  0 siblings, 0 replies; 36+ messages in thread
From: Geoff Levand @ 2015-04-07 23:01 UTC (permalink / raw)
  To: Catalin Marinas
  Cc: Suzuki K. Poulose, marc.zyngier, Will Deacon, kexec,
	christoffer.dall, linux-arm-kernel

Add three new files, kexec.h, machine_kexec.c and relocate_kernel.S to the
arm64 architecture that add support for the kexec re-boot mechanism
(CONFIG_KEXEC) on arm64 platforms.

With the addition of arm64 kexec support, shutdown code paths through the kernel
are executed that previously were not.  To avoid system instability due to
problems in the current arm64 KVM kernel implementation, add a Kconfig dependency
on !KEXEC to the arm64 KVM menu item.

Signed-off-by: Geoff Levand <geoff@infradead.org>
---
I pushed this V2 out to my kexec-v9 branch:

 git://git.kernel.org/pub/scm/linux/kernel/git/geoff/linux-kexec.git kexec-v9

-Geoff

 arch/arm64/Kconfig                  |   9 +++
 arch/arm64/include/asm/kexec.h      |  48 ++++++++++++
 arch/arm64/kernel/Makefile          |   1 +
 arch/arm64/kernel/machine_kexec.c   | 120 +++++++++++++++++++++++++++++
 arch/arm64/kernel/relocate_kernel.S | 149 ++++++++++++++++++++++++++++++++++++
 arch/arm64/kvm/Kconfig              |   1 +
 include/uapi/linux/kexec.h          |   1 +
 7 files changed, 329 insertions(+)
 create mode 100644 arch/arm64/include/asm/kexec.h
 create mode 100644 arch/arm64/kernel/machine_kexec.c
 create mode 100644 arch/arm64/kernel/relocate_kernel.S

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 23d51be..5716edf 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -553,6 +553,15 @@ config SECCOMP
 	  and the task is only allowed to execute a few safe syscalls
 	  defined by each seccomp mode.
 
+config KEXEC
+	depends on (!SMP || PM_SLEEP_SMP)
+	bool "kexec system call"
+	---help---
+	  kexec is a system call that implements the ability to shutdown your
+	  current kernel, and to start another kernel.  It is like a reboot
+	  but it is independent of the system firmware.   And like a reboot
+	  you can start any kernel with it, not just Linux.
+
 config XEN_DOM0
 	def_bool y
 	depends on XEN
diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
new file mode 100644
index 0000000..3530ff5
--- /dev/null
+++ b/arch/arm64/include/asm/kexec.h
@@ -0,0 +1,48 @@
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ * Copyright (C) Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(_ARM64_KEXEC_H)
+#define _ARM64_KEXEC_H
+
+/* Maximum physical address we can use pages from */
+
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can reach in physical address mode */
+
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can use for the control code buffer */
+
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
+
+#define KEXEC_CONTROL_PAGE_SIZE	4096
+
+#define KEXEC_ARCH KEXEC_ARCH_ARM64
+
+#if !defined(__ASSEMBLY__)
+
+/**
+ * crash_setup_regs() - save registers for the panic kernel
+ *
+ * @newregs: registers are saved here
+ * @oldregs: registers to be saved (may be %NULL)
+ */
+
+static inline void crash_setup_regs(struct pt_regs *newregs,
+				    struct pt_regs *oldregs)
+{
+	/* Empty routine needed to avoid build errors. */
+}
+
+#endif /* !defined(__ASSEMBLY__) */
+
+#endif
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index b12e15b..ac3c2e2 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -35,6 +35,7 @@ arm64-obj-$(CONFIG_KGDB)		+= kgdb.o
 arm64-obj-$(CONFIG_EFI)			+= efi.o efi-stub.o efi-entry.o
 arm64-obj-$(CONFIG_PCI)			+= pci.o
 arm64-obj-$(CONFIG_ARMV8_DEPRECATED)	+= armv8_deprecated.o
+arm64-obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
 
 obj-y					+= $(arm64-obj-y) vdso/
 obj-m					+= $(arm64-obj-m)
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
new file mode 100644
index 0000000..82efd4b
--- /dev/null
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -0,0 +1,120 @@
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ * Copyright (C) Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kexec.h>
+#include <linux/of_fdt.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/system_misc.h>
+
+/* Global variables for the relocate_kernel routine. */
+extern const unsigned char relocate_new_kernel[];
+extern const unsigned long relocate_new_kernel_size;
+extern unsigned long arm64_kexec_dtb_addr;
+extern unsigned long arm64_kexec_kimage_head;
+extern unsigned long arm64_kexec_kimage_start;
+
+void machine_kexec_cleanup(struct kimage *image)
+{
+	/* Empty routine needed to avoid build errors. */
+}
+
+/**
+ * machine_kexec_prepare - Prepare for a kexec reboot.
+ *
+ * Called from the core kexec code when a kernel image is loaded.
+ */
+int machine_kexec_prepare(struct kimage *image)
+{
+	arm64_kexec_kimage_start = image->start;
+	return 0;
+}
+
+/**
+ * kexec_list_flush - Helper to flush the kimage list to PoC.
+ */
+static void kexec_list_flush(unsigned long kimage_head)
+{
+	unsigned long *entry;
+
+	for (entry = &kimage_head; ; entry++) {
+		unsigned int flag = *entry & IND_FLAGS;
+		void *addr = phys_to_virt(*entry & PAGE_MASK);
+
+		switch (flag) {
+		case IND_INDIRECTION:
+			entry = (unsigned long *)addr - 1;
+			__flush_dcache_area(addr, PAGE_SIZE);
+			break;
+		case IND_DESTINATION:
+			break;
+		case IND_SOURCE:
+			__flush_dcache_area(addr, PAGE_SIZE);
+			break;
+		case IND_DONE:
+			return;
+		default:
+			BUG();
+		}
+	}
+}
+
+/**
+ * machine_kexec - Do the kexec reboot.
+ *
+ * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
+ */
+void machine_kexec(struct kimage *image)
+{
+	phys_addr_t reboot_code_buffer_phys;
+	void *reboot_code_buffer;
+
+	BUG_ON(num_online_cpus() > 1);
+
+	arm64_kexec_kimage_head = image->head;
+
+	reboot_code_buffer_phys = page_to_phys(image->control_code_page);
+	reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);
+
+	/*
+	 * Copy relocate_new_kernel to the reboot_code_buffer for use
+	 * after the kernel is shut down.
+	 */
+	memcpy(reboot_code_buffer, relocate_new_kernel,
+		relocate_new_kernel_size);
+
+	/* Flush the reboot_code_buffer in preparation for its execution. */
+	__flush_dcache_area(reboot_code_buffer, relocate_new_kernel_size);
+
+	/* Flush the kimage list. */
+	kexec_list_flush(image->head);
+
+	pr_info("Bye!\n");
+
+	/* Disable all DAIF exceptions. */
+	asm volatile ("msr daifset, #0xf" : : : "memory");
+
+	/*
+	 * soft_restart() will shutdown the MMU, disable data caches, then
+	 * transfer control to the reboot_code_buffer which contains a copy of
+	 * the relocate_new_kernel routine.  relocate_new_kernel will use
+	 * physical addressing to relocate the new kernel to its final position
+	 * and then will transfer control to the entry point of the new kernel.
+	 */
+	soft_restart(reboot_code_buffer_phys);
+}
+
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+	/* Empty routine needed to avoid build errors. */
+}
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
new file mode 100644
index 0000000..166d960
--- /dev/null
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -0,0 +1,149 @@
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ * Copyright (C) Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kexec.h>
+
+#include <asm/assembler.h>
+#include <asm/kexec.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+
+
+/*
+ * relocate_new_kernel - Put a 2nd stage kernel image in place and boot it.
+ *
+ * The memory that the old kernel occupies may be overwritten when copying the
+ * new image to its final location.  To ensure that the relocate_new_kernel
+ * routine which does that copy is not overwritten, all code and data needed
+ * by relocate_new_kernel must be between the symbols relocate_new_kernel and
+ * relocate_new_kernel_end.  The machine_kexec() routine will copy
+ * relocate_new_kernel to the kexec control_code_page, a special page which
+ * has been set up to be preserved during the copy operation.
+ */
+.globl relocate_new_kernel
+relocate_new_kernel:
+
+	/* Setup the list loop variables. */
+	ldr	x18, arm64_kexec_kimage_head	/* x18 = list entry */
+	dcache_line_size x17, x0		/* x17 = dcache line size */
+	mov	x16, xzr			/* x16 = segment start */
+	mov	x15, xzr			/* x15 = entry ptr */
+	mov	x14, xzr			/* x14 = copy dest */
+
+	/* Check if the new image needs relocation. */
+	cbz	x18, .Ldone
+	tbnz	x18, IND_DONE_BIT, .Ldone
+
+.Lloop:
+	and	x13, x18, PAGE_MASK		/* x13 = addr */
+
+	/* Test the entry flags. */
+.Ltest_source:
+	tbz	x18, IND_SOURCE_BIT, .Ltest_indirection
+
+	mov x20, x14				/*  x20 = copy dest */
+	mov x21, x13				/*  x21 = copy src */
+
+	/* Invalidate dest page to PoC. */
+	mov	x0, x20
+	add	x19, x0, #PAGE_SIZE
+	sub	x1, x17, #1
+	bic	x0, x0, x1
+1:	dc	ivac, x0
+	add	x0, x0, x17
+	cmp	x0, x19
+	b.lo	1b
+	dsb	sy
+
+	/* Copy page. */
+1:	ldp	x22, x23, [x21]
+	ldp	x24, x25, [x21, #16]
+	ldp	x26, x27, [x21, #32]
+	ldp	x28, x29, [x21, #48]
+	add	x21, x21, #64
+	stnp	x22, x23, [x20]
+	stnp	x24, x25, [x20, #16]
+	stnp	x26, x27, [x20, #32]
+	stnp	x28, x29, [x20, #48]
+	add	x20, x20, #64
+	tst	x21, #(PAGE_SIZE - 1)
+	b.ne	1b
+
+	/* dest += PAGE_SIZE */
+	add	x14, x14, PAGE_SIZE
+	b	.Lnext
+
+.Ltest_indirection:
+	tbz	x18, IND_INDIRECTION_BIT, .Ltest_destination
+
+	/* ptr = addr */
+	mov	x15, x13
+	b	.Lnext
+
+.Ltest_destination:
+	tbz	x18, IND_DESTINATION_BIT, .Lnext
+
+	mov	x16, x13
+
+	/* dest = addr */
+	mov	x14, x13
+
+.Lnext:
+	/* entry = *ptr++ */
+	ldr	x18, [x15], #8
+
+	/* while (!(entry & DONE)) */
+	tbz	x18, IND_DONE_BIT, .Lloop
+
+.Ldone:
+	dsb	sy
+	isb
+	ic	ialluis
+	dsb	sy
+	isb
+
+	/* Start new image. */
+	ldr	x4, arm64_kexec_kimage_start
+	mov	x0, xzr
+	mov	x1, xzr
+	mov	x2, xzr
+	mov	x3, xzr
+	br	x4
+
+.align 3	/* To keep the 64-bit values below naturally aligned. */
+
+/* The machine_kexec routines set these variables. */
+
+/*
+ * arm64_kexec_kimage_start - Copy of image->start, the entry point of the new
+ * image.
+ */
+.globl arm64_kexec_kimage_start
+arm64_kexec_kimage_start:
+	.quad	0x0
+
+/*
+ * arm64_kexec_kimage_head - Copy of image->head, the list of kimage entries.
+ */
+.globl arm64_kexec_kimage_head
+arm64_kexec_kimage_head:
+	.quad	0x0
+
+.Lrelocate_new_kernel_end:
+
+/*
+ * relocate_new_kernel_size - Number of bytes to copy to the control_code_page.
+ */
+.globl relocate_new_kernel_size
+relocate_new_kernel_size:
+	.quad .Lrelocate_new_kernel_end - relocate_new_kernel
+
+.org	KEXEC_CONTROL_PAGE_SIZE
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index f5590c8..30ae7a7 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -18,6 +18,7 @@ if VIRTUALIZATION
 
 config KVM
 	bool "Kernel-based Virtual Machine (KVM) support"
+	depends on !KEXEC
 	select MMU_NOTIFIER
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h
index 99048e5..ccec467 100644
--- a/include/uapi/linux/kexec.h
+++ b/include/uapi/linux/kexec.h
@@ -39,6 +39,7 @@
 #define KEXEC_ARCH_SH      (42 << 16)
 #define KEXEC_ARCH_MIPS_LE (10 << 16)
 #define KEXEC_ARCH_MIPS    ( 8 << 16)
+#define KEXEC_ARCH_ARM64   (183 << 16)
 
 /* The artificial cap on the number of segments passed to kexec_load. */
 #define KEXEC_SEGMENT_MAX 16
-- 
2.1.0





^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 0/6] arm64 kexec kernel patches V8
  2015-03-19 20:35 ` Geoff Levand
@ 2015-04-08 11:16   ` Suzuki K. Poulose
  -1 siblings, 0 replies; 36+ messages in thread
From: Suzuki K. Poulose @ 2015-04-08 11:16 UTC (permalink / raw)
  To: linux-arm-kernel

On 19/03/15 20:35, Geoff Levand wrote:
> Hi All,
>
> This series adds the core support for kexec re-boots on arm64.  This v8 of the
> series is mainly just a rebase to Linux-4.0-rc3, and a few very minor changes
> requested for v7.
>
> To load a second stage kernel and execute a kexec re-boot on arm64 my patches to
> kexec-tools [2], which have not yet been merged upstream, are needed.
>
> I have tested with the ARM VE fast model, the ARM Base model and the ARM
> Foundation model with various kernel config options for both the first and
> second stage kernels.  Kexec on EFI systems works correctly.  With the ACPI
> kernel patches from [3] applied, kexec on ACPI systems seems to work correctly.
> More ACPI + kexec testing is needed.
>
> Patch 1 here moves the macros from proc-macros.S to asm/assembler.h so that the
> dcache_line_size macro it defines can be used by kexec's relocate kernel
> routine.
>
> Patches 2-4 rework the arm64 hcall mechanism to give the arm64 soft_restart()
> routine the ability to switch exception levels from EL1 to EL2 for kernels that
> were entered in EL2.
>
> Patches 5-6 add the actual kexec support.
>
> Please consider all patches for inclusion.
>
> [1]  https://git.kernel.org/cgit/linux/kernel/git/geoff/linux-kexec.git
> [2]  https://git.kernel.org/cgit/linux/kernel/git/geoff/kexec-tools.git
Btw, I get the following build failure for the kexec-tools master branch,
with the Linaro toolchain (crosstool-NG linaro-1.13.1-4.9-2014.09 - Linaro
GCC 4.9-2014.09):

  $ ./configure --host=aarch64-linux-gnu
  [...]
  $ make -j4
kexec/arch/arm64/kexec-arm64.c: In function ‘machine_verify_elf_rel’:
kexec/arch/arm64/kexec-arm64.c:970:29: error: ‘EM_AARCH64’ undeclared
(first use in this function)
   return (ehdr->e_machine == EM_AARCH64);
                              ^
kexec/arch/arm64/kexec-arm64.c:970:29: note: each undeclared identifier 
is reported only once for each function it appears in
kexec/arch/arm64/kexec-arm64.c:971:1: warning: control reaches end of 
non-void function [-Wreturn-type]
  }
  ^
make: *** [kexec/arch/arm64/kexec-arm64.o] Error 1
make: *** Waiting for unfinished jobs....
----

You may need to add the definition of EM_AARCH64 to include/elf.h and
include that instead of linux/elf.h, like the other archs.


Cheers
Suzuki

^ permalink raw reply	[flat|nested] 36+ messages in thread
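
A minimal sketch of the fallback Suzuki suggests here; exactly where it would live in the kexec-tools tree (its include/elf.h, per the suggestion) is an assumption, but the value 183 matches the KEXEC_ARCH_ARM64 definition in the patch above.

/* Hypothetical guard for toolchains whose ELF headers predate AArch64. */
#ifndef EM_AARCH64
#define EM_AARCH64	183	/* official ELF machine number for AArch64 */
#endif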

* Re: [PATCH 0/6] arm64 kexec kernel patches V8
@ 2015-04-08 11:16   ` Suzuki K. Poulose
  0 siblings, 0 replies; 36+ messages in thread
From: Suzuki K. Poulose @ 2015-04-08 11:16 UTC (permalink / raw)
  To: Geoff Levand, Catalin Marinas, Will Deacon
  Cc: Ard Biesheuvel, Marc Zyngier, kexec, christoffer.dall,
	hanjun.guo, linux-arm-kernel

On 19/03/15 20:35, Geoff Levand wrote:
> Hi All,
>
> This series adds the core support for kexec re-boots on arm64.  This v8 of the
> series is mainly just a rebase to Linux-4.0-rc3, and a few very minor changes
> requested for v7.
>
> To load a second stage kernel and execute a kexec re-boot on arm64 my patches to
> kexec-tools [2], which have not yet been merged upstream, are needed.
>
> I have tested with the ARM VE fast model, the ARM Base model and the ARM
> Foundation model with various kernel config options for both the first and
> second stage kernels.  Kexec on EFI systems works correctly.  With the ACPI
> kernel patches from [3] applied, kexec on ACPI systems seems to work correctly.
> More ACPI + kexec testing is needed.
>
> Patch 1 here moves the macros from proc-macros.S to asm/assembler.h so that the
> dcache_line_size macro it defines can be used by kexec's relocate kernel
> routine.
>
> Patches 2-4 rework the arm64 hcall mechanism to give the arm64 soft_restart()
> routine the ability to switch exception levels from EL1 to EL2 for kernels that
> were entered in EL2.
>
> Patches 5-6 add the actual kexec support.
>
> Please consider all patches for inclusion.
>
> [1]  https://git.kernel.org/cgit/linux/kernel/git/geoff/linux-kexec.git
> [2]  https://git.kernel.org/cgit/linux/kernel/git/geoff/kexec-tools.git
Btw, I get the following build failure for the kexec-tools master branch,
with the Linaro toolchain (crosstool-NG linaro-1.13.1-4.9-2014.09 - Linaro
GCC 4.9-2014.09):

  $ ./configure --host=aarch64-linux-gnu
  [...]
  $ make -j4
kexec/arch/arm64/kexec-arm64.c: In function ‘machine_verify_elf_rel’:
kexec/arch/arm64/kexec-arm64.c:970:29: error: ‘EM_AARCH64’ undeclared 
(first use in this function)
   return (ehdr->e_machine == EM_AARCH64);
                              ^
kexec/arch/arm64/kexec-arm64.c:970:29: note: each undeclared identifier 
is reported only once for each function it appears in
kexec/arch/arm64/kexec-arm64.c:971:1: warning: control reaches end of 
non-void function [-Wreturn-type]
  }
  ^
make: *** [kexec/arch/arm64/kexec-arm64.o] Error 1
make: *** Waiting for unfinished jobs....
----

You may need to add the definition of EM_AARCH64 to include/elf.h and
include that instead of linux/elf.h, like the other archs.


Cheers
Suzuki




^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH 0/6] arm64 kexec kernel patches V8
  2015-04-08 11:16   ` Suzuki K. Poulose
@ 2015-04-08 17:14     ` Geoff Levand
  -1 siblings, 0 replies; 36+ messages in thread
From: Geoff Levand @ 2015-04-08 17:14 UTC (permalink / raw)
  To: linux-arm-kernel

Hi,

On Wed, 2015-04-08 at 12:16 +0100, Suzuki K. Poulose wrote:
> Btw, I get the following build failure for kexec-tools master branch, 
> with Linaro tool chain (crosstool-NG linaro-1.13.1-4.9-2014.09 - Linaro 
> GCC 4.9-2014.09 )
> 
>   $ ./configure --host=aarch64-linux-gnu
>   [...]
>   $ make -j4
> kexec/arch/arm64/kexec-arm64.c: In function ‘machine_verify_elf_rel’:
> kexec/arch/arm64/kexec-arm64.c:970:29: error: ‘EM_AARCH64’ undeclared

I guess that toolchain is using some ancient headers.  As a workaround,
you can run configure with CPPFLAGS='-DEM_AARCH64=183'.

-Geoff

^ permalink raw reply	[flat|nested] 36+ messages in thread
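
For reference, the workaround amounts to rerunning the configure step quoted above with the macro predefined, for example:

  $ ./configure --host=aarch64-linux-gnu CPPFLAGS='-DEM_AARCH64=183'
  $ make -j4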

* Re: [PATCH 0/6] arm64 kexec kernel patches V8
@ 2015-04-08 17:14     ` Geoff Levand
  0 siblings, 0 replies; 36+ messages in thread
From: Geoff Levand @ 2015-04-08 17:14 UTC (permalink / raw)
  To: Suzuki K. Poulose
  Cc: linux-arm-kernel, Ard Biesheuvel, Marc Zyngier, Catalin Marinas,
	Will Deacon, hanjun.guo, kexec, christoffer.dall

Hi,

On Wed, 2015-04-08 at 12:16 +0100, Suzuki K. Poulose wrote:
> Btw, I get the following build failure for kexec-tools master branch, 
> with Linaro tool chain (crosstool-NG linaro-1.13.1-4.9-2014.09 - Linaro 
> GCC 4.9-2014.09 )
> 
>   $ ./configure --host=aarch64-linux-gnu
>   [...]
>   $ make -j4
> kexec/arch/arm64/kexec-arm64.c: In function ‘machine_verify_elf_rel’:
> kexec/arch/arm64/kexec-arm64.c:970:29: error: ‘EM_AARCH64’ undeclared 

I guess that toolchain is using some ancient headers.  As a workaround,
you can run configure with CPPFLAGS='-DEM_AARCH64=183'.

-Geoff



^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH 0/6] arm64 kexec kernel patches V8
  2015-04-08 17:14     ` Geoff Levand
@ 2015-07-03  3:39       ` Pratyush Anand
  -1 siblings, 0 replies; 36+ messages in thread
From: Pratyush Anand @ 2015-07-03  3:39 UTC (permalink / raw)
  To: linux-arm-kernel

Hi Geoff,

I suppose there will be a next revision of the kexec patches.  When is
that expected?
Thanks a lot for working on this.

~Pratyush

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 0/6] arm64 kexec kernel patches V8
@ 2015-07-03  3:39       ` Pratyush Anand
  0 siblings, 0 replies; 36+ messages in thread
From: Pratyush Anand @ 2015-07-03  3:39 UTC (permalink / raw)
  To: Geoff Levand
  Cc: Ard Biesheuvel, Marc Zyngier, Catalin Marinas, Suzuki K. Poulose,
	Will Deacon, christoffer.dall, hanjun.guo, kexec,
	linux-arm-kernel

Hi Geoff,

I suppose there will be a next revision of the kexec patches.  When is
that expected?
Thanks a lot for working on this.

~Pratyush


^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH 0/6] arm64 kexec kernel patches V8
  2015-07-03  3:39       ` Pratyush Anand
@ 2015-07-06 17:23         ` Geoff Levand
  -1 siblings, 0 replies; 36+ messages in thread
From: Geoff Levand @ 2015-07-06 17:23 UTC (permalink / raw)
  To: linux-arm-kernel

Hi Pratyush,

On Fri, 2015-07-03 at 09:09 +0530, Pratyush Anand wrote:
> I suppose there would be a next revision of kexec patches. When is the
> that  expected?

As I just replied to Marc's mail, I'll work on preparing a rebased kexec
patch set the next chance I get.  I'm hoping I can start on it this week.

-Geoff

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 0/6] arm64 kexec kernel patches V8
@ 2015-07-06 17:23         ` Geoff Levand
  0 siblings, 0 replies; 36+ messages in thread
From: Geoff Levand @ 2015-07-06 17:23 UTC (permalink / raw)
  To: Pratyush Anand
  Cc: Ard Biesheuvel, Marc Zyngier, Catalin Marinas, Suzuki K. Poulose,
	Will Deacon, christoffer.dall, hanjun.guo, kexec,
	linux-arm-kernel

Hi Pratyush,

On Fri, 2015-07-03 at 09:09 +0530, Pratyush Anand wrote:
> I suppose there would be a next revision of kexec patches. When is the
> that  expected?

As I just replied to Marc's mail, I'll work on preparing a rebased kexec
patch set the next chance I get.  I'm hoping I can start on it this week.

-Geoff



^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH 2/6] arm64: Convert hcalls to use HVC immediate value
  2015-03-19 20:35   ` Geoff Levand
@ 2015-09-23 19:21     ` Timur Tabi
  -1 siblings, 0 replies; 36+ messages in thread
From: Timur Tabi @ 2015-09-23 19:21 UTC (permalink / raw)
  To: linux-arm-kernel

On Thu, Mar 19, 2015 at 3:35 PM, Geoff Levand <geoff@infradead.org> wrote:
> The existing arm64 hcall implementations are limited in that they only allow
> for two distinct hcalls, with the x0 register either zero or non-zero.  Also,
> the API of the hyp-stub exception vector routines and the KVM exception vector
> routines differ; hyp-stub uses a non-zero value in x0 to implement
> __hyp_set_vectors, whereas KVM uses it to implement kvm_call_hyp.
>
> To allow for additional hcalls to be defined and to make the arm64 hcall API
> more consistent across exception vector routines, change the hcall
> implementations to use the 16 bit immediate value of the HVC instruction to
> specify the hcall type.
>
> Define three new preprocessor macros HVC_CALL_HYP, HVC_GET_VECTORS, and
> HVC_SET_VECTORS to be used as hcall type specifiers and convert the
> existing __hyp_get_vectors(), __hyp_set_vectors() and kvm_call_hyp() routines
> to use these new macros when executing an HVC call.  Also, change the
> corresponding hyp-stub and KVM el1_sync exception vector routines to use these
> new macros.
>
> Signed-off-by: Geoff Levand <geoff@infradead.org>

What is the status of this patch?  I see it's not in 4.2.  I ask
because this patch might be breaking our 4.2-based kernel, and I'm
just wondering if it's still applicable.

-- 
Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project.

^ permalink raw reply	[flat|nested] 36+ messages in thread
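
Since the full patch body is not quoted here, a small stand-alone C sketch of the dispatch idea may help: an HVC trapped to EL2 reports exception class 0x16 in ESR_EL2, with the 16-bit immediate in the low bits of the ISS field, so the el1_sync handler can switch on that value instead of on x0.  The ESR value below is fabricated for illustration, and the macro names merely mirror the kernel's ESR_ELx_* naming.

#include <stdio.h>
#include <stdint.h>

#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_HVC64	0x16	/* HVC executed from AArch64 state */

int main(void)
{
	/* Fabricated ESR_EL2 value: EC = HVC64, imm16 = 2 (standing in for an hcall type). */
	uint32_t esr = ((uint32_t)ESR_ELx_EC_HVC64 << ESR_ELx_EC_SHIFT) | 0x2;

	uint32_t ec    = esr >> ESR_ELx_EC_SHIFT;
	uint32_t imm16 = esr & 0xffff;	/* HVC immediate lives in ISS[15:0] */

	if (ec == ESR_ELx_EC_HVC64)
		printf("hcall type taken from the HVC immediate: %u\n", imm16);

	return 0;
}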

* Re: [PATCH 2/6] arm64: Convert hcalls to use HVC immediate value
@ 2015-09-23 19:21     ` Timur Tabi
  0 siblings, 0 replies; 36+ messages in thread
From: Timur Tabi @ 2015-09-23 19:21 UTC (permalink / raw)
  To: Geoff Levand
  Cc: Marc Zyngier, Catalin Marinas, Will Deacon, linux-arm-kernel,
	kexec, Christoffer Dall

On Thu, Mar 19, 2015 at 3:35 PM, Geoff Levand <geoff@infradead.org> wrote:
> The existing arm64 hcall implementations are limited in that they only allow
> for two distinct hcalls, with the x0 register either zero or non-zero.  Also,
> the API of the hyp-stub exception vector routines and the KVM exception vector
> routines differ; hyp-stub uses a non-zero value in x0 to implement
> __hyp_set_vectors, whereas KVM uses it to implement kvm_call_hyp.
>
> To allow for additional hcalls to be defined and to make the arm64 hcall API
> more consistent across exception vector routines, change the hcall
> implementations to use the 16 bit immediate value of the HVC instruction to
> specify the hcall type.
>
> Define three new preprocessor macros HVC_CALL_HYP, HVC_GET_VECTORS, and
> HVC_SET_VECTORS to be used as hcall type specifiers and convert the
> existing __hyp_get_vectors(), __hyp_set_vectors() and kvm_call_hyp() routines
> to use these new macros when executing an HVC call.  Also, change the
> corresponding hyp-stub and KVM el1_sync exception vector routines to use these
> new macros.
>
> Signed-off-by: Geoff Levand <geoff@infradead.org>

What is the status of this patch?  I see it's not in 4.2.  I ask
because this patch might be breaking our 4.2-based kernel, and I'm
just wondering if it's still applicable.

-- 
Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project.


^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH 2/6] arm64: Convert hcalls to use HVC immediate value
  2015-09-23 19:21     ` Timur Tabi
@ 2015-09-24 19:04       ` Geoff Levand
  -1 siblings, 0 replies; 36+ messages in thread
From: Geoff Levand @ 2015-09-24 19:04 UTC (permalink / raw)
  To: linux-arm-kernel

On Wed, 2015-09-23 at 14:21 -0500, Timur Tabi wrote:
> What is the status of this patch?  I see it's not in 4.2.  I ask
> because this patch might be breaking our 4.2-based kernel, and I'm
> just wondering if it's still applicable.

I have it in my linux-kexec repo, which is currently based on 4.3-rc2.  I
plan to submit it again in the next week or so after testing.

-Geoff

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 2/6] arm64: Convert hcalls to use HVC immediate value
@ 2015-09-24 19:04       ` Geoff Levand
  0 siblings, 0 replies; 36+ messages in thread
From: Geoff Levand @ 2015-09-24 19:04 UTC (permalink / raw)
  To: Timur Tabi
  Cc: Marc Zyngier, Catalin Marinas, Will Deacon, linux-arm-kernel,
	kexec, Christoffer Dall

On Wed, 2015-09-23 at 14:21 -0500, Timur Tabi wrote:
> What is the status of this patch?  I see it's not in 4.2.  I ask
> because this patch might be breaking our 4.2-based kernel, and I'm
> just wondering if it's still applicable.

I have it in my linux-kexec repo, which is currently based on 4.3-rc2.  I
plan to submit it again in the next week or so after testing.

-Geoff




^ permalink raw reply	[flat|nested] 36+ messages in thread

end of thread, other threads:[~2015-09-24 19:04 UTC | newest]

Thread overview: 36+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-03-19 20:35 [PATCH 0/6] arm64 kexec kernel patches V8 Geoff Levand
2015-03-19 20:35 ` Geoff Levand
2015-03-19 20:35 ` [PATCH 2/6] arm64: Convert hcalls to use HVC immediate value Geoff Levand
2015-03-19 20:35   ` Geoff Levand
2015-09-23 19:21   ` Timur Tabi
2015-09-23 19:21     ` Timur Tabi
2015-09-24 19:04     ` Geoff Levand
2015-09-24 19:04       ` Geoff Levand
2015-03-19 20:35 ` [PATCH 4/6] arm64: Add EL2 switch to soft_restart Geoff Levand
2015-03-19 20:35   ` Geoff Levand
2015-03-19 20:35 ` [PATCH 1/6] arm64: Fold proc-macros.S into assembler.h Geoff Levand
2015-03-19 20:35   ` Geoff Levand
2015-03-19 20:35 ` [PATCH 5/6] arm64/kexec: Add core kexec support Geoff Levand
2015-03-19 20:35   ` Geoff Levand
2015-04-07 16:38   ` Suzuki K. Poulose
2015-04-07 16:38     ` Suzuki K. Poulose
2015-04-07 22:48     ` Geoff Levand
2015-04-07 22:48       ` Geoff Levand
2015-04-07 23:01   ` [PATCH V2 " Geoff Levand
2015-04-07 23:01     ` Geoff Levand
2015-03-19 20:35 ` [PATCH 3/6] arm64: Add new hcall HVC_CALL_FUNC Geoff Levand
2015-03-19 20:35   ` Geoff Levand
2015-03-19 20:35 ` [PATCH 6/6] arm64/kexec: Add pr_devel output Geoff Levand
2015-03-19 20:35   ` Geoff Levand
2015-03-20 19:48 ` [PATCH 0/6] arm64 kexec kernel patches V8 Mark Rutland
2015-03-20 19:48   ` Mark Rutland
2015-04-03 16:48 ` Geoff Levand
2015-04-03 16:48   ` Geoff Levand
2015-04-08 11:16 ` Suzuki K. Poulose
2015-04-08 11:16   ` Suzuki K. Poulose
2015-04-08 17:14   ` Geoff Levand
2015-04-08 17:14     ` Geoff Levand
2015-07-03  3:39     ` Pratyush Anand
2015-07-03  3:39       ` Pratyush Anand
2015-07-06 17:23       ` Geoff Levand
2015-07-06 17:23         ` Geoff Levand

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.