* [PATCH 0/4] powerpc: enable relocatable support for 6xx
@ 2013-06-19 9:20 Kevin Hao
2013-06-19 9:20 ` [PATCH 1/4] " Kevin Hao
` (3 more replies)
0 siblings, 4 replies; 5+ messages in thread
From: Kevin Hao @ 2013-06-19 9:20 UTC (permalink / raw)
To: Benjamin Herrenschmidt; +Cc: linuxppc
This patch series enables the relocatable support for 6xx boards.
With these patches:
* the kernel can boot from any address between 0x10000 and 0x2000000
* kdump works
* a single kernel image can be used as either the boot kernel or the kdump kernel
Boot tested on an mpc8260 board. Also passed the build test for the
following configurations:
ppc40x_defconfig
ppc64e_defconfig
ppc64_defconfig
corenet32_smp_defconfig
corenet64_smp_defconfig
ppc44x_defconfig
pmac32_defconfig
pq2fads_defconfig
mpc5200_defconfig
pseries_defconfig
---
Kevin Hao (4):
powerpc: enable relocatable support for 6xx
powerpc: move the exception trampoline helper functions to a separate
file
powerpc: s/kdump/exception/ for the exception trampoline functions
powerpc: make the kernel bootable from non 0 address for 6xx
arch/powerpc/Kconfig | 4 +-
arch/powerpc/include/asm/exception_trampoline.h | 35 ++++++++
arch/powerpc/include/asm/kdump.h | 32 --------
arch/powerpc/include/asm/page.h | 2 +-
arch/powerpc/kernel/Makefile | 1 +
arch/powerpc/kernel/crash_dump.c | 41 ----------
arch/powerpc/kernel/exception_trampoline.c | 82 +++++++++++++++++++
arch/powerpc/kernel/head_32.S | 103 ++++++++++++++++++++++++
arch/powerpc/kernel/prom.c | 4 +-
arch/powerpc/kernel/prom_init_check.sh | 2 +-
arch/powerpc/kernel/setup_32.c | 3 +-
arch/powerpc/kernel/setup_64.c | 4 +-
arch/powerpc/mm/ppc_mmu_32.c | 7 +-
13 files changed, 232 insertions(+), 88 deletions(-)
create mode 100644 arch/powerpc/include/asm/exception_trampoline.h
create mode 100644 arch/powerpc/kernel/exception_trampoline.c
--
1.8.1.4
Thanks,
Kevin
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH 1/4] powerpc: enable relocatable support for 6xx
2013-06-19 9:20 [PATCH 0/4] powerpc: enable relocatable support for 6xx Kevin Hao
@ 2013-06-19 9:20 ` Kevin Hao
2013-06-19 9:20 ` [PATCH 2/4] powerpc: move the exception trampoline helper functions to a separate file Kevin Hao
` (2 subsequent siblings)
3 siblings, 0 replies; 5+ messages in thread
From: Kevin Hao @ 2013-06-19 9:20 UTC (permalink / raw)
To: Benjamin Herrenschmidt; +Cc: linuxppc
This is based on the code in head_44x.S. With this patch the kernel
can only boot from 0 with CONFIG_RELOCATABLE enabled. We will add the
support to boot from a non 0 address in the following patches.
Signed-off-by: Kevin Hao <haokexin@gmail.com>
---
arch/powerpc/Kconfig | 2 +-
arch/powerpc/include/asm/page.h | 2 +-
arch/powerpc/kernel/head_32.S | 103 +++++++++++++++++++++++++++++++++
arch/powerpc/kernel/prom_init_check.sh | 2 +-
4 files changed, 106 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c33e3ad..8fe2792 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -866,7 +866,7 @@ config DYNAMIC_MEMSTART
config RELOCATABLE
bool "Build a relocatable kernel"
- depends on ADVANCED_OPTIONS && FLATMEM && 44x
+ depends on ADVANCED_OPTIONS && FLATMEM && (44x || 6xx)
select NONSTATIC_KERNEL
help
This builds a kernel image that is capable of running at the
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 988c812..7145c14 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -207,7 +207,7 @@ extern long long virt_phys_offset;
* On non-Book-E PPC64 PAGE_OFFSET and MEMORY_START are constants so use
* the other definitions for __va & __pa.
*/
-#ifdef CONFIG_BOOKE
+#if defined(CONFIG_BOOKE) || defined(CONFIG_6xx)
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index dc0488b..eb47b13 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -73,6 +73,8 @@ _ENTRY(_start);
nop /* used by __secondary_hold on prep (mtx) and chrp smp */
nop
+ bl perform_relocation
+
/* PMAC
* Enter here with the kernel text, data and bss loaded starting at
* 0, running with virtual == physical mapping.
@@ -149,6 +151,8 @@ __start:
*/
bl early_init
+ bl after_relocation_init
+
/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
* the physical address we are running at, returned by early_init()
*/
@@ -180,6 +184,7 @@ __after_mmu_off:
#endif /* CONFIG_6xx */
+#ifndef CONFIG_RELOCATABLE
/*
* We need to run with _start at physical address 0.
* On CHRP, we are loaded at 0x10000 since OF on CHRP uses
@@ -193,6 +198,8 @@ __after_mmu_off:
lis r5,PHYSICAL_START@h
cmplw 0,r4,r5 /* already running at PHYSICAL_START? */
bne relocate_kernel
+#endif
+
/*
* we now have the 1st 16M of ram mapped with the bats.
* prep needs the mmu to be turned on here, but pmac already has it on.
@@ -1263,6 +1270,102 @@ m8260_gorom:
blr
#endif
+perform_relocation:
+#ifdef CONFIG_RELOCATABLE
+/*
+ * Relocate ourselves to the current runtime address.
+ * This is called only by the Boot CPU.
+ * r21 will be loaded with the physical runtime address of _stext
+ * Save the r3~r7 since these registers may contain the values needed
+ * by the following boot code.
+ */
+ mr r22,r3
+ mr r23,r4
+ mr r24,r5
+ mr r25,r6
+ mr r26,r7
+
+ mflr r20
+ bl 0f /* Get our runtime address */
+0: mflr r21 /* Make it accessible */
+ addis r21,r21,(_stext - 0b)@ha
+ addi r21,r21,(_stext - 0b)@l /*Get our current runtime base*/
+
+ /*
+ * We have the runtime address of our base.
+ * We calculate our shift of offset from a 256M page.
+ * We could map the 256M page we belong to at PAGE_OFFSET and
+ * get going from there.
+ */
+ lis r4,KERNELBASE@h
+ ori r4,r4,KERNELBASE@l
+ rlwinm r6,r21,0,4,31 /* r6 = PHYS_START % 256M */
+ rlwinm r5,r4,0,4,31 /* r5 = KERNELBASE % 256M */
+ subf r3,r5,r6 /* r3 = r6 - r5 */
+	add	r3,r4,r3	/* Required Virtual Address */
+
+ bl relocate
+ mtlr r20
+ mr r3,r22
+ mr r4,r23
+ mr r5,r24
+ mr r6,r25
+ mr r7,r26
+#endif
+ blr
+
+after_relocation_init:
+#ifdef CONFIG_RELOCATABLE
+ /*
+ * Relocatable kernel support based on processing of dynamic
+ * relocation entries.
+ *
+ * r21 will contain the current offset of _stext
+ */
+ lis r3,kernstart_addr@ha
+ la r3,kernstart_addr@l(r3)
+
+ /* Store kernstart_addr */
+ tophys(r3,r3)
+ stw r21,0(r3)
+
+ /*
+ * Compute the virt_phys_offset :
+ * virt_phys_offset = stext.run - kernstart_addr
+ *
+ * stext.run = (KERNELBASE & ~0xfffffff) + (kernstart_addr & 0xfffffff)
+ * When we relocate, we have :
+ *
+ * (kernstart_addr & 0xfffffff) = (stext.run & 0xfffffff)
+ *
+ * hence:
+ * virt_phys_offset = (KERNELBASE & ~0xfffffff)
+ * - (kernstart_addr & ~0xfffffff)
+ *
+ */
+
+ /* KERNELBASE&~0xfffffff => (r4,r5) */
+ lis r5,KERNELBASE@h
+ rlwinm r5,r5,0,0,3 /* Align to 256M, lower 32bit */
+
+ /* kernelstart_addr & ~0xfffffff => (r6,r7) */
+ rlwinm r7,r21,0,0,3 /* Align to 256M, lower 32bit */
+
+ /*
+ * 64bit subtraction.
+ */
+ subf r5,r7,r5
+
+ /* Store virt_phys_offset */
+ lis r3,virt_phys_offset@h
+ ori r3,r3,virt_phys_offset@l
+
+ tophys(r3,r3)
+ li r4,0
+ stw r4,0(r3) /* Higher 32bit */
+ stw r5,4(r3) /* Lower 32bit */
+#endif
+ blr
/*
* We put a few things here that have to be page-aligned.
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index 3765da6..a24c208 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -22,7 +22,7 @@ __secondary_hold_acknowledge __secondary_hold_spinloop __start
strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224
reloc_got2 kernstart_addr memstart_addr linux_banner _stext
opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry
-boot_command_line __prom_init_toc_start __prom_init_toc_end"
+boot_command_line __prom_init_toc_start __prom_init_toc_end virt_phys_offset"
NM="$1"
OBJ="$2"
--
1.8.1.4
^ permalink raw reply related [flat|nested] 5+ messages in thread
* [PATCH 2/4] powerpc: move the exception trampoline helper functions to a separate file
2013-06-19 9:20 [PATCH 0/4] powerpc: enable relocatable support for 6xx Kevin Hao
2013-06-19 9:20 ` [PATCH 1/4] " Kevin Hao
@ 2013-06-19 9:20 ` Kevin Hao
2013-06-19 9:20 ` [PATCH 3/4] powerpc: s/kdump/exception/ for the exception trampoline functions Kevin Hao
2013-06-19 9:20 ` [PATCH 4/4] powerpc: make the kernel bootable from non 0 address for 6xx Kevin Hao
3 siblings, 0 replies; 5+ messages in thread
From: Kevin Hao @ 2013-06-19 9:20 UTC (permalink / raw)
To: Benjamin Herrenschmidt; +Cc: linuxppc
For some platforms such as 6xx we need to set up the exception
trampoline for a relocatable kernel even though CONFIG_CRASH_DUMP
is disabled. So move these functions to a separate file so they can
be used by a non-dump kernel. This patch doesn't introduce any functional
change.
Signed-off-by: Kevin Hao <haokexin@gmail.com>
---
arch/powerpc/include/asm/exception_trampoline.h | 35 +++++++++++++
arch/powerpc/include/asm/kdump.h | 32 -----------
arch/powerpc/kernel/Makefile | 2 +-
arch/powerpc/kernel/crash_dump.c | 41 ---------------
arch/powerpc/kernel/exception_trampoline.c | 70 +++++++++++++++++++++++++
arch/powerpc/kernel/prom.c | 2 +-
arch/powerpc/kernel/setup_32.c | 1 +
arch/powerpc/kernel/setup_64.c | 2 +-
8 files changed, 109 insertions(+), 76 deletions(-)
create mode 100644 arch/powerpc/include/asm/exception_trampoline.h
create mode 100644 arch/powerpc/kernel/exception_trampoline.c
diff --git a/arch/powerpc/include/asm/exception_trampoline.h b/arch/powerpc/include/asm/exception_trampoline.h
new file mode 100644
index 0000000..707ad6c
--- /dev/null
+++ b/arch/powerpc/include/asm/exception_trampoline.h
@@ -0,0 +1,35 @@
+#ifndef _EXCEPTION_TRAMPOLINE_H
+#define _EXCEPTION_TRAMPOLINE_H
+
+/* How many bytes to reserve at zero for kdump. The reserve limit should
+ * be greater or equal to the trampoline's end address.
+ * Reserve to the end of the FWNMI area, see head_64.S */
+#define KDUMP_RESERVE_LIMIT 0x10000 /* 64K */
+
+/*
+ * On PPC64 translation is disabled during trampoline setup, so we use
+ * physical addresses. Though on PPC32 translation is already enabled,
+ * so we can't do the same. Luckily create_trampoline() creates relative
+ * branches, so we can just add the PAGE_OFFSET and don't worry about it.
+ */
+#ifdef __powerpc64__
+#define KDUMP_TRAMPOLINE_START 0x0100
+#define KDUMP_TRAMPOLINE_END 0x3000
+#else
+#define KDUMP_TRAMPOLINE_START (0x0100 + PAGE_OFFSET)
+#define KDUMP_TRAMPOLINE_END (0x3000 + PAGE_OFFSET)
+#endif /* __powerpc64__ */
+
+#ifndef __ASSEMBLY__
+
+#if defined(CONFIG_CRASH_DUMP) && !defined(CONFIG_NONSTATIC_KERNEL)
+extern void reserve_kdump_trampoline(void);
+extern void setup_kdump_trampoline(void);
+#else
+/* !CRASH_DUMP || !NONSTATIC_KERNEL */
+static inline void reserve_kdump_trampoline(void) { ; }
+static inline void setup_kdump_trampoline(void) { ; }
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* _EXCEPTION_TRAMPOLINE_H */
diff --git a/arch/powerpc/include/asm/kdump.h b/arch/powerpc/include/asm/kdump.h
index c977620..7ac3553 100644
--- a/arch/powerpc/include/asm/kdump.h
+++ b/arch/powerpc/include/asm/kdump.h
@@ -5,42 +5,10 @@
#define KDUMP_KERNELBASE 0x2000000
-/* How many bytes to reserve at zero for kdump. The reserve limit should
- * be greater or equal to the trampoline's end address.
- * Reserve to the end of the FWNMI area, see head_64.S */
-#define KDUMP_RESERVE_LIMIT 0x10000 /* 64K */
-
#ifdef CONFIG_CRASH_DUMP
-/*
- * On PPC64 translation is disabled during trampoline setup, so we use
- * physical addresses. Though on PPC32 translation is already enabled,
- * so we can't do the same. Luckily create_trampoline() creates relative
- * branches, so we can just add the PAGE_OFFSET and don't worry about it.
- */
-#ifdef __powerpc64__
-#define KDUMP_TRAMPOLINE_START 0x0100
-#define KDUMP_TRAMPOLINE_END 0x3000
-#else
-#define KDUMP_TRAMPOLINE_START (0x0100 + PAGE_OFFSET)
-#define KDUMP_TRAMPOLINE_END (0x3000 + PAGE_OFFSET)
-#endif /* __powerpc64__ */
-
#define KDUMP_MIN_TCE_ENTRIES 2048
#endif /* CONFIG_CRASH_DUMP */
-#ifndef __ASSEMBLY__
-
-#if defined(CONFIG_CRASH_DUMP) && !defined(CONFIG_NONSTATIC_KERNEL)
-extern void reserve_kdump_trampoline(void);
-extern void setup_kdump_trampoline(void);
-#else
-/* !CRASH_DUMP || !NONSTATIC_KERNEL */
-static inline void reserve_kdump_trampoline(void) { ; }
-static inline void setup_kdump_trampoline(void) { ; }
-#endif
-
-#endif /* __ASSEMBLY__ */
-
#endif /* __PPC64_KDUMP_H */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index f960a79..c73a0e3 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -59,7 +59,7 @@ obj-$(CONFIG_LPARCFG) += lparcfg.o
obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_IBMEBUS) += ibmebus.o
obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o exception_trampoline.o
obj-$(CONFIG_FA_DUMP) += fadump.o
ifeq ($(CONFIG_PPC32),y)
obj-$(CONFIG_E500) += idle_e500.o
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 9ec3fe1..56dab87 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -28,47 +28,6 @@
#define DBG(fmt...)
#endif
-#ifndef CONFIG_NONSTATIC_KERNEL
-void __init reserve_kdump_trampoline(void)
-{
- memblock_reserve(0, KDUMP_RESERVE_LIMIT);
-}
-
-static void __init create_trampoline(unsigned long addr)
-{
- unsigned int *p = (unsigned int *)addr;
-
- /* The maximum range of a single instruction branch, is the current
- * instruction's address + (32 MB - 4) bytes. For the trampoline we
- * need to branch to current address + 32 MB. So we insert a nop at
- * the trampoline address, then the next instruction (+ 4 bytes)
- * does a branch to (32 MB - 4). The net effect is that when we
- * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
- * two instructions it doesn't require any registers.
- */
- patch_instruction(p, PPC_INST_NOP);
- patch_branch(++p, addr + PHYSICAL_START, 0);
-}
-
-void __init setup_kdump_trampoline(void)
-{
- unsigned long i;
-
- DBG(" -> setup_kdump_trampoline()\n");
-
- for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
- create_trampoline(i);
- }
-
-#ifdef CONFIG_PPC_PSERIES
- create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
- create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
-#endif /* CONFIG_PPC_PSERIES */
-
- DBG(" <- setup_kdump_trampoline()\n");
-}
-#endif /* CONFIG_NONSTATIC_KERNEL */
-
static int __init parse_savemaxmem(char *p)
{
if (p)
diff --git a/arch/powerpc/kernel/exception_trampoline.c b/arch/powerpc/kernel/exception_trampoline.c
new file mode 100644
index 0000000..71f4b72
--- /dev/null
+++ b/arch/powerpc/kernel/exception_trampoline.c
@@ -0,0 +1,70 @@
+/*
+ * Routines for doing kexec-based kdump.
+ *
+ * Copyright (C) 2005, IBM Corp.
+ *
+ * Created by: Michael Ellerman
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#undef DEBUG
+
+#include <linux/crash_dump.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <asm/code-patching.h>
+#include <asm/kdump.h>
+#include <asm/prom.h>
+#include <asm/firmware.h>
+#include <asm/uaccess.h>
+#include <asm/rtas.h>
+
+#ifdef DEBUG
+#include <asm/udbg.h>
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+#ifndef CONFIG_NONSTATIC_KERNEL
+void __init reserve_kdump_trampoline(void)
+{
+ memblock_reserve(0, KDUMP_RESERVE_LIMIT);
+}
+
+static void __init create_trampoline(unsigned long addr)
+{
+ unsigned int *p = (unsigned int *)addr;
+
+ /* The maximum range of a single instruction branch, is the current
+ * instruction's address + (32 MB - 4) bytes. For the trampoline we
+ * need to branch to current address + 32 MB. So we insert a nop at
+ * the trampoline address, then the next instruction (+ 4 bytes)
+ * does a branch to (32 MB - 4). The net effect is that when we
+ * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
+ * two instructions it doesn't require any registers.
+ */
+ patch_instruction(p, PPC_INST_NOP);
+ patch_branch(++p, addr + PHYSICAL_START, 0);
+}
+
+void __init setup_kdump_trampoline(void)
+{
+ unsigned long i;
+
+ DBG(" -> setup_kdump_trampoline()\n");
+
+ for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
+ create_trampoline(i);
+ }
+
+#ifdef CONFIG_PPC_PSERIES
+ create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
+ create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
+#endif /* CONFIG_PPC_PSERIES */
+
+ DBG(" <- setup_kdump_trampoline()\n");
+}
+#endif /* CONFIG_NONSTATIC_KERNEL */
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 8b6f7a9..4a13ac5 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -40,7 +40,7 @@
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <asm/kdump.h>
+#include <asm/exception_trampoline.h>
#include <asm/smp.h>
#include <asm/mmu.h>
#include <asm/paca.h>
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index a8f54ec..9f3aa43 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -38,6 +38,7 @@
#include <asm/serial.h>
#include <asm/udbg.h>
#include <asm/mmu_context.h>
+#include <asm/exception_trampoline.h>
#include "setup.h"
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index e379d3f..06b8562 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -38,7 +38,7 @@
#include <linux/hugetlb.h>
#include <asm/io.h>
-#include <asm/kdump.h>
+#include <asm/exception_trampoline.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
--
1.8.1.4
^ permalink raw reply related [flat|nested] 5+ messages in thread
* [PATCH 3/4] powerpc: s/kdump/exception/ for the exception trampoline functions
2013-06-19 9:20 [PATCH 0/4] powerpc: enable relocatable support for 6xx Kevin Hao
2013-06-19 9:20 ` [PATCH 1/4] " Kevin Hao
2013-06-19 9:20 ` [PATCH 2/4] powerpc: move the exception trampoline helper functions to a separate file Kevin Hao
@ 2013-06-19 9:20 ` Kevin Hao
2013-06-19 9:20 ` [PATCH 4/4] powerpc: make the kernel bootable from non 0 address for 6xx Kevin Hao
3 siblings, 0 replies; 5+ messages in thread
From: Kevin Hao @ 2013-06-19 9:20 UTC (permalink / raw)
To: Benjamin Herrenschmidt; +Cc: linuxppc
These functions are not just kdump specific. Replace the 'kdump' with
the 'exception' to make them more general.
Signed-off-by: Kevin Hao <haokexin@gmail.com>
---
arch/powerpc/include/asm/exception_trampoline.h | 20 ++++++++++----------
arch/powerpc/kernel/exception_trampoline.c | 14 +++++++-------
arch/powerpc/kernel/prom.c | 2 +-
arch/powerpc/kernel/setup_32.c | 2 +-
arch/powerpc/kernel/setup_64.c | 2 +-
5 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/arch/powerpc/include/asm/exception_trampoline.h b/arch/powerpc/include/asm/exception_trampoline.h
index 707ad6c..88281c9 100644
--- a/arch/powerpc/include/asm/exception_trampoline.h
+++ b/arch/powerpc/include/asm/exception_trampoline.h
@@ -1,10 +1,10 @@
#ifndef _EXCEPTION_TRAMPOLINE_H
#define _EXCEPTION_TRAMPOLINE_H
-/* How many bytes to reserve at zero for kdump. The reserve limit should
+/* How many bytes to reserve at zero for exception. The reserve limit should
* be greater or equal to the trampoline's end address.
* Reserve to the end of the FWNMI area, see head_64.S */
-#define KDUMP_RESERVE_LIMIT 0x10000 /* 64K */
+#define EXCEPTION_RESERVE_LIMIT 0x10000 /* 64K */
/*
* On PPC64 translation is disabled during trampoline setup, so we use
@@ -13,22 +13,22 @@
* branches, so we can just add the PAGE_OFFSET and don't worry about it.
*/
#ifdef __powerpc64__
-#define KDUMP_TRAMPOLINE_START 0x0100
-#define KDUMP_TRAMPOLINE_END 0x3000
+#define EXCEPTION_TRAMPOLINE_START 0x0100
+#define EXCEPTION_TRAMPOLINE_END 0x3000
#else
-#define KDUMP_TRAMPOLINE_START (0x0100 + PAGE_OFFSET)
-#define KDUMP_TRAMPOLINE_END (0x3000 + PAGE_OFFSET)
+#define EXCEPTION_TRAMPOLINE_START (0x0100 + PAGE_OFFSET)
+#define EXCEPTION_TRAMPOLINE_END (0x3000 + PAGE_OFFSET)
#endif /* __powerpc64__ */
#ifndef __ASSEMBLY__
#if defined(CONFIG_CRASH_DUMP) && !defined(CONFIG_NONSTATIC_KERNEL)
-extern void reserve_kdump_trampoline(void);
-extern void setup_kdump_trampoline(void);
+extern void reserve_exception_trampoline(void);
+extern void setup_exception_trampoline(void);
#else
/* !CRASH_DUMP || !NONSTATIC_KERNEL */
-static inline void reserve_kdump_trampoline(void) { ; }
-static inline void setup_kdump_trampoline(void) { ; }
+static inline void reserve_exception_trampoline(void) { ; }
+static inline void setup_exception_trampoline(void) { ; }
#endif
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/exception_trampoline.c b/arch/powerpc/kernel/exception_trampoline.c
index 71f4b72..b725116 100644
--- a/arch/powerpc/kernel/exception_trampoline.c
+++ b/arch/powerpc/kernel/exception_trampoline.c
@@ -15,7 +15,7 @@
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <asm/code-patching.h>
-#include <asm/kdump.h>
+#include <asm/exception_trampoline.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/uaccess.h>
@@ -29,9 +29,9 @@
#endif
#ifndef CONFIG_NONSTATIC_KERNEL
-void __init reserve_kdump_trampoline(void)
+void __init reserve_exception_trampoline(void)
{
- memblock_reserve(0, KDUMP_RESERVE_LIMIT);
+ memblock_reserve(0, EXCEPTION_RESERVE_LIMIT);
}
static void __init create_trampoline(unsigned long addr)
@@ -50,13 +50,13 @@ static void __init create_trampoline(unsigned long addr)
patch_branch(++p, addr + PHYSICAL_START, 0);
}
-void __init setup_kdump_trampoline(void)
+void __init setup_exception_trampoline(void)
{
unsigned long i;
- DBG(" -> setup_kdump_trampoline()\n");
+ DBG(" -> setup_exception_trampoline()\n");
- for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
+ for (i = EXCEPTION_TRAMPOLINE_START; i < EXCEPTION_TRAMPOLINE_END; i += 8) {
create_trampoline(i);
}
@@ -65,6 +65,6 @@ void __init setup_kdump_trampoline(void)
create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
#endif /* CONFIG_PPC_PSERIES */
- DBG(" <- setup_kdump_trampoline()\n");
+ DBG(" <- setup_exception_trampoline()\n");
}
#endif /* CONFIG_NONSTATIC_KERNEL */
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 4a13ac5..57214ee 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -669,7 +669,7 @@ void __init early_init_devtree(void *params)
/* If relocatable, reserve first 32k for interrupt vectors etc. */
if (PHYSICAL_START > MEMORY_START)
memblock_reserve(MEMORY_START, 0x8000);
- reserve_kdump_trampoline();
+ reserve_exception_trampoline();
#ifdef CONFIG_FA_DUMP
/*
* If we fail to reserve memory for firmware-assisted dump then
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 9f3aa43..e040bb2 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -133,7 +133,7 @@ notrace void __init machine_init(u64 dt_ptr)
probe_machine();
- setup_kdump_trampoline();
+ setup_exception_trampoline();
#ifdef CONFIG_6xx
if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 06b8562..b077847 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -222,7 +222,7 @@ void __init early_setup(unsigned long dt_ptr)
/* Probe the machine type */
probe_machine();
- setup_kdump_trampoline();
+ setup_exception_trampoline();
DBG("Found, Initializing memory management...\n");
--
1.8.1.4
^ permalink raw reply related [flat|nested] 5+ messages in thread
* [PATCH 4/4] powerpc: make the kernel bootable from non 0 address for 6xx
2013-06-19 9:20 [PATCH 0/4] powerpc: enable relocatable support for 6xx Kevin Hao
` (2 preceding siblings ...)
2013-06-19 9:20 ` [PATCH 3/4] powerpc: s/kdump/exception/ for the exception trampoline functions Kevin Hao
@ 2013-06-19 9:20 ` Kevin Hao
3 siblings, 0 replies; 5+ messages in thread
From: Kevin Hao @ 2013-06-19 9:20 UTC (permalink / raw)
To: Benjamin Herrenschmidt; +Cc: linuxppc
Add the support to boot the kernel from a non 0 address for 6xx.
Setup the exception trampoline if the physical start address is
not 0.
For a kdump kernel, enable the relocatable support implicitly.
Since the memstart_addr of the kdump kernel is not 0, we must
take this into account when setting up the BAT map.
Signed-off-by: Kevin Hao <haokexin@gmail.com>
---
arch/powerpc/Kconfig | 2 +-
arch/powerpc/include/asm/exception_trampoline.h | 4 ++--
arch/powerpc/kernel/Makefile | 3 ++-
arch/powerpc/kernel/exception_trampoline.c | 18 +++++++++++++++---
arch/powerpc/mm/ppc_mmu_32.c | 7 +------
5 files changed, 21 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 8fe2792..6e03028 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -382,7 +382,7 @@ config KEXEC
config CRASH_DUMP
bool "Build a kdump crash kernel"
depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
- select RELOCATABLE if PPC64 || 44x
+ select RELOCATABLE if PPC64 || 44x || 6xx
select DYNAMIC_MEMSTART if FSL_BOOKE
help
Build a kernel suitable for use as a kdump capture kernel.
diff --git a/arch/powerpc/include/asm/exception_trampoline.h b/arch/powerpc/include/asm/exception_trampoline.h
index 88281c9..df4af6a 100644
--- a/arch/powerpc/include/asm/exception_trampoline.h
+++ b/arch/powerpc/include/asm/exception_trampoline.h
@@ -22,11 +22,11 @@
#ifndef __ASSEMBLY__
-#if defined(CONFIG_CRASH_DUMP) && !defined(CONFIG_NONSTATIC_KERNEL)
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_6xx)
extern void reserve_exception_trampoline(void);
extern void setup_exception_trampoline(void);
#else
-/* !CRASH_DUMP || !NONSTATIC_KERNEL */
+/* !CONFIG_RELOCATABLE || !CONFIG_6xx */
static inline void reserve_exception_trampoline(void) { ; }
static inline void setup_exception_trampoline(void) { ; }
#endif
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index c73a0e3..c722156 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -59,7 +59,8 @@ obj-$(CONFIG_LPARCFG) += lparcfg.o
obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_IBMEBUS) += ibmebus.o
obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o exception_trampoline.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_RELOCATABLE) += exception_trampoline.o
obj-$(CONFIG_FA_DUMP) += fadump.o
ifeq ($(CONFIG_PPC32),y)
obj-$(CONFIG_E500) += idle_e500.o
diff --git a/arch/powerpc/kernel/exception_trampoline.c b/arch/powerpc/kernel/exception_trampoline.c
index b725116..68aed9e 100644
--- a/arch/powerpc/kernel/exception_trampoline.c
+++ b/arch/powerpc/kernel/exception_trampoline.c
@@ -28,10 +28,16 @@
#define DBG(fmt...)
#endif
-#ifndef CONFIG_NONSTATIC_KERNEL
+#ifdef CONFIG_6xx
void __init reserve_exception_trampoline(void)
{
- memblock_reserve(0, EXCEPTION_RESERVE_LIMIT);
+ /*
+ * We don't need to reserve this memory region for a kdump kernel
+ * since this is not included in the memory regions of kdump kernel.
+ */
+
+ if (!memstart_addr && PHYSICAL_START)
+ memblock_reserve(0, EXCEPTION_RESERVE_LIMIT);
}
static void __init create_trampoline(unsigned long addr)
@@ -54,6 +60,12 @@ void __init setup_exception_trampoline(void)
{
unsigned long i;
+ if (!PHYSICAL_START)
+ return;
+
+ if (PHYSICAL_START > 0x2000000)
+ panic("Don't support to load a kernel above 32M address");
+
DBG(" -> setup_exception_trampoline()\n");
for (i = EXCEPTION_TRAMPOLINE_START; i < EXCEPTION_TRAMPOLINE_END; i += 8) {
@@ -67,4 +79,4 @@ void __init setup_exception_trampoline(void)
DBG(" <- setup_exception_trampoline()\n");
}
-#endif /* CONFIG_NONSTATIC_KERNEL */
+#endif /* CONFIG_6xx */
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 11571e1..99ce477 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -86,7 +86,7 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
/* Make sure we don't map a block larger than the
smallest alignment of the physical address. */
- tot = top;
+ tot = top + memstart_addr;
for (bl = 128<<10; bl < max_size; bl <<= 1) {
if (bl * 2 > tot)
break;
@@ -275,11 +275,6 @@ void __init MMU_init_hw(void)
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
- /* We don't currently support the first MEMBLOCK not mapping 0
- * physical on those processors
- */
- BUG_ON(first_memblock_base != 0);
-
/* 601 can only access 16MB at the moment */
if (PVR_VER(mfspr(SPRN_PVR)) == 1)
memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000));
--
1.8.1.4
^ permalink raw reply related [flat|nested] 5+ messages in thread
end of thread, other threads:[~2013-06-19 9:21 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2013-06-19 9:20 [PATCH 0/4] powerpc: enable relocatable support for 6xx Kevin Hao
2013-06-19 9:20 ` [PATCH 1/4] " Kevin Hao
2013-06-19 9:20 ` [PATCH 2/4] powerpc: move the exception trampoline helper functions to a separate file Kevin Hao
2013-06-19 9:20 ` [PATCH 3/4] powerpc: s/kdump/exception/ for the exception trampoline functions Kevin Hao
2013-06-19 9:20 ` [PATCH 4/4] powerpc: make the kernel bootable from non 0 address for 6xx Kevin Hao
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.