All of lore.kernel.org
 help / color / mirror / Atom feed
* [v2][PATCH 0/7] powerpc/book3e: support kexec and kdump
@ 2013-06-20  7:53 ` Tiejun Chen
  0 siblings, 0 replies; 32+ messages in thread
From: Tiejun Chen @ 2013-06-20  7:53 UTC (permalink / raw)
  To: benh; +Cc: linux-kernel, linuxppc-dev

This patchset is used to support kexec and kdump on book3e.

Tested on fsl-p5040 DS.

v2:
* rebase on the merge branch, as Ben mentioned.

v1:
* improve some patch head
* rebase on next branch with patch 7

--------------------------------------------------------------------------------
Tiejun Chen (7):
      powerpc/book3e: support CONFIG_RELOCATABLE
      book3e/kexec/kdump: enable kexec for kernel
      book3e/kexec/kdump: create a 1:1 TLB mapping
      book3e/kexec/kdump: introduce a kexec kernel flag
      book3e/kexec/kdump: implement ppc64 kexec specfic
      book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET
      book3e/kexec/kdump: recover "r4 = 0" to create the initial TLB

 arch/powerpc/Kconfig                     |    2 +-
 arch/powerpc/include/asm/exception-64e.h |    8 ++++
 arch/powerpc/include/asm/page.h          |    2 +
 arch/powerpc/include/asm/smp.h           |    3 ++
 arch/powerpc/kernel/exceptions-64e.S     |   15 ++++++-
 arch/powerpc/kernel/head_64.S            |   47 +++++++++++++++++++--
 arch/powerpc/kernel/machine_kexec_64.c   |    6 +++
 arch/powerpc/kernel/misc_64.S            |   67 +++++++++++++++++++++++++++++-
 arch/powerpc/lib/feature-fixups.c        |    7 ++++
 arch/powerpc/platforms/85xx/smp.c        |   27 ++++++++++++
 10 files changed, 178 insertions(+), 6 deletions(-)

Tiejun

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [v2][PATCH 0/7] powerpc/book3e: support kexec and kdump
@ 2013-06-20  7:53 ` Tiejun Chen
  0 siblings, 0 replies; 32+ messages in thread
From: Tiejun Chen @ 2013-06-20  7:53 UTC (permalink / raw)
  To: benh; +Cc: linuxppc-dev, linux-kernel

This patchset is used to support kexec and kdump on book3e.

Tested on fsl-p5040 DS.

v2:
* rebase on the merge branch, as Ben mentioned.

v1:
* improve some patch head
* rebase on next branch with patch 7

--------------------------------------------------------------------------------
Tiejun Chen (7):
      powerpc/book3e: support CONFIG_RELOCATABLE
      book3e/kexec/kdump: enable kexec for kernel
      book3e/kexec/kdump: create a 1:1 TLB mapping
      book3e/kexec/kdump: introduce a kexec kernel flag
      book3e/kexec/kdump: implement ppc64 kexec specfic
      book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET
      book3e/kexec/kdump: recover "r4 = 0" to create the initial TLB

 arch/powerpc/Kconfig                     |    2 +-
 arch/powerpc/include/asm/exception-64e.h |    8 ++++
 arch/powerpc/include/asm/page.h          |    2 +
 arch/powerpc/include/asm/smp.h           |    3 ++
 arch/powerpc/kernel/exceptions-64e.S     |   15 ++++++-
 arch/powerpc/kernel/head_64.S            |   47 +++++++++++++++++++--
 arch/powerpc/kernel/machine_kexec_64.c   |    6 +++
 arch/powerpc/kernel/misc_64.S            |   67 +++++++++++++++++++++++++++++-
 arch/powerpc/lib/feature-fixups.c        |    7 ++++
 arch/powerpc/platforms/85xx/smp.c        |   27 ++++++++++++
 10 files changed, 178 insertions(+), 6 deletions(-)

Tiejun

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [v2][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE
  2013-06-20  7:53 ` Tiejun Chen
@ 2013-06-20  7:53   ` Tiejun Chen
  -1 siblings, 0 replies; 32+ messages in thread
From: Tiejun Chen @ 2013-06-20  7:53 UTC (permalink / raw)
  To: benh; +Cc: linux-kernel, linuxppc-dev

book3e is different from book3s since 3s includes the exception
vectors code in head_64.S, as it relies on absolute addressing
which is only possible within this compilation unit. So we have
to get that label address via the GOT.

And when booting a relocated kernel, we should reset IVPR properly again
after .relocate.

Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
 arch/powerpc/include/asm/exception-64e.h |    8 ++++++++
 arch/powerpc/kernel/exceptions-64e.S     |   15 ++++++++++++++-
 arch/powerpc/kernel/head_64.S            |   22 ++++++++++++++++++++++
 arch/powerpc/lib/feature-fixups.c        |    7 +++++++
 4 files changed, 51 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index 51fa43e..89e940d 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -214,10 +214,18 @@ exc_##label##_book3e:
 #define TLB_MISS_STATS_SAVE_INFO_BOLTED
 #endif
 
+#ifndef CONFIG_RELOCATABLE
 #define SET_IVOR(vector_number, vector_offset)	\
 	li	r3,vector_offset@l; 		\
 	ori	r3,r3,interrupt_base_book3e@l;	\
 	mtspr	SPRN_IVOR##vector_number,r3;
+#else
+#define SET_IVOR(vector_number, vector_offset)	\
+	LOAD_REG_ADDR(r3,interrupt_base_book3e);\
+	rlwinm	r3,r3,0,15,0;			\
+	ori	r3,r3,vector_offset@l;		\
+	mtspr	SPRN_IVOR##vector_number,r3;
+#endif
 
 #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
 
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 645170a..4b23119 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1097,7 +1097,15 @@ skpinv:	addi	r6,r6,1				/* Increment */
  * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
  */
 	/* Now we branch the new virtual address mapped by this entry */
+#ifdef CONFIG_RELOCATABLE
+	/* We have to find out address from lr. */
+	bl	1f		/* Find our address */
+1:	mflr	r6
+	addi	r6,r6,(2f - 1b)
+	tovirt(r6,r6)
+#else
 	LOAD_REG_IMMEDIATE(r6,2f)
+#endif
 	lis	r7,MSR_KERNEL@h
 	ori	r7,r7,MSR_KERNEL@l
 	mtspr	SPRN_SRR0,r6
@@ -1348,9 +1356,14 @@ _GLOBAL(book3e_secondary_thread_init)
 	mflr	r28
 	b	3b
 
-_STATIC(init_core_book3e)
+_GLOBAL(init_core_book3e)
 	/* Establish the interrupt vector base */
+#ifdef CONFIG_RELOCATABLE
+	tovirt(r2,r2)
+	LOAD_REG_ADDR(r3, interrupt_base_book3e)
+#else
 	LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
+#endif
 	mtspr	SPRN_IVPR,r3
 	sync
 	blr
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index b61363d..0942f3a 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -414,12 +414,22 @@ _STATIC(__after_prom_start)
 	/* process relocations for the final address of the kernel */
 	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
 	sldi	r25,r25,32
+#if defined(CONFIG_PPC_BOOK3E)
+	tovirt(r26,r26)			/* on booke, we already run at PAGE_OFFSET */
+#endif
 	lwz	r7,__run_at_load-_stext(r26)
+#if defined(CONFIG_PPC_BOOK3E)
+	tophys(r26,r26)			/* Restore for the remains. */
+#endif
 	cmplwi	cr0,r7,1	/* flagged to stay where we are ? */
 	bne	1f
 	add	r25,r25,r26
 1:	mr	r3,r25
 	bl	.relocate
+#if defined(CONFIG_PPC_BOOK3E)
+	/* We should set ivpr again after .relocate. */
+	bl	.init_core_book3e
+#endif
 #endif
 
 /*
@@ -447,12 +457,24 @@ _STATIC(__after_prom_start)
  * variable __run_at_load, if it is set the kernel is treated as relocatable
  * kernel, otherwise it will be moved to PHYSICAL_START
  */
+#if defined(CONFIG_PPC_BOOK3E)
+	tovirt(r26,r26)			/* on booke, we already run at PAGE_OFFSET */
+#endif
 	lwz	r7,__run_at_load-_stext(r26)
+#if defined(CONFIG_PPC_BOOK3E)
+	tophys(r26,r26)			/* Restore for the remains. */
+#endif
 	cmplwi	cr0,r7,1
 	bne	3f
 
+#ifdef CONFIG_PPC_BOOK3E
+	LOAD_REG_ADDR(r5, interrupt_end_book3e)
+	LOAD_REG_ADDR(r11, _stext)
+	sub	r5,r5,r11
+#else
 	/* just copy interrupts */
 	LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
+#endif
 	b	5f
 3:
 #endif
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 7a8a748..13f20ed 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -135,13 +135,20 @@ void do_final_fixups(void)
 #if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
 	int *src, *dest;
 	unsigned long length;
+#ifdef CONFIG_PPC_BOOK3E
+	extern char interrupt_end_book3e[];
+#endif
 
 	if (PHYSICAL_START == 0)
 		return;
 
 	src = (int *)(KERNELBASE + PHYSICAL_START);
 	dest = (int *)KERNELBASE;
+#ifdef CONFIG_PPC_BOOK3E
+	length = (interrupt_end_book3e - _stext) / sizeof(int);
+#else
 	length = (__end_interrupts - _stext) / sizeof(int);
+#endif
 
 	while (length--) {
 		patch_instruction(dest, *src);
-- 
1.7.9.5


^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v2][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE
@ 2013-06-20  7:53   ` Tiejun Chen
  0 siblings, 0 replies; 32+ messages in thread
From: Tiejun Chen @ 2013-06-20  7:53 UTC (permalink / raw)
  To: benh; +Cc: linuxppc-dev, linux-kernel

book3e is different from book3s since 3s includes the exception
vectors code in head_64.S, as it relies on absolute addressing
which is only possible within this compilation unit. So we have
to get that label address via the GOT.

And when booting a relocated kernel, we should reset IVPR properly again
after .relocate.

Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
 arch/powerpc/include/asm/exception-64e.h |    8 ++++++++
 arch/powerpc/kernel/exceptions-64e.S     |   15 ++++++++++++++-
 arch/powerpc/kernel/head_64.S            |   22 ++++++++++++++++++++++
 arch/powerpc/lib/feature-fixups.c        |    7 +++++++
 4 files changed, 51 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index 51fa43e..89e940d 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -214,10 +214,18 @@ exc_##label##_book3e:
 #define TLB_MISS_STATS_SAVE_INFO_BOLTED
 #endif
 
+#ifndef CONFIG_RELOCATABLE
 #define SET_IVOR(vector_number, vector_offset)	\
 	li	r3,vector_offset@l; 		\
 	ori	r3,r3,interrupt_base_book3e@l;	\
 	mtspr	SPRN_IVOR##vector_number,r3;
+#else
+#define SET_IVOR(vector_number, vector_offset)	\
+	LOAD_REG_ADDR(r3,interrupt_base_book3e);\
+	rlwinm	r3,r3,0,15,0;			\
+	ori	r3,r3,vector_offset@l;		\
+	mtspr	SPRN_IVOR##vector_number,r3;
+#endif
 
 #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
 
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 645170a..4b23119 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1097,7 +1097,15 @@ skpinv:	addi	r6,r6,1				/* Increment */
  * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
  */
 	/* Now we branch the new virtual address mapped by this entry */
+#ifdef CONFIG_RELOCATABLE
+	/* We have to find out address from lr. */
+	bl	1f		/* Find our address */
+1:	mflr	r6
+	addi	r6,r6,(2f - 1b)
+	tovirt(r6,r6)
+#else
 	LOAD_REG_IMMEDIATE(r6,2f)
+#endif
 	lis	r7,MSR_KERNEL@h
 	ori	r7,r7,MSR_KERNEL@l
 	mtspr	SPRN_SRR0,r6
@@ -1348,9 +1356,14 @@ _GLOBAL(book3e_secondary_thread_init)
 	mflr	r28
 	b	3b
 
-_STATIC(init_core_book3e)
+_GLOBAL(init_core_book3e)
 	/* Establish the interrupt vector base */
+#ifdef CONFIG_RELOCATABLE
+	tovirt(r2,r2)
+	LOAD_REG_ADDR(r3, interrupt_base_book3e)
+#else
 	LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
+#endif
 	mtspr	SPRN_IVPR,r3
 	sync
 	blr
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index b61363d..0942f3a 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -414,12 +414,22 @@ _STATIC(__after_prom_start)
 	/* process relocations for the final address of the kernel */
 	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
 	sldi	r25,r25,32
+#if defined(CONFIG_PPC_BOOK3E)
+	tovirt(r26,r26)			/* on booke, we already run at PAGE_OFFSET */
+#endif
 	lwz	r7,__run_at_load-_stext(r26)
+#if defined(CONFIG_PPC_BOOK3E)
+	tophys(r26,r26)			/* Restore for the remains. */
+#endif
 	cmplwi	cr0,r7,1	/* flagged to stay where we are ? */
 	bne	1f
 	add	r25,r25,r26
 1:	mr	r3,r25
 	bl	.relocate
+#if defined(CONFIG_PPC_BOOK3E)
+	/* We should set ivpr again after .relocate. */
+	bl	.init_core_book3e
+#endif
 #endif
 
 /*
@@ -447,12 +457,24 @@ _STATIC(__after_prom_start)
  * variable __run_at_load, if it is set the kernel is treated as relocatable
  * kernel, otherwise it will be moved to PHYSICAL_START
  */
+#if defined(CONFIG_PPC_BOOK3E)
+	tovirt(r26,r26)			/* on booke, we already run at PAGE_OFFSET */
+#endif
 	lwz	r7,__run_at_load-_stext(r26)
+#if defined(CONFIG_PPC_BOOK3E)
+	tophys(r26,r26)			/* Restore for the remains. */
+#endif
 	cmplwi	cr0,r7,1
 	bne	3f
 
+#ifdef CONFIG_PPC_BOOK3E
+	LOAD_REG_ADDR(r5, interrupt_end_book3e)
+	LOAD_REG_ADDR(r11, _stext)
+	sub	r5,r5,r11
+#else
 	/* just copy interrupts */
 	LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
+#endif
 	b	5f
 3:
 #endif
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 7a8a748..13f20ed 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -135,13 +135,20 @@ void do_final_fixups(void)
 #if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
 	int *src, *dest;
 	unsigned long length;
+#ifdef CONFIG_PPC_BOOK3E
+	extern char interrupt_end_book3e[];
+#endif
 
 	if (PHYSICAL_START == 0)
 		return;
 
 	src = (int *)(KERNELBASE + PHYSICAL_START);
 	dest = (int *)KERNELBASE;
+#ifdef CONFIG_PPC_BOOK3E
+	length = (interrupt_end_book3e - _stext) / sizeof(int);
+#else
 	length = (__end_interrupts - _stext) / sizeof(int);
+#endif
 
 	while (length--) {
 		patch_instruction(dest, *src);
-- 
1.7.9.5

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v2][PATCH 2/7] book3e/kexec/kdump: enable kexec for kernel
  2013-06-20  7:53 ` Tiejun Chen
@ 2013-06-20  7:53   ` Tiejun Chen
  -1 siblings, 0 replies; 32+ messages in thread
From: Tiejun Chen @ 2013-06-20  7:53 UTC (permalink / raw)
  To: benh; +Cc: linux-kernel, linuxppc-dev

We need to activate KEXEC for book3e and bypass or convert non-book3e stuff
in kexec coverage.

Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
 arch/powerpc/Kconfig                   |    2 +-
 arch/powerpc/kernel/machine_kexec_64.c |    6 ++++++
 arch/powerpc/kernel/misc_64.S          |    6 ++++++
 3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c33e3ad..6ecf3c9 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -364,7 +364,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
 
 config KEXEC
 	bool "kexec system call"
-	depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
+	depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) || PPC_BOOK3E
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 611acdf..ef39271 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -33,6 +33,7 @@
 int default_machine_kexec_prepare(struct kimage *image)
 {
 	int i;
+#ifndef CONFIG_PPC_BOOK3E
 	unsigned long begin, end;	/* limits of segment */
 	unsigned long low, high;	/* limits of blocked memory range */
 	struct device_node *node;
@@ -41,6 +42,7 @@ int default_machine_kexec_prepare(struct kimage *image)
 
 	if (!ppc_md.hpte_clear_all)
 		return -ENOENT;
+#endif
 
 	/*
 	 * Since we use the kernel fault handlers and paging code to
@@ -51,6 +53,7 @@ int default_machine_kexec_prepare(struct kimage *image)
 		if (image->segment[i].mem < __pa(_end))
 			return -ETXTBSY;
 
+#ifndef CONFIG_PPC_BOOK3E
 	/*
 	 * For non-LPAR, we absolutely can not overwrite the mmu hash
 	 * table, since we are still using the bolted entries in it to
@@ -92,6 +95,7 @@ int default_machine_kexec_prepare(struct kimage *image)
 				return -ETXTBSY;
 		}
 	}
+#endif
 
 	return 0;
 }
@@ -367,6 +371,7 @@ void default_machine_kexec(struct kimage *image)
 	/* NOTREACHED */
 }
 
+#ifndef CONFIG_PPC_BOOK3E
 /* Values we need to export to the second kernel via the device tree. */
 static unsigned long htab_base;
 
@@ -411,3 +416,4 @@ static int __init export_htab_values(void)
 	return 0;
 }
 late_initcall(export_htab_values);
+#endif
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 6820e45..f1a7ce7 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -543,9 +543,13 @@ _GLOBAL(kexec_sequence)
 	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */
 
 	/* disable interrupts, we are overwriting kernel data next */
+#ifndef CONFIG_PPC_BOOK3E
 	mfmsr	r3
 	rlwinm	r3,r3,0,17,15
 	mtmsrd	r3,1
+#else
+	wrteei	0
+#endif
 
 	/* copy dest pages, flush whole dest image */
 	mr	r3,r29
@@ -567,10 +571,12 @@ _GLOBAL(kexec_sequence)
 	li	r6,1
 	stw	r6,kexec_flag-1b(5)
 
+#ifndef CONFIG_PPC_BOOK3E
 	/* clear out hardware hash page table and tlb */
 	ld	r5,0(r27)		/* deref function descriptor */
 	mtctr	r5
 	bctrl				/* ppc_md.hpte_clear_all(void); */
+#endif
 
 /*
  *   kexec image calling is:
-- 
1.7.9.5


^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v2][PATCH 2/7] book3e/kexec/kdump: enable kexec for kernel
@ 2013-06-20  7:53   ` Tiejun Chen
  0 siblings, 0 replies; 32+ messages in thread
From: Tiejun Chen @ 2013-06-20  7:53 UTC (permalink / raw)
  To: benh; +Cc: linuxppc-dev, linux-kernel

We need to activate KEXEC for book3e and bypass or convert non-book3e stuff
in kexec coverage.

Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
 arch/powerpc/Kconfig                   |    2 +-
 arch/powerpc/kernel/machine_kexec_64.c |    6 ++++++
 arch/powerpc/kernel/misc_64.S          |    6 ++++++
 3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c33e3ad..6ecf3c9 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -364,7 +364,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
 
 config KEXEC
 	bool "kexec system call"
-	depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
+	depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) || PPC_BOOK3E
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 611acdf..ef39271 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -33,6 +33,7 @@
 int default_machine_kexec_prepare(struct kimage *image)
 {
 	int i;
+#ifndef CONFIG_PPC_BOOK3E
 	unsigned long begin, end;	/* limits of segment */
 	unsigned long low, high;	/* limits of blocked memory range */
 	struct device_node *node;
@@ -41,6 +42,7 @@ int default_machine_kexec_prepare(struct kimage *image)
 
 	if (!ppc_md.hpte_clear_all)
 		return -ENOENT;
+#endif
 
 	/*
 	 * Since we use the kernel fault handlers and paging code to
@@ -51,6 +53,7 @@ int default_machine_kexec_prepare(struct kimage *image)
 		if (image->segment[i].mem < __pa(_end))
 			return -ETXTBSY;
 
+#ifndef CONFIG_PPC_BOOK3E
 	/*
 	 * For non-LPAR, we absolutely can not overwrite the mmu hash
 	 * table, since we are still using the bolted entries in it to
@@ -92,6 +95,7 @@ int default_machine_kexec_prepare(struct kimage *image)
 				return -ETXTBSY;
 		}
 	}
+#endif
 
 	return 0;
 }
@@ -367,6 +371,7 @@ void default_machine_kexec(struct kimage *image)
 	/* NOTREACHED */
 }
 
+#ifndef CONFIG_PPC_BOOK3E
 /* Values we need to export to the second kernel via the device tree. */
 static unsigned long htab_base;
 
@@ -411,3 +416,4 @@ static int __init export_htab_values(void)
 	return 0;
 }
 late_initcall(export_htab_values);
+#endif
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 6820e45..f1a7ce7 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -543,9 +543,13 @@ _GLOBAL(kexec_sequence)
 	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */
 
 	/* disable interrupts, we are overwriting kernel data next */
+#ifndef CONFIG_PPC_BOOK3E
 	mfmsr	r3
 	rlwinm	r3,r3,0,17,15
 	mtmsrd	r3,1
+#else
+	wrteei	0
+#endif
 
 	/* copy dest pages, flush whole dest image */
 	mr	r3,r29
@@ -567,10 +571,12 @@ _GLOBAL(kexec_sequence)
 	li	r6,1
 	stw	r6,kexec_flag-1b(5)
 
+#ifndef CONFIG_PPC_BOOK3E
 	/* clear out hardware hash page table and tlb */
 	ld	r5,0(r27)		/* deref function descriptor */
 	mtctr	r5
 	bctrl				/* ppc_md.hpte_clear_all(void); */
+#endif
 
 /*
  *   kexec image calling is:
-- 
1.7.9.5

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v2][PATCH 3/7] book3e/kexec/kdump: create a 1:1 TLB mapping
  2013-06-20  7:53 ` Tiejun Chen
@ 2013-06-20  7:53   ` Tiejun Chen
  -1 siblings, 0 replies; 32+ messages in thread
From: Tiejun Chen @ 2013-06-20  7:53 UTC (permalink / raw)
  To: benh; +Cc: linux-kernel, linuxppc-dev

book3e has no real MMU mode, so we have to create a 1:1 TLB
mapping to make sure we can access the real physical address.
Also correct some things to support this pseudo real mode on book3e.

Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
 arch/powerpc/kernel/head_64.S |    9 ++++---
 arch/powerpc/kernel/misc_64.S |   55 ++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 60 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 0942f3a..3e19ba2 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -444,12 +444,12 @@ _STATIC(__after_prom_start)
 	tovirt(r3,r3)			/* on booke, we already run at PAGE_OFFSET */
 #endif
 	mr.	r4,r26			/* In some cases the loader may  */
+#if defined(CONFIG_PPC_BOOK3E)
+	tovirt(r4,r4)
+#endif
 	beq	9f			/* have already put us at zero */
 	li	r6,0x100		/* Start offset, the first 0x100 */
 					/* bytes were copied earlier.	 */
-#ifdef CONFIG_PPC_BOOK3E
-	tovirt(r6,r6)			/* on booke, we already run at PAGE_OFFSET */
-#endif
 
 #ifdef CONFIG_RELOCATABLE
 /*
@@ -492,6 +492,9 @@ _STATIC(__after_prom_start)
 p_end:	.llong	_end - _stext
 
 4:	/* Now copy the rest of the kernel up to _end */
+#if defined(CONFIG_PPC_BOOK3E)
+	tovirt(r26,r26)
+#endif
 	addis	r5,r26,(p_end - _stext)@ha
 	ld	r5,(p_end - _stext)@l(r5)	/* get _end */
 5:	bl	.copy_and_flush		/* copy the rest */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index f1a7ce7..20cbb98 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -460,6 +460,49 @@ kexec_flag:
 
 
 #ifdef CONFIG_KEXEC
+#ifdef CONFIG_PPC_BOOK3E
+/* BOOK3E have no a real MMU mode so we have to setup the initial TLB
+ * for a core to map v:0 to p:0 as 1:1. This current implementation
+ * assume that 1G is enough for kexec.
+ */
+#include <asm/mmu.h>
+kexec_create_tlb:
+	/* Invalidate all TLBs to avoid any TLB conflict. */
+	PPC_TLBILX_ALL(0,R0)
+	sync
+	isync
+
+	mfspr	r10,SPRN_TLB1CFG
+	andi.	r10,r10,TLBnCFG_N_ENTRY	/* Extract # entries */
+	subi	r10,r10,1		/* Often its always safe to use last */
+	lis	r9,MAS0_TLBSEL(1)@h
+	rlwimi	r9,r10,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r9) */
+
+/* Setup a temp mapping v:0 to p:0 as 1:1 and return to it.
+ */
+#ifdef CONFIG_SMP
+#define M_IF_SMP	MAS2_M
+#else
+#define M_IF_SMP	0
+#endif
+	mtspr	SPRN_MAS0,r9
+
+	lis	r9,(MAS1_VALID|MAS1_IPROT)@h
+	ori	r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
+	mtspr	SPRN_MAS1,r9
+
+	LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_SMP)
+	mtspr	SPRN_MAS2,r9
+
+	LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
+	mtspr	SPRN_MAS3,r9
+	li	r9,0
+	mtspr	SPRN_MAS7,r9
+
+	tlbwe
+	isync
+	blr
+#endif
 
 /* kexec_smp_wait(void)
  *
@@ -473,6 +516,10 @@ kexec_flag:
  */
 _GLOBAL(kexec_smp_wait)
 	lhz	r3,PACAHWCPUID(r13)
+#ifdef CONFIG_PPC_BOOK3E
+	/* Create a 1:1 mapping. */
+	bl	kexec_create_tlb
+#endif
 	bl	real_mode
 
 	li	r4,KEXEC_STATE_REAL_MODE
@@ -489,6 +536,7 @@ _GLOBAL(kexec_smp_wait)
  * don't overwrite r3 here, it is live for kexec_wait above.
  */
 real_mode:	/* assume normal blr return */
+#ifndef CONFIG_PPC_BOOK3E
 1:	li	r9,MSR_RI
 	li	r10,MSR_DR|MSR_IR
 	mflr	r11		/* return address to SRR0 */
@@ -500,7 +548,10 @@ real_mode:	/* assume normal blr return */
 	mtspr	SPRN_SRR1,r10
 	mtspr	SPRN_SRR0,r11
 	rfid
-
+#else
+	/* the real mode is nothing for book3e. */
+	blr
+#endif
 
 /*
  * kexec_sequence(newstack, start, image, control, clear_all())
@@ -549,6 +600,8 @@ _GLOBAL(kexec_sequence)
 	mtmsrd	r3,1
 #else
 	wrteei	0
+	/* Create a 1:1 mapping. */
+	bl	kexec_create_tlb
 #endif
 
 	/* copy dest pages, flush whole dest image */
-- 
1.7.9.5


^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v2][PATCH 3/7] book3e/kexec/kdump: create a 1:1 TLB mapping
@ 2013-06-20  7:53   ` Tiejun Chen
  0 siblings, 0 replies; 32+ messages in thread
From: Tiejun Chen @ 2013-06-20  7:53 UTC (permalink / raw)
  To: benh; +Cc: linuxppc-dev, linux-kernel

book3e has no real MMU mode, so we have to create a 1:1 TLB
mapping to make sure we can access the real physical address.
Also correct some things to support this pseudo real mode on book3e.

Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
 arch/powerpc/kernel/head_64.S |    9 ++++---
 arch/powerpc/kernel/misc_64.S |   55 ++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 60 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 0942f3a..3e19ba2 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -444,12 +444,12 @@ _STATIC(__after_prom_start)
 	tovirt(r3,r3)			/* on booke, we already run at PAGE_OFFSET */
 #endif
 	mr.	r4,r26			/* In some cases the loader may  */
+#if defined(CONFIG_PPC_BOOK3E)
+	tovirt(r4,r4)
+#endif
 	beq	9f			/* have already put us at zero */
 	li	r6,0x100		/* Start offset, the first 0x100 */
 					/* bytes were copied earlier.	 */
-#ifdef CONFIG_PPC_BOOK3E
-	tovirt(r6,r6)			/* on booke, we already run at PAGE_OFFSET */
-#endif
 
 #ifdef CONFIG_RELOCATABLE
 /*
@@ -492,6 +492,9 @@ _STATIC(__after_prom_start)
 p_end:	.llong	_end - _stext
 
 4:	/* Now copy the rest of the kernel up to _end */
+#if defined(CONFIG_PPC_BOOK3E)
+	tovirt(r26,r26)
+#endif
 	addis	r5,r26,(p_end - _stext)@ha
 	ld	r5,(p_end - _stext)@l(r5)	/* get _end */
 5:	bl	.copy_and_flush		/* copy the rest */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index f1a7ce7..20cbb98 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -460,6 +460,49 @@ kexec_flag:
 
 
 #ifdef CONFIG_KEXEC
+#ifdef CONFIG_PPC_BOOK3E
+/* BOOK3E have no a real MMU mode so we have to setup the initial TLB
+ * for a core to map v:0 to p:0 as 1:1. This current implementation
+ * assume that 1G is enough for kexec.
+ */
+#include <asm/mmu.h>
+kexec_create_tlb:
+	/* Invalidate all TLBs to avoid any TLB conflict. */
+	PPC_TLBILX_ALL(0,R0)
+	sync
+	isync
+
+	mfspr	r10,SPRN_TLB1CFG
+	andi.	r10,r10,TLBnCFG_N_ENTRY	/* Extract # entries */
+	subi	r10,r10,1		/* Often its always safe to use last */
+	lis	r9,MAS0_TLBSEL(1)@h
+	rlwimi	r9,r10,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r9) */
+
+/* Setup a temp mapping v:0 to p:0 as 1:1 and return to it.
+ */
+#ifdef CONFIG_SMP
+#define M_IF_SMP	MAS2_M
+#else
+#define M_IF_SMP	0
+#endif
+	mtspr	SPRN_MAS0,r9
+
+	lis	r9,(MAS1_VALID|MAS1_IPROT)@h
+	ori	r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
+	mtspr	SPRN_MAS1,r9
+
+	LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_SMP)
+	mtspr	SPRN_MAS2,r9
+
+	LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
+	mtspr	SPRN_MAS3,r9
+	li	r9,0
+	mtspr	SPRN_MAS7,r9
+
+	tlbwe
+	isync
+	blr
+#endif
 
 /* kexec_smp_wait(void)
  *
@@ -473,6 +516,10 @@ kexec_flag:
  */
 _GLOBAL(kexec_smp_wait)
 	lhz	r3,PACAHWCPUID(r13)
+#ifdef CONFIG_PPC_BOOK3E
+	/* Create a 1:1 mapping. */
+	bl	kexec_create_tlb
+#endif
 	bl	real_mode
 
 	li	r4,KEXEC_STATE_REAL_MODE
@@ -489,6 +536,7 @@ _GLOBAL(kexec_smp_wait)
  * don't overwrite r3 here, it is live for kexec_wait above.
  */
 real_mode:	/* assume normal blr return */
+#ifndef CONFIG_PPC_BOOK3E
 1:	li	r9,MSR_RI
 	li	r10,MSR_DR|MSR_IR
 	mflr	r11		/* return address to SRR0 */
@@ -500,7 +548,10 @@ real_mode:	/* assume normal blr return */
 	mtspr	SPRN_SRR1,r10
 	mtspr	SPRN_SRR0,r11
 	rfid
-
+#else
+	/* the real mode is nothing for book3e. */
+	blr
+#endif
 
 /*
  * kexec_sequence(newstack, start, image, control, clear_all())
@@ -549,6 +600,8 @@ _GLOBAL(kexec_sequence)
 	mtmsrd	r3,1
 #else
 	wrteei	0
+	/* Create a 1:1 mapping. */
+	bl	kexec_create_tlb
 #endif
 
 	/* copy dest pages, flush whole dest image */
-- 
1.7.9.5

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v2][PATCH 4/7] book3e/kexec/kdump: introduce a kexec kernel flag
  2013-06-20  7:53 ` Tiejun Chen
@ 2013-06-20  7:53   ` Tiejun Chen
  -1 siblings, 0 replies; 32+ messages in thread
From: Tiejun Chen @ 2013-06-20  7:53 UTC (permalink / raw)
  To: benh; +Cc: linux-kernel, linuxppc-dev

We need to introduce a flag to indicate we're already running
a kexec kernel so we can take the proper path. For example, we
shouldn't access spin_table from the bootloader to bring up any
secondary cpu for the kexec kernel, since the kexec kernel already
knows how to jump to generic_secondary_smp_init.

Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
 arch/powerpc/include/asm/smp.h    |    3 +++
 arch/powerpc/kernel/head_64.S     |   12 ++++++++++++
 arch/powerpc/kernel/misc_64.S     |    6 ++++++
 arch/powerpc/platforms/85xx/smp.c |   14 ++++++++++++++
 4 files changed, 35 insertions(+)

diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index ffbaabe..fbc3d9b 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -200,6 +200,9 @@ extern void generic_secondary_thread_init(void);
 extern unsigned long __secondary_hold_spinloop;
 extern unsigned long __secondary_hold_acknowledge;
 extern char __secondary_hold;
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+extern unsigned long __run_at_kexec;
+#endif
 
 extern void __early_start(void);
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 3e19ba2..ffa4b18 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -89,6 +89,12 @@ __secondary_hold_spinloop:
 __secondary_hold_acknowledge:
 	.llong	0x0
 
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+	.globl	__run_at_kexec
+__run_at_kexec:
+	.llong	0x0	/* Flag for the secondary kernel from kexec. */
+#endif
+
 #ifdef CONFIG_RELOCATABLE
 	/* This flag is set to 1 by a loader if the kernel should run
 	 * at the loaded address instead of the linked address.  This
@@ -417,6 +423,12 @@ _STATIC(__after_prom_start)
 #if defined(CONFIG_PPC_BOOK3E)
 	tovirt(r26,r26)			/* on booke, we already run at PAGE_OFFSET */
 #endif
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+	/* If relocated we need to restore this flag on that relocated address. */
+	ld	r7,__run_at_kexec-_stext(r26)
+	std	r7,__run_at_kexec-_stext(r26)
+#endif
+
 	lwz	r7,__run_at_load-_stext(r26)
 #if defined(CONFIG_PPC_BOOK3E)
 	tophys(r26,r26)			/* Restore for the remains. */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 20cbb98..c89aead 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -619,6 +619,12 @@ _GLOBAL(kexec_sequence)
 	bl	.copy_and_flush	/* (dest, src, copy limit, start offset) */
 1:	/* assume normal blr return */
 
+	/* notify we're going into kexec kernel for SMP. */
+	LOAD_REG_ADDR(r3,__run_at_kexec)
+	li	r4,1
+	std	r4,0(r3)
+	sync
+
 	/* release other cpus to the new kernel secondary start at 0x60 */
 	mflr	r5
 	li	r6,1
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 6a17599..b308373 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -150,6 +150,9 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
 	int hw_cpu = get_hard_smp_processor_id(nr);
 	int ioremappable;
 	int ret = 0;
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+	unsigned long *ptr;
+#endif
 
 	WARN_ON(nr < 0 || nr >= NR_CPUS);
 	WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
@@ -238,11 +241,22 @@ out:
 #else
 	smp_generic_kick_cpu(nr);
 
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+	ptr  = (unsigned long *)((unsigned long)&__run_at_kexec);
+	/* We shouldn't access spin_table from the bootloader to up any
+	 * secondary cpu for kexec kernel, and kexec kernel already
+	 * know how to jump to generic_secondary_smp_init.
+	 */
+	if (!*ptr) {
+#endif
 	flush_spin_table(spin_table);
 	out_be32(&spin_table->pir, hw_cpu);
 	out_be64((u64 *)(&spin_table->addr_h),
 	  __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
 	flush_spin_table(spin_table);
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+	}
+#endif
 #endif
 
 	local_irq_restore(flags);
-- 
1.7.9.5


^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v2][PATCH 4/7] book3e/kexec/kdump: introduce a kexec kernel flag
@ 2013-06-20  7:53   ` Tiejun Chen
  0 siblings, 0 replies; 32+ messages in thread
From: Tiejun Chen @ 2013-06-20  7:53 UTC (permalink / raw)
  To: benh; +Cc: linuxppc-dev, linux-kernel

We need to introduce a flag to indicate that we're already running
a kexec kernel so that we can take the proper path. For example, we
shouldn't access the spin_table from the bootloader to bring up any
secondary cpu for the kexec kernel, since the kexec kernel already
knows how to jump to generic_secondary_smp_init.

Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
 arch/powerpc/include/asm/smp.h    |    3 +++
 arch/powerpc/kernel/head_64.S     |   12 ++++++++++++
 arch/powerpc/kernel/misc_64.S     |    6 ++++++
 arch/powerpc/platforms/85xx/smp.c |   14 ++++++++++++++
 4 files changed, 35 insertions(+)

diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index ffbaabe..fbc3d9b 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -200,6 +200,9 @@ extern void generic_secondary_thread_init(void);
 extern unsigned long __secondary_hold_spinloop;
 extern unsigned long __secondary_hold_acknowledge;
 extern char __secondary_hold;
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+extern unsigned long __run_at_kexec;
+#endif
 
 extern void __early_start(void);
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 3e19ba2..ffa4b18 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -89,6 +89,12 @@ __secondary_hold_spinloop:
 __secondary_hold_acknowledge:
 	.llong	0x0
 
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+	.globl	__run_at_kexec
+__run_at_kexec:
+	.llong	0x0	/* Flag for the secondary kernel from kexec. */
+#endif
+
 #ifdef CONFIG_RELOCATABLE
 	/* This flag is set to 1 by a loader if the kernel should run
 	 * at the loaded address instead of the linked address.  This
@@ -417,6 +423,12 @@ _STATIC(__after_prom_start)
 #if defined(CONFIG_PPC_BOOK3E)
 	tovirt(r26,r26)			/* on booke, we already run at PAGE_OFFSET */
 #endif
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+	/* If relocated we need to restore this flag on that relocated address. */
+	ld	r7,__run_at_kexec-_stext(r26)
+	std	r7,__run_at_kexec-_stext(r26)
+#endif
+
 	lwz	r7,__run_at_load-_stext(r26)
 #if defined(CONFIG_PPC_BOOK3E)
 	tophys(r26,r26)			/* Restore for the remains. */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 20cbb98..c89aead 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -619,6 +619,12 @@ _GLOBAL(kexec_sequence)
 	bl	.copy_and_flush	/* (dest, src, copy limit, start offset) */
 1:	/* assume normal blr return */
 
+	/* notify we're going into kexec kernel for SMP. */
+	LOAD_REG_ADDR(r3,__run_at_kexec)
+	li	r4,1
+	std	r4,0(r3)
+	sync
+
 	/* release other cpus to the new kernel secondary start at 0x60 */
 	mflr	r5
 	li	r6,1
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 6a17599..b308373 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -150,6 +150,9 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
 	int hw_cpu = get_hard_smp_processor_id(nr);
 	int ioremappable;
 	int ret = 0;
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+	unsigned long *ptr;
+#endif
 
 	WARN_ON(nr < 0 || nr >= NR_CPUS);
 	WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
@@ -238,11 +241,22 @@ out:
 #else
 	smp_generic_kick_cpu(nr);
 
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+	ptr  = (unsigned long *)((unsigned long)&__run_at_kexec);
+	/* We shouldn't access spin_table from the bootloader to up any
+	 * secondary cpu for kexec kernel, and kexec kernel already
+	 * know how to jump to generic_secondary_smp_init.
+	 */
+	if (!*ptr) {
+#endif
 	flush_spin_table(spin_table);
 	out_be32(&spin_table->pir, hw_cpu);
 	out_be64((u64 *)(&spin_table->addr_h),
 	  __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
 	flush_spin_table(spin_table);
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+	}
+#endif
 #endif
 
 	local_irq_restore(flags);
-- 
1.7.9.5

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v2][PATCH 5/7] book3e/kexec/kdump: implement ppc64 kexec specific
  2013-06-20  7:53 ` Tiejun Chen
@ 2013-06-20  7:53   ` Tiejun Chen
  -1 siblings, 0 replies; 32+ messages in thread
From: Tiejun Chen @ 2013-06-20  7:53 UTC (permalink / raw)
  To: benh; +Cc: linux-kernel, linuxppc-dev

The ppc64 kexec mechanism has a different implementation from ppc32.

Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
 arch/powerpc/platforms/85xx/smp.c |   13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index b308373..18a5f8a 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -280,6 +280,7 @@ struct smp_ops_t smp_85xx_ops = {
 };
 
 #ifdef CONFIG_KEXEC
+#ifdef CONFIG_PPC32
 atomic_t kexec_down_cpus = ATOMIC_INIT(0);
 
 void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
@@ -298,6 +299,14 @@ static void mpc85xx_smp_kexec_down(void *arg)
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(0,1);
 }
+#else
+void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
+{
+	local_irq_disable();
+	hard_irq_disable();
+	mpic_teardown_this_cpu(secondary);
+}
+#endif
 
 static void map_and_flush(unsigned long paddr)
 {
@@ -349,11 +358,14 @@ static void mpc85xx_smp_flush_dcache_kexec(struct kimage *image)
 
 static void mpc85xx_smp_machine_kexec(struct kimage *image)
 {
+#ifdef CONFIG_PPC32
 	int timeout = INT_MAX;
 	int i, num_cpus = num_present_cpus();
+#endif
 
 	mpc85xx_smp_flush_dcache_kexec(image);
 
+#ifdef CONFIG_PPC32
 	if (image->type == KEXEC_TYPE_DEFAULT)
 		smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);
 
@@ -371,6 +383,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
 		if ( i == smp_processor_id() ) continue;
 		mpic_reset_core(i);
 	}
+#endif
 
 	default_machine_kexec(image);
 }
-- 
1.7.9.5


^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v2][PATCH 5/7] book3e/kexec/kdump: implement ppc64 kexec specific
@ 2013-06-20  7:53   ` Tiejun Chen
  0 siblings, 0 replies; 32+ messages in thread
From: Tiejun Chen @ 2013-06-20  7:53 UTC (permalink / raw)
  To: benh; +Cc: linuxppc-dev, linux-kernel

The ppc64 kexec mechanism has a different implementation from ppc32.

Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
 arch/powerpc/platforms/85xx/smp.c |   13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index b308373..18a5f8a 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -280,6 +280,7 @@ struct smp_ops_t smp_85xx_ops = {
 };
 
 #ifdef CONFIG_KEXEC
+#ifdef CONFIG_PPC32
 atomic_t kexec_down_cpus = ATOMIC_INIT(0);
 
 void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
@@ -298,6 +299,14 @@ static void mpc85xx_smp_kexec_down(void *arg)
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(0,1);
 }
+#else
+void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
+{
+	local_irq_disable();
+	hard_irq_disable();
+	mpic_teardown_this_cpu(secondary);
+}
+#endif
 
 static void map_and_flush(unsigned long paddr)
 {
@@ -349,11 +358,14 @@ static void mpc85xx_smp_flush_dcache_kexec(struct kimage *image)
 
 static void mpc85xx_smp_machine_kexec(struct kimage *image)
 {
+#ifdef CONFIG_PPC32
 	int timeout = INT_MAX;
 	int i, num_cpus = num_present_cpus();
+#endif
 
 	mpc85xx_smp_flush_dcache_kexec(image);
 
+#ifdef CONFIG_PPC32
 	if (image->type == KEXEC_TYPE_DEFAULT)
 		smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);
 
@@ -371,6 +383,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
 		if ( i == smp_processor_id() ) continue;
 		mpic_reset_core(i);
 	}
+#endif
 
 	default_machine_kexec(image);
 }
-- 
1.7.9.5

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v2][PATCH 6/7] book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET
  2013-06-20  7:53 ` Tiejun Chen
@ 2013-06-20  7:53   ` Tiejun Chen
  -1 siblings, 0 replies; 32+ messages in thread
From: Tiejun Chen @ 2013-06-20  7:53 UTC (permalink / raw)
  To: benh; +Cc: linux-kernel, linuxppc-dev

Book3e always uses 1GB-aligned TLB entries, so we should
use (KERNELBASE - MEMORY_START) as VIRT_PHYS_OFFSET so that
__pa/__va work properly while booting the kdump kernel.

Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
 arch/powerpc/include/asm/page.h |    2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 988c812..5b00081 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -112,6 +112,8 @@ extern long long virt_phys_offset;
 /* See Description below for VIRT_PHYS_OFFSET */
 #ifdef CONFIG_RELOCATABLE_PPC32
 #define VIRT_PHYS_OFFSET virt_phys_offset
+#elif defined(CONFIG_PPC_BOOK3E_64)
+#define VIRT_PHYS_OFFSET (KERNELBASE - MEMORY_START)
 #else
 #define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
 #endif
-- 
1.7.9.5


^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v2][PATCH 6/7] book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET
@ 2013-06-20  7:53   ` Tiejun Chen
  0 siblings, 0 replies; 32+ messages in thread
From: Tiejun Chen @ 2013-06-20  7:53 UTC (permalink / raw)
  To: benh; +Cc: linuxppc-dev, linux-kernel

Book3e always uses 1GB-aligned TLB entries, so we should
use (KERNELBASE - MEMORY_START) as VIRT_PHYS_OFFSET so that
__pa/__va work properly while booting the kdump kernel.

Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
 arch/powerpc/include/asm/page.h |    2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 988c812..5b00081 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -112,6 +112,8 @@ extern long long virt_phys_offset;
 /* See Description below for VIRT_PHYS_OFFSET */
 #ifdef CONFIG_RELOCATABLE_PPC32
 #define VIRT_PHYS_OFFSET virt_phys_offset
+#elif defined(CONFIG_PPC_BOOK3E_64)
+#define VIRT_PHYS_OFFSET (KERNELBASE - MEMORY_START)
 #else
 #define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
 #endif
-- 
1.7.9.5

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v2][PATCH 7/7] book3e/kexec/kdump: recover "r4 = 0" to create the initial TLB
  2013-06-20  7:53 ` Tiejun Chen
@ 2013-06-20  7:53   ` Tiejun Chen
  -1 siblings, 0 replies; 32+ messages in thread
From: Tiejun Chen @ 2013-06-20  7:53 UTC (permalink / raw)
  To: benh; +Cc: linux-kernel, linuxppc-dev

Commit 96f013f, "powerpc/kexec: Add kexec "hold" support for Book3e
processors", requires that GPR4 survive the "hold" process, for IBM Blue
Gene/Q with some very strange firmware. But for FSL Book3E, r4 = 1
indicates that the initial TLB entry for this core already exists, so
we should still set r4 to 0 to create that initial TLB.

Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
 arch/powerpc/kernel/head_64.S |    4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index ffa4b18..63ed1c3 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -129,6 +129,10 @@ __secondary_hold:
 	/* Grab our physical cpu number */
 	mr	r24,r3
 	/* stash r4 for book3e */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+	/* we need to setup initial TLB entry. */
+	li	r4,0
+#endif
 	mr	r25,r4
 
 	/* Tell the master cpu we're here */
-- 
1.7.9.5


^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v2][PATCH 7/7] book3e/kexec/kdump: recover "r4 = 0" to create the initial TLB
@ 2013-06-20  7:53   ` Tiejun Chen
  0 siblings, 0 replies; 32+ messages in thread
From: Tiejun Chen @ 2013-06-20  7:53 UTC (permalink / raw)
  To: benh; +Cc: linuxppc-dev, linux-kernel

Commit 96f013f, "powerpc/kexec: Add kexec "hold" support for Book3e
processors", requires that GPR4 survive the "hold" process, for IBM Blue
Gene/Q with some very strange firmware. But for FSL Book3E, r4 = 1
indicates that the initial TLB entry for this core already exists, so
we should still set r4 to 0 to create that initial TLB.

Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
 arch/powerpc/kernel/head_64.S |    4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index ffa4b18..63ed1c3 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -129,6 +129,10 @@ __secondary_hold:
 	/* Grab our physical cpu number */
 	mr	r24,r3
 	/* stash r4 for book3e */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+	/* we need to setup initial TLB entry. */
+	li	r4,0
+#endif
 	mr	r25,r4
 
 	/* Tell the master cpu we're here */
-- 
1.7.9.5

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* RE: [v2][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE
  2013-06-20  7:53   ` Tiejun Chen
@ 2013-07-02  5:00     ` Bhushan Bharat-R65777
  -1 siblings, 0 replies; 32+ messages in thread
From: Bhushan Bharat-R65777 @ 2013-07-02  5:00 UTC (permalink / raw)
  To: Tiejun Chen, benh; +Cc: linuxppc-dev, linux-kernel



> -----Original Message-----
> From: Linuxppc-dev [mailto:linuxppc-dev-
> bounces+bharat.bhushan=freescale.com@lists.ozlabs.org] On Behalf Of Tiejun Chen
> Sent: Thursday, June 20, 2013 1:23 PM
> To: benh@kernel.crashing.org
> Cc: linuxppc-dev@lists.ozlabs.org; linux-kernel@vger.kernel.org
> Subject: [v2][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE
> 
> book3e is different from book3s since 3s includes the exception
> vectors code in head_64.S, as it relies on absolute addressing
> which is only possible within this compilation unit. So we have
> to get that label address via the GOT.
> 
> And when booting a relocated kernel, we should reset IVPR properly
> again after .relocate.
> 
> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
> ---
>  arch/powerpc/include/asm/exception-64e.h |    8 ++++++++
>  arch/powerpc/kernel/exceptions-64e.S     |   15 ++++++++++++++-
>  arch/powerpc/kernel/head_64.S            |   22 ++++++++++++++++++++++
>  arch/powerpc/lib/feature-fixups.c        |    7 +++++++
>  4 files changed, 51 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/powerpc/include/asm/exception-64e.h
> b/arch/powerpc/include/asm/exception-64e.h
> index 51fa43e..89e940d 100644
> --- a/arch/powerpc/include/asm/exception-64e.h
> +++ b/arch/powerpc/include/asm/exception-64e.h
> @@ -214,10 +214,18 @@ exc_##label##_book3e:
>  #define TLB_MISS_STATS_SAVE_INFO_BOLTED
>  #endif
> 
> +#ifndef CONFIG_RELOCATABLE
>  #define SET_IVOR(vector_number, vector_offset)	\
>  	li	r3,vector_offset@l; 		\
>  	ori	r3,r3,interrupt_base_book3e@l;	\
>  	mtspr	SPRN_IVOR##vector_number,r3;
> +#else
> +#define SET_IVOR(vector_number, vector_offset)	\
> +	LOAD_REG_ADDR(r3,interrupt_base_book3e);\
> +	rlwinm	r3,r3,0,15,0;			\
> +	ori	r3,r3,vector_offset@l;		\
> +	mtspr	SPRN_IVOR##vector_number,r3;
> +#endif
> 
>  #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
> 
> diff --git a/arch/powerpc/kernel/exceptions-64e.S
> b/arch/powerpc/kernel/exceptions-64e.S
> index 645170a..4b23119 100644
> --- a/arch/powerpc/kernel/exceptions-64e.S
> +++ b/arch/powerpc/kernel/exceptions-64e.S
> @@ -1097,7 +1097,15 @@ skpinv:	addi	r6,r6,1				/*
> Increment */
>   * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
>   */
>  	/* Now we branch the new virtual address mapped by this entry */
> +#ifdef CONFIG_RELOCATABLE
> +	/* We have to find out address from lr. */
> +	bl	1f		/* Find our address */
> +1:	mflr	r6
> +	addi	r6,r6,(2f - 1b)
> +	tovirt(r6,r6)
> +#else
>  	LOAD_REG_IMMEDIATE(r6,2f)
> +#endif
>  	lis	r7,MSR_KERNEL@h
>  	ori	r7,r7,MSR_KERNEL@l
>  	mtspr	SPRN_SRR0,r6
> @@ -1348,9 +1356,14 @@ _GLOBAL(book3e_secondary_thread_init)
>  	mflr	r28
>  	b	3b
> 
> -_STATIC(init_core_book3e)
> +_GLOBAL(init_core_book3e)
>  	/* Establish the interrupt vector base */
> +#ifdef CONFIG_RELOCATABLE
> +	tovirt(r2,r2)
> +	LOAD_REG_ADDR(r3, interrupt_base_book3e)
> +#else
>  	LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
> +#endif
>  	mtspr	SPRN_IVPR,r3
>  	sync
>  	blr
> diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
> index b61363d..0942f3a 100644
> --- a/arch/powerpc/kernel/head_64.S
> +++ b/arch/powerpc/kernel/head_64.S
> @@ -414,12 +414,22 @@ _STATIC(__after_prom_start)
>  	/* process relocations for the final address of the kernel */
>  	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
>  	sldi	r25,r25,32
> +#if defined(CONFIG_PPC_BOOK3E)
> +	tovirt(r26,r26)			/* on booke, we already run at
> PAGE_OFFSET */
> +#endif
>  	lwz	r7,__run_at_load-_stext(r26)
> +#if defined(CONFIG_PPC_BOOK3E)
> +	tophys(r26,r26)			/* Restore for the remains. */
> +#endif
>  	cmplwi	cr0,r7,1	/* flagged to stay where we are ? */
>  	bne	1f
>  	add	r25,r25,r26
>  1:	mr	r3,r25
>  	bl	.relocate
> +#if defined(CONFIG_PPC_BOOK3E)
> +	/* We should set ivpr again after .relocate. */
> +	bl	.init_core_book3e
> +#endif
>  #endif
> 
>  /*
> @@ -447,12 +457,24 @@ _STATIC(__after_prom_start)
>   * variable __run_at_load, if it is set the kernel is treated as relocatable
>   * kernel, otherwise it will be moved to PHYSICAL_START
>   */
> +#if defined(CONFIG_PPC_BOOK3E)
> +	tovirt(r26,r26)			/* on booke, we already run at
> PAGE_OFFSET */
> +#endif
>  	lwz	r7,__run_at_load-_stext(r26)
> +#if defined(CONFIG_PPC_BOOK3E)
> +	tophys(r26,r26)			/* Restore for the remains. */
> +#endif
>  	cmplwi	cr0,r7,1
>  	bne	3f
> 
> +#ifdef CONFIG_PPC_BOOK3E
> +	LOAD_REG_ADDR(r5, interrupt_end_book3e)
> +	LOAD_REG_ADDR(r11, _stext)
> +	sub	r5,r5,r11
> +#else
>  	/* just copy interrupts */
>  	LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
> +#endif
>  	b	5f
>  3:
>  #endif
> diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-
> fixups.c
> index 7a8a748..13f20ed 100644
> --- a/arch/powerpc/lib/feature-fixups.c
> +++ b/arch/powerpc/lib/feature-fixups.c
> @@ -135,13 +135,20 @@ void do_final_fixups(void)
>  #if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
>  	int *src, *dest;
>  	unsigned long length;
> +#ifdef CONFIG_PPC_BOOK3E
> +	extern char interrupt_end_book3e[];
> +#endif

Cannot we do this in arch/powerpc/kernel/asm/sections.h

> 
>  	if (PHYSICAL_START == 0)
>  		return;
> 
>  	src = (int *)(KERNELBASE + PHYSICAL_START);
>  	dest = (int *)KERNELBASE;
> +#ifdef CONFIG_PPC_BOOK3E
> +	length = (interrupt_end_book3e - _stext) / sizeof(int);
> +#else
>  	length = (__end_interrupts - _stext) / sizeof(int);
> +#endif

Can we keep the same name, __end_interrupts, in book3s and book3e? This way we can avoid such #ifdefs

-Bharat

> 
>  	while (length--) {
>  		patch_instruction(dest, *src);
> --
> 1.7.9.5
> 
> _______________________________________________
> Linuxppc-dev mailing list
> Linuxppc-dev@lists.ozlabs.org
> https://lists.ozlabs.org/listinfo/linuxppc-dev



^ permalink raw reply	[flat|nested] 32+ messages in thread

* RE: [v2][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE
@ 2013-07-02  5:00     ` Bhushan Bharat-R65777
  0 siblings, 0 replies; 32+ messages in thread
From: Bhushan Bharat-R65777 @ 2013-07-02  5:00 UTC (permalink / raw)
  To: Tiejun Chen, benh; +Cc: linuxppc-dev, linux-kernel



> -----Original Message-----
> From: Linuxppc-dev [mailto:linuxppc-dev-
> bounces+bharat.bhushan=3Dfreescale.com@lists.ozlabs.org] On Behalf Of Tie=
jun Chen
> Sent: Thursday, June 20, 2013 1:23 PM
> To: benh@kernel.crashing.org
> Cc: linuxppc-dev@lists.ozlabs.org; linux-kernel@vger.kernel.org
> Subject: [v2][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE
>=20
> book3e is different with book3s since 3s includes the exception
> vectors code in head_64.S as it relies on absolute addressing
> which is only possible within this compilation unit. So we have
> to get that label address with got.
>=20
> And when boot a relocated kernel, we should reset ipvr properly again
> after .relocate.
>=20
> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
> ---
>  arch/powerpc/include/asm/exception-64e.h |    8 ++++++++
>  arch/powerpc/kernel/exceptions-64e.S     |   15 ++++++++++++++-
>  arch/powerpc/kernel/head_64.S            |   22 ++++++++++++++++++++++
>  arch/powerpc/lib/feature-fixups.c        |    7 +++++++
>  4 files changed, 51 insertions(+), 1 deletion(-)
>=20
> diff --git a/arch/powerpc/include/asm/exception-64e.h
> b/arch/powerpc/include/asm/exception-64e.h
> index 51fa43e..89e940d 100644
> --- a/arch/powerpc/include/asm/exception-64e.h
> +++ b/arch/powerpc/include/asm/exception-64e.h
> @@ -214,10 +214,18 @@ exc_##label##_book3e:
>  #define TLB_MISS_STATS_SAVE_INFO_BOLTED
>  #endif
>=20
> +#ifndef CONFIG_RELOCATABLE
>  #define SET_IVOR(vector_number, vector_offset)	\
>  	li	r3,vector_offset@l; 		\
>  	ori	r3,r3,interrupt_base_book3e@l;	\
>  	mtspr	SPRN_IVOR##vector_number,r3;
> +#else
> +#define SET_IVOR(vector_number, vector_offset)	\
> +	LOAD_REG_ADDR(r3,interrupt_base_book3e);\
> +	rlwinm	r3,r3,0,15,0;			\
> +	ori	r3,r3,vector_offset@l;		\
> +	mtspr	SPRN_IVOR##vector_number,r3;
> +#endif
>=20
>  #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
>=20
> diff --git a/arch/powerpc/kernel/exceptions-64e.S
> b/arch/powerpc/kernel/exceptions-64e.S
> index 645170a..4b23119 100644
> --- a/arch/powerpc/kernel/exceptions-64e.S
> +++ b/arch/powerpc/kernel/exceptions-64e.S
> @@ -1097,7 +1097,15 @@ skpinv:	addi	r6,r6,1				/*
> Increment */
>   * r4 =3D MAS0 w/TLBSEL & ESEL for the temp mapping
>   */
>  	/* Now we branch the new virtual address mapped by this entry */
> +#ifdef CONFIG_RELOCATABLE
> +	/* We have to find out address from lr. */
> +	bl	1f		/* Find our address */
> +1:	mflr	r6
> +	addi	r6,r6,(2f - 1b)
> +	tovirt(r6,r6)
> +#else
>  	LOAD_REG_IMMEDIATE(r6,2f)
> +#endif
>  	lis	r7,MSR_KERNEL@h
>  	ori	r7,r7,MSR_KERNEL@l
>  	mtspr	SPRN_SRR0,r6
> @@ -1348,9 +1356,14 @@ _GLOBAL(book3e_secondary_thread_init)
>  	mflr	r28
>  	b	3b
>=20
> -_STATIC(init_core_book3e)
> +_GLOBAL(init_core_book3e)
>  	/* Establish the interrupt vector base */
> +#ifdef CONFIG_RELOCATABLE
> +	tovirt(r2,r2)
> +	LOAD_REG_ADDR(r3, interrupt_base_book3e)
> +#else
>  	LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
> +#endif
>  	mtspr	SPRN_IVPR,r3
>  	sync
>  	blr
> diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.=
S
> index b61363d..0942f3a 100644
> --- a/arch/powerpc/kernel/head_64.S
> +++ b/arch/powerpc/kernel/head_64.S
> @@ -414,12 +414,22 @@ _STATIC(__after_prom_start)
>  	/* process relocations for the final address of the kernel */
>  	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
>  	sldi	r25,r25,32
> +#if defined(CONFIG_PPC_BOOK3E)
> +	tovirt(r26,r26)			/* on booke, we already run at
> PAGE_OFFSET */
> +#endif
>  	lwz	r7,__run_at_load-_stext(r26)
> +#if defined(CONFIG_PPC_BOOK3E)
> +	tophys(r26,r26)			/* Restore for the remains. */
> +#endif
>  	cmplwi	cr0,r7,1	/* flagged to stay where we are ? */
>  	bne	1f
>  	add	r25,r25,r26
>  1:	mr	r3,r25
>  	bl	.relocate
> +#if defined(CONFIG_PPC_BOOK3E)
> +	/* We should set ivpr again after .relocate. */
> +	bl	.init_core_book3e
> +#endif
>  #endif
>=20
>  /*
> @@ -447,12 +457,24 @@ _STATIC(__after_prom_start)
>   * variable __run_at_load, if it is set the kernel is treated as relocat=
able
>   * kernel, otherwise it will be moved to PHYSICAL_START
>   */
> +#if defined(CONFIG_PPC_BOOK3E)
> +	tovirt(r26,r26)			/* on booke, we already run at
> PAGE_OFFSET */
> +#endif
>  	lwz	r7,__run_at_load-_stext(r26)
> +#if defined(CONFIG_PPC_BOOK3E)
> +	tophys(r26,r26)			/* Restore for the remains. */
> +#endif
>  	cmplwi	cr0,r7,1
>  	bne	3f
>=20
> +#ifdef CONFIG_PPC_BOOK3E
> +	LOAD_REG_ADDR(r5, interrupt_end_book3e)
> +	LOAD_REG_ADDR(r11, _stext)
> +	sub	r5,r5,r11
> +#else
>  	/* just copy interrupts */
>  	LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
> +#endif
>  	b	5f
>  3:
>  #endif
> diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature=
-
> fixups.c
> index 7a8a748..13f20ed 100644
> --- a/arch/powerpc/lib/feature-fixups.c
> +++ b/arch/powerpc/lib/feature-fixups.c
> @@ -135,13 +135,20 @@ void do_final_fixups(void)
>  #if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
>  	int *src, *dest;
>  	unsigned long length;
> +#ifdef CONFIG_PPC_BOOK3E
> +	extern char interrupt_end_book3e[];
> +#endif

Cannot we do this in arch/powerpc/kernel/asm/sections.h

>=20
>  	if (PHYSICAL_START =3D=3D 0)
>  		return;
>=20
>  	src =3D (int *)(KERNELBASE + PHYSICAL_START);
>  	dest =3D (int *)KERNELBASE;
> +#ifdef CONFIG_PPC_BOOK3E
> +	length =3D (interrupt_end_book3e - _stext) / sizeof(int);
> +#else
>  	length =3D (__end_interrupts - _stext) / sizeof(int);
> +#endif

can we keep same name in books and booke; __end_interrupts ? this way we ca=
n avoid such #ifdefs

-Bharat

>=20
>  	while (length--) {
>  		patch_instruction(dest, *src);
> --
> 1.7.9.5
>=20
> _______________________________________________
> Linuxppc-dev mailing list
> Linuxppc-dev@lists.ozlabs.org
> https://lists.ozlabs.org/listinfo/linuxppc-dev

^ permalink raw reply	[flat|nested] 32+ messages in thread

* RE: [v2][PATCH 2/7] book3e/kexec/kdump: enable kexec for kernel
  2013-06-20  7:53   ` Tiejun Chen
@ 2013-07-02  5:17     ` Bhushan Bharat-R65777
  -1 siblings, 0 replies; 32+ messages in thread
From: Bhushan Bharat-R65777 @ 2013-07-02  5:17 UTC (permalink / raw)
  To: Tiejun Chen, benh; +Cc: linuxppc-dev, linux-kernel



> -----Original Message-----
> From: Linuxppc-dev [mailto:linuxppc-dev-
> bounces+bharat.bhushan=freescale.com@lists.ozlabs.org] On Behalf Of Tiejun Chen
> Sent: Thursday, June 20, 2013 1:23 PM
> To: benh@kernel.crashing.org
> Cc: linuxppc-dev@lists.ozlabs.org; linux-kernel@vger.kernel.org
> Subject: [v2][PATCH 2/7] book3e/kexec/kdump: enable kexec for kernel
> 
> We need to activate KEXEC for book3e and bypass or convert non-book3e stuff
> in kexec coverage.
> 
> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
> ---
>  arch/powerpc/Kconfig                   |    2 +-
>  arch/powerpc/kernel/machine_kexec_64.c |    6 ++++++
>  arch/powerpc/kernel/misc_64.S          |    6 ++++++
>  3 files changed, 13 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index c33e3ad..6ecf3c9 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -364,7 +364,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
> 
>  config KEXEC
>  	bool "kexec system call"
> -	depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
> +	depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) || PPC_BOOK3E
>  	help
>  	  kexec is a system call that implements the ability to shutdown your
>  	  current kernel, and to start another kernel.  It is like a reboot
> diff --git a/arch/powerpc/kernel/machine_kexec_64.c
> b/arch/powerpc/kernel/machine_kexec_64.c
> index 611acdf..ef39271 100644
> --- a/arch/powerpc/kernel/machine_kexec_64.c
> +++ b/arch/powerpc/kernel/machine_kexec_64.c
> @@ -33,6 +33,7 @@
>  int default_machine_kexec_prepare(struct kimage *image)
>  {
>  	int i;
> +#ifndef CONFIG_PPC_BOOK3E
>  	unsigned long begin, end;	/* limits of segment */
>  	unsigned long low, high;	/* limits of blocked memory range */
>  	struct device_node *node;
> @@ -41,6 +42,7 @@ int default_machine_kexec_prepare(struct kimage *image)
> 
>  	if (!ppc_md.hpte_clear_all)
>  		return -ENOENT;
> +#endif

Do we really need this function for book3e? Can we have a separate function rather than multiple confusing ifdefs?

-Bharat

> 
>  	/*
>  	 * Since we use the kernel fault handlers and paging code to
> @@ -51,6 +53,7 @@ int default_machine_kexec_prepare(struct kimage *image)
>  		if (image->segment[i].mem < __pa(_end))
>  			return -ETXTBSY;
> 
> +#ifndef CONFIG_PPC_BOOK3E
>  	/*
>  	 * For non-LPAR, we absolutely can not overwrite the mmu hash
>  	 * table, since we are still using the bolted entries in it to
> @@ -92,6 +95,7 @@ int default_machine_kexec_prepare(struct kimage *image)
>  				return -ETXTBSY;
>  		}
>  	}
> +#endif
> 
>  	return 0;
>  }
> @@ -367,6 +371,7 @@ void default_machine_kexec(struct kimage *image)
>  	/* NOTREACHED */
>  }
> 
> +#ifndef CONFIG_PPC_BOOK3E
>  /* Values we need to export to the second kernel via the device tree. */
>  static unsigned long htab_base;
> 
> @@ -411,3 +416,4 @@ static int __init export_htab_values(void)
>  	return 0;
>  }
>  late_initcall(export_htab_values);
> +#endif
> diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
> index 6820e45..f1a7ce7 100644
> --- a/arch/powerpc/kernel/misc_64.S
> +++ b/arch/powerpc/kernel/misc_64.S
> @@ -543,9 +543,13 @@ _GLOBAL(kexec_sequence)
>  	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */
> 
>  	/* disable interrupts, we are overwriting kernel data next */
> +#ifndef CONFIG_PPC_BOOK3E
>  	mfmsr	r3
>  	rlwinm	r3,r3,0,17,15
>  	mtmsrd	r3,1
> +#else
> +	wrteei	0
> +#endif
> 
>  	/* copy dest pages, flush whole dest image */
>  	mr	r3,r29
> @@ -567,10 +571,12 @@ _GLOBAL(kexec_sequence)
>  	li	r6,1
>  	stw	r6,kexec_flag-1b(5)
> 
> +#ifndef CONFIG_PPC_BOOK3E
>  	/* clear out hardware hash page table and tlb */
>  	ld	r5,0(r27)		/* deref function descriptor */
>  	mtctr	r5
>  	bctrl				/* ppc_md.hpte_clear_all(void); */
> +#endif
> 
>  /*
>   *   kexec image calling is:
> --
> 1.7.9.5
> 
> _______________________________________________
> Linuxppc-dev mailing list
> Linuxppc-dev@lists.ozlabs.org
> https://lists.ozlabs.org/listinfo/linuxppc-dev



^ permalink raw reply	[flat|nested] 32+ messages in thread

* RE: [v2][PATCH 2/7] book3e/kexec/kdump: enable kexec for kernel
@ 2013-07-02  5:17     ` Bhushan Bharat-R65777
  0 siblings, 0 replies; 32+ messages in thread
From: Bhushan Bharat-R65777 @ 2013-07-02  5:17 UTC (permalink / raw)
  To: Tiejun Chen, benh; +Cc: linuxppc-dev, linux-kernel



> -----Original Message-----
> From: Linuxppc-dev [mailto:linuxppc-dev-
> bounces+bharat.bhushan=freescale.com@lists.ozlabs.org] On Behalf Of Tiejun Chen
> Sent: Thursday, June 20, 2013 1:23 PM
> To: benh@kernel.crashing.org
> Cc: linuxppc-dev@lists.ozlabs.org; linux-kernel@vger.kernel.org
> Subject: [v2][PATCH 2/7] book3e/kexec/kdump: enable kexec for kernel
>=20
> We need to active KEXEC for book3e and bypass or convert non-book3e stuff
> in kexec coverage.
>=20
> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
> ---
>  arch/powerpc/Kconfig                   |    2 +-
>  arch/powerpc/kernel/machine_kexec_64.c |    6 ++++++
>  arch/powerpc/kernel/misc_64.S          |    6 ++++++
>  3 files changed, 13 insertions(+), 1 deletion(-)
>=20
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index c33e3ad..6ecf3c9 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -364,7 +364,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
>=20
>  config KEXEC
>  	bool "kexec system call"
> -	depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
> +	depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) || PPC_BOOK3E
>  	help
>  	  kexec is a system call that implements the ability to shutdown your
>  	  current kernel, and to start another kernel.  It is like a reboot
> diff --git a/arch/powerpc/kernel/machine_kexec_64.c
> b/arch/powerpc/kernel/machine_kexec_64.c
> index 611acdf..ef39271 100644
> --- a/arch/powerpc/kernel/machine_kexec_64.c
> +++ b/arch/powerpc/kernel/machine_kexec_64.c
> @@ -33,6 +33,7 @@
>  int default_machine_kexec_prepare(struct kimage *image)
>  {
>  	int i;
> +#ifndef CONFIG_PPC_BOOK3E
>  	unsigned long begin, end;	/* limits of segment */
>  	unsigned long low, high;	/* limits of blocked memory range */
>  	struct device_node *node;
> @@ -41,6 +42,7 @@ int default_machine_kexec_prepare(struct kimage *image)
>=20
>  	if (!ppc_md.hpte_clear_all)
>  		return -ENOENT;
> +#endif

Do we really need this function for book3e? Can we have a separate function rather than multiple confusing ifdefs?

-Bharat

>=20
>  	/*
>  	 * Since we use the kernel fault handlers and paging code to
> @@ -51,6 +53,7 @@ int default_machine_kexec_prepare(struct kimage *image)
>  		if (image->segment[i].mem < __pa(_end))
>  			return -ETXTBSY;
>=20
> +#ifndef CONFIG_PPC_BOOK3E
>  	/*
>  	 * For non-LPAR, we absolutely can not overwrite the mmu hash
>  	 * table, since we are still using the bolted entries in it to
> @@ -92,6 +95,7 @@ int default_machine_kexec_prepare(struct kimage *image)
>  				return -ETXTBSY;
>  		}
>  	}
> +#endif
>=20
>  	return 0;
>  }
> @@ -367,6 +371,7 @@ void default_machine_kexec(struct kimage *image)
>  	/* NOTREACHED */
>  }
>=20
> +#ifndef CONFIG_PPC_BOOK3E
>  /* Values we need to export to the second kernel via the device tree. */
>  static unsigned long htab_base;
>=20
> @@ -411,3 +416,4 @@ static int __init export_htab_values(void)
>  	return 0;
>  }
>  late_initcall(export_htab_values);
> +#endif
> diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
> index 6820e45..f1a7ce7 100644
> --- a/arch/powerpc/kernel/misc_64.S
> +++ b/arch/powerpc/kernel/misc_64.S
> @@ -543,9 +543,13 @@ _GLOBAL(kexec_sequence)
>  	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */
>=20
>  	/* disable interrupts, we are overwriting kernel data next */
> +#ifndef CONFIG_PPC_BOOK3E
>  	mfmsr	r3
>  	rlwinm	r3,r3,0,17,15
>  	mtmsrd	r3,1
> +#else
> +	wrteei	0
> +#endif
>=20
>  	/* copy dest pages, flush whole dest image */
>  	mr	r3,r29
> @@ -567,10 +571,12 @@ _GLOBAL(kexec_sequence)
>  	li	r6,1
>  	stw	r6,kexec_flag-1b(5)
>=20
> +#ifndef CONFIG_PPC_BOOK3E
>  	/* clear out hardware hash page table and tlb */
>  	ld	r5,0(r27)		/* deref function descriptor */
>  	mtctr	r5
>  	bctrl				/* ppc_md.hpte_clear_all(void); */
> +#endif
>=20
>  /*
>   *   kexec image calling is:
> --
> 1.7.9.5
>=20
> _______________________________________________
> Linuxppc-dev mailing list
> Linuxppc-dev@lists.ozlabs.org
> https://lists.ozlabs.org/listinfo/linuxppc-dev

^ permalink raw reply	[flat|nested] 32+ messages in thread

* RE: [v2][PATCH 4/7] book3e/kexec/kdump: introduce a kexec kernel flag
  2013-06-20  7:53   ` Tiejun Chen
@ 2013-07-02  5:37     ` Bhushan Bharat-R65777
  -1 siblings, 0 replies; 32+ messages in thread
From: Bhushan Bharat-R65777 @ 2013-07-02  5:37 UTC (permalink / raw)
  To: Tiejun Chen, benh; +Cc: linuxppc-dev, linux-kernel



> -----Original Message-----
> From: Linuxppc-dev [mailto:linuxppc-dev-
> bounces+bharat.bhushan=freescale.com@lists.ozlabs.org] On Behalf Of Tiejun Chen
> Sent: Thursday, June 20, 2013 1:23 PM
> To: benh@kernel.crashing.org
> Cc: linuxppc-dev@lists.ozlabs.org; linux-kernel@vger.kernel.org
> Subject: [v2][PATCH 4/7] book3e/kexec/kdump: introduce a kexec kernel flag
> 
> We need to introduce a flag to indicate we're already running
> a kexec kernel then we can go proper path. For example, We
> shouldn't access spin_table from the bootloader to up any secondary
> cpu for kexec kernel, and kexec kernel already know how to jump to
> generic_secondary_smp_init.
> 
> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
> ---
>  arch/powerpc/include/asm/smp.h    |    3 +++
>  arch/powerpc/kernel/head_64.S     |   12 ++++++++++++
>  arch/powerpc/kernel/misc_64.S     |    6 ++++++
>  arch/powerpc/platforms/85xx/smp.c |   14 ++++++++++++++
>  4 files changed, 35 insertions(+)
> 
> diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
> index ffbaabe..fbc3d9b 100644
> --- a/arch/powerpc/include/asm/smp.h
> +++ b/arch/powerpc/include/asm/smp.h
> @@ -200,6 +200,9 @@ extern void generic_secondary_thread_init(void);
>  extern unsigned long __secondary_hold_spinloop;
>  extern unsigned long __secondary_hold_acknowledge;
>  extern char __secondary_hold;
> +#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
> +extern unsigned long __run_at_kexec;
> +#endif
> 
>  extern void __early_start(void);
>  #endif /* __ASSEMBLY__ */
> diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
> index 3e19ba2..ffa4b18 100644
> --- a/arch/powerpc/kernel/head_64.S
> +++ b/arch/powerpc/kernel/head_64.S
> @@ -89,6 +89,12 @@ __secondary_hold_spinloop:
>  __secondary_hold_acknowledge:
>  	.llong	0x0
> 
> +#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
> +	.globl	__run_at_kexec
> +__run_at_kexec:
> +	.llong	0x0	/* Flag for the secondary kernel from kexec. */
> +#endif
> +
>  #ifdef CONFIG_RELOCATABLE
>  	/* This flag is set to 1 by a loader if the kernel should run
>  	 * at the loaded address instead of the linked address.  This
> @@ -417,6 +423,12 @@ _STATIC(__after_prom_start)
>  #if defined(CONFIG_PPC_BOOK3E)
>  	tovirt(r26,r26)			/* on booke, we already run at
> PAGE_OFFSET */
>  #endif
> +#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
> +	/* If relocated we need to restore this flag on that relocated address. */
> +	ld	r7,__run_at_kexec-_stext(r26)
> +	std	r7,__run_at_kexec-_stext(r26)
> +#endif
> +
>  	lwz	r7,__run_at_load-_stext(r26)
>  #if defined(CONFIG_PPC_BOOK3E)
>  	tophys(r26,r26)			/* Restore for the remains. */
> diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
> index 20cbb98..c89aead 100644
> --- a/arch/powerpc/kernel/misc_64.S
> +++ b/arch/powerpc/kernel/misc_64.S
> @@ -619,6 +619,12 @@ _GLOBAL(kexec_sequence)
>  	bl	.copy_and_flush	/* (dest, src, copy limit, start offset) */
>  1:	/* assume normal blr return */
> 
> +	/* notify we're going into kexec kernel for SMP. */
> +	LOAD_REG_ADDR(r3,__run_at_kexec)
> +	li	r4,1
> +	std	r4,0(r3)
> +	sync
> +
>  	/* release other cpus to the new kernel secondary start at 0x60 */
>  	mflr	r5
>  	li	r6,1
> diff --git a/arch/powerpc/platforms/85xx/smp.c
> b/arch/powerpc/platforms/85xx/smp.c
> index 6a17599..b308373 100644
> --- a/arch/powerpc/platforms/85xx/smp.c
> +++ b/arch/powerpc/platforms/85xx/smp.c
> @@ -150,6 +150,9 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
>  	int hw_cpu = get_hard_smp_processor_id(nr);
>  	int ioremappable;
>  	int ret = 0;
> +#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
> +	unsigned long *ptr;
> +#endif

What about if we can remove the ifdef around *ptr ...

> 
>  	WARN_ON(nr < 0 || nr >= NR_CPUS);
>  	WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
> @@ -238,11 +241,22 @@ out:
>  #else
>  	smp_generic_kick_cpu(nr);
> 
> +#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
> +	ptr  = (unsigned long *)((unsigned long)&__run_at_kexec);

... #endif here ...

> +	/* We shouldn't access spin_table from the bootloader to up any
> +	 * secondary cpu for kexec kernel, and kexec kernel already
> +	 * know how to jump to generic_secondary_smp_init.
> +	 */
> +	if (!*ptr) {
> +#endif

... remove #endif ...

>  	flush_spin_table(spin_table);
>  	out_be32(&spin_table->pir, hw_cpu);
>  	out_be64((u64 *)(&spin_table->addr_h),
>  	  __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
>  	flush_spin_table(spin_table);
> +#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
> +	}
> +#endif

--- remove above 3 lines

-Bharat

>  #endif
> 
>  	local_irq_restore(flags);
> --
> 1.7.9.5
> 
> _______________________________________________
> Linuxppc-dev mailing list
> Linuxppc-dev@lists.ozlabs.org
> https://lists.ozlabs.org/listinfo/linuxppc-dev



^ permalink raw reply	[flat|nested] 32+ messages in thread

* RE: [v2][PATCH 4/7] book3e/kexec/kdump: introduce a kexec kernel flag
@ 2013-07-02  5:37     ` Bhushan Bharat-R65777
  0 siblings, 0 replies; 32+ messages in thread
From: Bhushan Bharat-R65777 @ 2013-07-02  5:37 UTC (permalink / raw)
  To: Tiejun Chen, benh; +Cc: linuxppc-dev, linux-kernel



> -----Original Message-----
> From: Linuxppc-dev [mailto:linuxppc-dev-
> bounces+bharat.bhushan=freescale.com@lists.ozlabs.org] On Behalf Of Tiejun Chen
> Sent: Thursday, June 20, 2013 1:23 PM
> To: benh@kernel.crashing.org
> Cc: linuxppc-dev@lists.ozlabs.org; linux-kernel@vger.kernel.org
> Subject: [v2][PATCH 4/7] book3e/kexec/kdump: introduce a kexec kernel flag
>=20
> We need to introduce a flag to indicate we're already running
> a kexec kernel then we can go proper path. For example, We
> shouldn't access spin_table from the bootloader to up any secondary
> cpu for kexec kernel, and kexec kernel already know how to jump to
> generic_secondary_smp_init.
>=20
> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
> ---
>  arch/powerpc/include/asm/smp.h    |    3 +++
>  arch/powerpc/kernel/head_64.S     |   12 ++++++++++++
>  arch/powerpc/kernel/misc_64.S     |    6 ++++++
>  arch/powerpc/platforms/85xx/smp.c |   14 ++++++++++++++
>  4 files changed, 35 insertions(+)
>=20
> diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/sm=
p.h
> index ffbaabe..fbc3d9b 100644
> --- a/arch/powerpc/include/asm/smp.h
> +++ b/arch/powerpc/include/asm/smp.h
> @@ -200,6 +200,9 @@ extern void generic_secondary_thread_init(void);
>  extern unsigned long __secondary_hold_spinloop;
>  extern unsigned long __secondary_hold_acknowledge;
>  extern char __secondary_hold;
> +#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
> +extern unsigned long __run_at_kexec;
> +#endif
>=20
>  extern void __early_start(void);
>  #endif /* __ASSEMBLY__ */
> diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.=
S
> index 3e19ba2..ffa4b18 100644
> --- a/arch/powerpc/kernel/head_64.S
> +++ b/arch/powerpc/kernel/head_64.S
> @@ -89,6 +89,12 @@ __secondary_hold_spinloop:
>  __secondary_hold_acknowledge:
>  	.llong	0x0
>=20
> +#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
> +	.globl	__run_at_kexec
> +__run_at_kexec:
> +	.llong	0x0	/* Flag for the secondary kernel from kexec. */
> +#endif
> +
>  #ifdef CONFIG_RELOCATABLE
>  	/* This flag is set to 1 by a loader if the kernel should run
>  	 * at the loaded address instead of the linked address.  This
> @@ -417,6 +423,12 @@ _STATIC(__after_prom_start)
>  #if defined(CONFIG_PPC_BOOK3E)
>  	tovirt(r26,r26)			/* on booke, we already run at
> PAGE_OFFSET */
>  #endif
> +#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
> +	/* If relocated we need to restore this flag on that relocated address. */
> +	ld	r7,__run_at_kexec-_stext(r26)
> +	std	r7,__run_at_kexec-_stext(r26)
> +#endif
> +
>  	lwz	r7,__run_at_load-_stext(r26)
>  #if defined(CONFIG_PPC_BOOK3E)
>  	tophys(r26,r26)			/* Restore for the remains. */
> diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.=
S
> index 20cbb98..c89aead 100644
> --- a/arch/powerpc/kernel/misc_64.S
> +++ b/arch/powerpc/kernel/misc_64.S
> @@ -619,6 +619,12 @@ _GLOBAL(kexec_sequence)
>  	bl	.copy_and_flush	/* (dest, src, copy limit, start offset) */
>  1:	/* assume normal blr return */
>=20
> +	/* notify we're going into kexec kernel for SMP. */
> +	LOAD_REG_ADDR(r3,__run_at_kexec)
> +	li	r4,1
> +	std	r4,0(r3)
> +	sync
> +
>  	/* release other cpus to the new kernel secondary start at 0x60 */
>  	mflr	r5
>  	li	r6,1
> diff --git a/arch/powerpc/platforms/85xx/smp.c
> b/arch/powerpc/platforms/85xx/smp.c
> index 6a17599..b308373 100644
> --- a/arch/powerpc/platforms/85xx/smp.c
> +++ b/arch/powerpc/platforms/85xx/smp.c
> @@ -150,6 +150,9 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
>  	int hw_cpu =3D get_hard_smp_processor_id(nr);
>  	int ioremappable;
>  	int ret =3D 0;
> +#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
> +	unsigned long *ptr;
> +#endif

What about if we can remove the ifdef around *ptr ...

>=20
>  	WARN_ON(nr < 0 || nr >=3D NR_CPUS);
>  	WARN_ON(hw_cpu < 0 || hw_cpu >=3D NR_CPUS);
> @@ -238,11 +241,22 @@ out:
>  #else
>  	smp_generic_kick_cpu(nr);
>=20
> +#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
> +	ptr  =3D (unsigned long *)((unsigned long)&__run_at_kexec);

... #endif here ...

> +	/* We shouldn't access spin_table from the bootloader to up any
> +	 * secondary cpu for kexec kernel, and kexec kernel already
> +	 * know how to jump to generic_secondary_smp_init.
> +	 */
> +	if (!*ptr) {
> +#endif

... remove #endif ...

>  	flush_spin_table(spin_table);
>  	out_be32(&spin_table->pir, hw_cpu);
>  	out_be64((u64 *)(&spin_table->addr_h),
>  	  __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
>  	flush_spin_table(spin_table);
> +#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
> +	}
> +#endif

--- remove above 3 lines

-Bharat

>  #endif
>=20
>  	local_irq_restore(flags);
> --
> 1.7.9.5
>=20
> _______________________________________________
> Linuxppc-dev mailing list
> Linuxppc-dev@lists.ozlabs.org
> https://lists.ozlabs.org/listinfo/linuxppc-dev

^ permalink raw reply	[flat|nested] 32+ messages in thread

* RE: [v2][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE
  2013-06-20  7:53   ` Tiejun Chen
@ 2013-07-03 11:52     ` Sethi Varun-B16395
  -1 siblings, 0 replies; 32+ messages in thread
From: Sethi Varun-B16395 @ 2013-07-03 11:52 UTC (permalink / raw)
  To: Tiejun Chen, benh; +Cc: linuxppc-dev, linux-kernel



> -----Original Message-----
> From: Linuxppc-dev [mailto:linuxppc-dev-
> bounces+varun.sethi=freescale.com@lists.ozlabs.org] On Behalf Of Tiejun
> Chen
> Sent: Thursday, June 20, 2013 1:23 PM
> To: benh@kernel.crashing.org
> Cc: linuxppc-dev@lists.ozlabs.org; linux-kernel@vger.kernel.org
> Subject: [v2][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE
> 
> book3e is different with book3s since 3s includes the exception vectors
> code in head_64.S as it relies on absolute addressing which is only
> possible within this compilation unit. So we have to get that label
> address with got.
> 
> And when boot a relocated kernel, we should reset ipvr properly again
> after .relocate.
> 
> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
> ---
>  arch/powerpc/include/asm/exception-64e.h |    8 ++++++++
>  arch/powerpc/kernel/exceptions-64e.S     |   15 ++++++++++++++-
>  arch/powerpc/kernel/head_64.S            |   22 ++++++++++++++++++++++
>  arch/powerpc/lib/feature-fixups.c        |    7 +++++++
>  4 files changed, 51 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/powerpc/include/asm/exception-64e.h
> b/arch/powerpc/include/asm/exception-64e.h
> index 51fa43e..89e940d 100644
> --- a/arch/powerpc/include/asm/exception-64e.h
> +++ b/arch/powerpc/include/asm/exception-64e.h
> @@ -214,10 +214,18 @@ exc_##label##_book3e:
>  #define TLB_MISS_STATS_SAVE_INFO_BOLTED  #endif
> 
> +#ifndef CONFIG_RELOCATABLE
>  #define SET_IVOR(vector_number, vector_offset)	\
>  	li	r3,vector_offset@l; 		\
>  	ori	r3,r3,interrupt_base_book3e@l;	\
>  	mtspr	SPRN_IVOR##vector_number,r3;
> +#else
> +#define SET_IVOR(vector_number, vector_offset)	\
> +	LOAD_REG_ADDR(r3,interrupt_base_book3e);\
> +	rlwinm	r3,r3,0,15,0;			\
> +	ori	r3,r3,vector_offset@l;		\
> +	mtspr	SPRN_IVOR##vector_number,r3;
> +#endif
> 
[Sethi Varun-B16395] Please add a documentation note here.

>  #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
> 
> diff --git a/arch/powerpc/kernel/exceptions-64e.S
> b/arch/powerpc/kernel/exceptions-64e.S
> index 645170a..4b23119 100644
> --- a/arch/powerpc/kernel/exceptions-64e.S
> +++ b/arch/powerpc/kernel/exceptions-64e.S
> @@ -1097,7 +1097,15 @@ skpinv:	addi	r6,r6,1
> 	/* Increment */
>   * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
>   */
>  	/* Now we branch the new virtual address mapped by this entry */
> +#ifdef CONFIG_RELOCATABLE
> +	/* We have to find out address from lr. */
> +	bl	1f		/* Find our address */
> +1:	mflr	r6
> +	addi	r6,r6,(2f - 1b)
> +	tovirt(r6,r6)
> +#else
>  	LOAD_REG_IMMEDIATE(r6,2f)
> +#endif
>  	lis	r7,MSR_KERNEL@h
>  	ori	r7,r7,MSR_KERNEL@l
>  	mtspr	SPRN_SRR0,r6
> @@ -1348,9 +1356,14 @@ _GLOBAL(book3e_secondary_thread_init)
>  	mflr	r28
>  	b	3b
> 
> -_STATIC(init_core_book3e)
> +_GLOBAL(init_core_book3e)
>  	/* Establish the interrupt vector base */
> +#ifdef CONFIG_RELOCATABLE
> +	tovirt(r2,r2)
> +	LOAD_REG_ADDR(r3, interrupt_base_book3e) #else
>  	LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
> +#endif
>  	mtspr	SPRN_IVPR,r3
>  	sync
>  	blr
[Sethi Varun-B16395] Please add a documentation note here as well. 

> diff --git a/arch/powerpc/kernel/head_64.S
> b/arch/powerpc/kernel/head_64.S index b61363d..0942f3a 100644
> --- a/arch/powerpc/kernel/head_64.S
> +++ b/arch/powerpc/kernel/head_64.S
> @@ -414,12 +414,22 @@ _STATIC(__after_prom_start)
>  	/* process relocations for the final address of the kernel */
>  	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
>  	sldi	r25,r25,32
> +#if defined(CONFIG_PPC_BOOK3E)
> +	tovirt(r26,r26)			/* on booke, we already run at
> PAGE_OFFSET */
> +#endif
>  	lwz	r7,__run_at_load-_stext(r26)
> +#if defined(CONFIG_PPC_BOOK3E)
> +	tophys(r26,r26)			/* Restore for the remains. */
> +#endif
>  	cmplwi	cr0,r7,1	/* flagged to stay where we are ? */
>  	bne	1f
>  	add	r25,r25,r26
>  1:	mr	r3,r25
>  	bl	.relocate
> +#if defined(CONFIG_PPC_BOOK3E)
> +	/* We should set ivpr again after .relocate. */
> +	bl	.init_core_book3e
> +#endif
>  #endif
> 
[Sethi Varun-B16395] A more detailed note over here would be useful.

>  /*
> @@ -447,12 +457,24 @@ _STATIC(__after_prom_start)
>   * variable __run_at_load, if it is set the kernel is treated as
> relocatable
>   * kernel, otherwise it will be moved to PHYSICAL_START
>   */
> +#if defined(CONFIG_PPC_BOOK3E)
> +	tovirt(r26,r26)			/* on booke, we already run at
> PAGE_OFFSET */
> +#endif
>  	lwz	r7,__run_at_load-_stext(r26)
> +#if defined(CONFIG_PPC_BOOK3E)
> +	tophys(r26,r26)			/* Restore for the remains. */
> +#endif
>  	cmplwi	cr0,r7,1
>  	bne	3f
> 
> +#ifdef CONFIG_PPC_BOOK3E
> +	LOAD_REG_ADDR(r5, interrupt_end_book3e)
> +	LOAD_REG_ADDR(r11, _stext)
> +	sub	r5,r5,r11
> +#else
>  	/* just copy interrupts */
>  	LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
> +#endif
>  	b	5f
>  3:
>  #endif
> diff --git a/arch/powerpc/lib/feature-fixups.c
> b/arch/powerpc/lib/feature-fixups.c
> index 7a8a748..13f20ed 100644
> --- a/arch/powerpc/lib/feature-fixups.c
> +++ b/arch/powerpc/lib/feature-fixups.c
> @@ -135,13 +135,20 @@ void do_final_fixups(void)  #if
> defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
>  	int *src, *dest;
>  	unsigned long length;
> +#ifdef CONFIG_PPC_BOOK3E
> +	extern char interrupt_end_book3e[];
> +#endif
[Sethi Varun-B16395] You can simply move this to sections.h and remove the ifdefs.

> 
>  	if (PHYSICAL_START == 0)
>  		return;
> 
>  	src = (int *)(KERNELBASE + PHYSICAL_START);
>  	dest = (int *)KERNELBASE;
> +#ifdef CONFIG_PPC_BOOK3E
> +	length = (interrupt_end_book3e - _stext) / sizeof(int); #else
>  	length = (__end_interrupts - _stext) / sizeof(int);
> +#endif
> 
>  	while (length--) {
>  		patch_instruction(dest, *src);

-Varun


^ permalink raw reply	[flat|nested] 32+ messages in thread

* RE: [v2][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE
@ 2013-07-03 11:52     ` Sethi Varun-B16395
  0 siblings, 0 replies; 32+ messages in thread
From: Sethi Varun-B16395 @ 2013-07-03 11:52 UTC (permalink / raw)
  To: Tiejun Chen, benh; +Cc: linuxppc-dev, linux-kernel



> -----Original Message-----
> From: Linuxppc-dev [mailto:linuxppc-dev-
> bounces+varun.sethi=freescale.com@lists.ozlabs.org] On Behalf Of Tiejun
> Chen
> Sent: Thursday, June 20, 2013 1:23 PM
> To: benh@kernel.crashing.org
> Cc: linuxppc-dev@lists.ozlabs.org; linux-kernel@vger.kernel.org
> Subject: [v2][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE
>=20
> book3e is different with book3s since 3s includes the exception vectors
> code in head_64.S as it relies on absolute addressing which is only
> possible within this compilation unit. So we have to get that label
> address with got.
>=20
> And when boot a relocated kernel, we should reset ipvr properly again
> after .relocate.
>=20
> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
> ---
>  arch/powerpc/include/asm/exception-64e.h |    8 ++++++++
>  arch/powerpc/kernel/exceptions-64e.S     |   15 ++++++++++++++-
>  arch/powerpc/kernel/head_64.S            |   22 ++++++++++++++++++++++
>  arch/powerpc/lib/feature-fixups.c        |    7 +++++++
>  4 files changed, 51 insertions(+), 1 deletion(-)
>=20
> diff --git a/arch/powerpc/include/asm/exception-64e.h
> b/arch/powerpc/include/asm/exception-64e.h
> index 51fa43e..89e940d 100644
> --- a/arch/powerpc/include/asm/exception-64e.h
> +++ b/arch/powerpc/include/asm/exception-64e.h
> @@ -214,10 +214,18 @@ exc_##label##_book3e:
>  #define TLB_MISS_STATS_SAVE_INFO_BOLTED  #endif
>=20
> +#ifndef CONFIG_RELOCATABLE
>  #define SET_IVOR(vector_number, vector_offset)	\
>  	li	r3,vector_offset@l; 		\
>  	ori	r3,r3,interrupt_base_book3e@l;	\
>  	mtspr	SPRN_IVOR##vector_number,r3;
> +#else
> +#define SET_IVOR(vector_number, vector_offset)	\
> +	LOAD_REG_ADDR(r3,interrupt_base_book3e);\
> +	rlwinm	r3,r3,0,15,0;			\
> +	ori	r3,r3,vector_offset@l;		\
> +	mtspr	SPRN_IVOR##vector_number,r3;
> +#endif
>=20
[Sethi Varun-B16395] Please add a documentation note here.

>  #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
>=20
> diff --git a/arch/powerpc/kernel/exceptions-64e.S
> b/arch/powerpc/kernel/exceptions-64e.S
> index 645170a..4b23119 100644
> --- a/arch/powerpc/kernel/exceptions-64e.S
> +++ b/arch/powerpc/kernel/exceptions-64e.S
> @@ -1097,7 +1097,15 @@ skpinv:	addi	r6,r6,1
> 	/* Increment */
>   * r4 =3D MAS0 w/TLBSEL & ESEL for the temp mapping
>   */
>  	/* Now we branch the new virtual address mapped by this entry */
> +#ifdef CONFIG_RELOCATABLE
> +	/* We have to find out address from lr. */
> +	bl	1f		/* Find our address */
> +1:	mflr	r6
> +	addi	r6,r6,(2f - 1b)
> +	tovirt(r6,r6)
> +#else
>  	LOAD_REG_IMMEDIATE(r6,2f)
> +#endif
>  	lis	r7,MSR_KERNEL@h
>  	ori	r7,r7,MSR_KERNEL@l
>  	mtspr	SPRN_SRR0,r6
> @@ -1348,9 +1356,14 @@ _GLOBAL(book3e_secondary_thread_init)
>  	mflr	r28
>  	b	3b
>=20
> -_STATIC(init_core_book3e)
> +_GLOBAL(init_core_book3e)
>  	/* Establish the interrupt vector base */
> +#ifdef CONFIG_RELOCATABLE
> +	tovirt(r2,r2)
> +	LOAD_REG_ADDR(r3, interrupt_base_book3e) #else
>  	LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
> +#endif
>  	mtspr	SPRN_IVPR,r3
>  	sync
>  	blr
[Sethi Varun-B16395] Please add a documentation note here as well.=20

> diff --git a/arch/powerpc/kernel/head_64.S
> b/arch/powerpc/kernel/head_64.S index b61363d..0942f3a 100644
> --- a/arch/powerpc/kernel/head_64.S
> +++ b/arch/powerpc/kernel/head_64.S
> @@ -414,12 +414,22 @@ _STATIC(__after_prom_start)
>  	/* process relocations for the final address of the kernel */
>  	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
>  	sldi	r25,r25,32
> +#if defined(CONFIG_PPC_BOOK3E)
> +	tovirt(r26,r26)			/* on booke, we already run at
> PAGE_OFFSET */
> +#endif
>  	lwz	r7,__run_at_load-_stext(r26)
> +#if defined(CONFIG_PPC_BOOK3E)
> +	tophys(r26,r26)			/* Restore for the remains. */
> +#endif
>  	cmplwi	cr0,r7,1	/* flagged to stay where we are ? */
>  	bne	1f
>  	add	r25,r25,r26
>  1:	mr	r3,r25
>  	bl	.relocate
> +#if defined(CONFIG_PPC_BOOK3E)
> +	/* We should set ivpr again after .relocate. */
> +	bl	.init_core_book3e
> +#endif
>  #endif
>=20
[Sethi Varun-B16395] A more detailed note over here would be useful.

>  /*
> @@ -447,12 +457,24 @@ _STATIC(__after_prom_start)
>   * variable __run_at_load, if it is set the kernel is treated as
> relocatable
>   * kernel, otherwise it will be moved to PHYSICAL_START
>   */
> +#if defined(CONFIG_PPC_BOOK3E)
> +	tovirt(r26,r26)			/* on booke, we already run at
> PAGE_OFFSET */
> +#endif
>  	lwz	r7,__run_at_load-_stext(r26)
> +#if defined(CONFIG_PPC_BOOK3E)
> +	tophys(r26,r26)			/* Restore for the remains. */
> +#endif
>  	cmplwi	cr0,r7,1
>  	bne	3f
>=20
> +#ifdef CONFIG_PPC_BOOK3E
> +	LOAD_REG_ADDR(r5, interrupt_end_book3e)
> +	LOAD_REG_ADDR(r11, _stext)
> +	sub	r5,r5,r11
> +#else
>  	/* just copy interrupts */
>  	LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
> +#endif
>  	b	5f
>  3:
>  #endif
> diff --git a/arch/powerpc/lib/feature-fixups.c
> b/arch/powerpc/lib/feature-fixups.c
> index 7a8a748..13f20ed 100644
> --- a/arch/powerpc/lib/feature-fixups.c
> +++ b/arch/powerpc/lib/feature-fixups.c
> @@ -135,13 +135,20 @@ void do_final_fixups(void)  #if
> defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
>  	int *src, *dest;
>  	unsigned long length;
> +#ifdef CONFIG_PPC_BOOK3E
> +	extern char interrupt_end_book3e[];
> +#endif
[Sethi Varun-B16395] You can simply move this to sections.h and remove the ifdefs.

>=20
>  	if (PHYSICAL_START =3D=3D 0)
>  		return;
>=20
>  	src =3D (int *)(KERNELBASE + PHYSICAL_START);
>  	dest =3D (int *)KERNELBASE;
> +#ifdef CONFIG_PPC_BOOK3E
> +	length =3D (interrupt_end_book3e - _stext) / sizeof(int); #else
>  	length =3D (__end_interrupts - _stext) / sizeof(int);
> +#endif
>=20
>  	while (length--) {
>  		patch_instruction(dest, *src);

-Varun

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [v2][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE
  2013-07-02  5:00     ` Bhushan Bharat-R65777
@ 2013-07-09  7:49       ` tiejun.chen
  -1 siblings, 0 replies; 32+ messages in thread
From: tiejun.chen @ 2013-07-09  7:49 UTC (permalink / raw)
  To: Bhushan Bharat-R65777; +Cc: benh, linuxppc-dev, linux-kernel

On 07/02/2013 01:00 PM, Bhushan Bharat-R65777 wrote:
>
>
>> -----Original Message-----
>> From: Linuxppc-dev [mailto:linuxppc-dev-
>> bounces+bharat.bhushan=freescale.com@lists.ozlabs.org] On Behalf Of Tiejun Chen
>> Sent: Thursday, June 20, 2013 1:23 PM
>> To: benh@kernel.crashing.org
>> Cc: linuxppc-dev@lists.ozlabs.org; linux-kernel@vger.kernel.org
>> Subject: [v2][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE
>>
>> book3e is different with book3s since 3s includes the exception
>> vectors code in head_64.S as it relies on absolute addressing
>> which is only possible within this compilation unit. So we have
>> to get that label address with got.
>>
>> And when boot a relocated kernel, we should reset ipvr properly again
>> after .relocate.
>>
>> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
>> ---

[snip]

>>   	int *src, *dest;
>>   	unsigned long length;
>> +#ifdef CONFIG_PPC_BOOK3E
>> +	extern char interrupt_end_book3e[];
>> +#endif
>
> Cannot we do this in arch/powerpc/kernel/asm/sections.h
>
>>
>>   	if (PHYSICAL_START == 0)
>>   		return;
>>
>>   	src = (int *)(KERNELBASE + PHYSICAL_START);
>>   	dest = (int *)KERNELBASE;
>> +#ifdef CONFIG_PPC_BOOK3E
>> +	length = (interrupt_end_book3e - _stext) / sizeof(int);
>> +#else
>>   	length = (__end_interrupts - _stext) / sizeof(int);
>> +#endif
>
> can we keep same name in books and booke; __end_interrupts ? this way we can avoid such #ifdefs

Yes, I think I can simplify this as you pointed :)

Thanks,

Tiejun

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [v2][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE
@ 2013-07-09  7:49       ` tiejun.chen
  0 siblings, 0 replies; 32+ messages in thread
From: tiejun.chen @ 2013-07-09  7:49 UTC (permalink / raw)
  To: Bhushan Bharat-R65777; +Cc: linuxppc-dev, linux-kernel

On 07/02/2013 01:00 PM, Bhushan Bharat-R65777 wrote:
>
>
>> -----Original Message-----
>> From: Linuxppc-dev [mailto:linuxppc-dev-
>> bounces+bharat.bhushan=freescale.com@lists.ozlabs.org] On Behalf Of Tiejun Chen
>> Sent: Thursday, June 20, 2013 1:23 PM
>> To: benh@kernel.crashing.org
>> Cc: linuxppc-dev@lists.ozlabs.org; linux-kernel@vger.kernel.org
>> Subject: [v2][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE
>>
>> book3e is different with book3s since 3s includes the exception
>> vectors code in head_64.S as it relies on absolute addressing
>> which is only possible within this compilation unit. So we have
>> to get that label address with got.
>>
>> And when boot a relocated kernel, we should reset ipvr properly again
>> after .relocate.
>>
>> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
>> ---

[snip]

>>   	int *src, *dest;
>>   	unsigned long length;
>> +#ifdef CONFIG_PPC_BOOK3E
>> +	extern char interrupt_end_book3e[];
>> +#endif
>
> Cannot we do this in arch/powerpc/kernel/asm/sections.h
>
>>
>>   	if (PHYSICAL_START == 0)
>>   		return;
>>
>>   	src = (int *)(KERNELBASE + PHYSICAL_START);
>>   	dest = (int *)KERNELBASE;
>> +#ifdef CONFIG_PPC_BOOK3E
>> +	length = (interrupt_end_book3e - _stext) / sizeof(int);
>> +#else
>>   	length = (__end_interrupts - _stext) / sizeof(int);
>> +#endif
>
> can we keep same name in books and booke; __end_interrupts ? this way we can avoid such #ifdefs

Yes, I think I can simplify this as you pointed :)

Thanks,

Tiejun

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [v2][PATCH 2/7] book3e/kexec/kdump: enable kexec for kernel
  2013-07-02  5:17     ` Bhushan Bharat-R65777
@ 2013-07-09  7:50       ` tiejun.chen
  -1 siblings, 0 replies; 32+ messages in thread
From: tiejun.chen @ 2013-07-09  7:50 UTC (permalink / raw)
  To: Bhushan Bharat-R65777; +Cc: benh, linuxppc-dev, linux-kernel

On 07/02/2013 01:17 PM, Bhushan Bharat-R65777 wrote:
>
>
>> -----Original Message-----
>> From: Linuxppc-dev [mailto:linuxppc-dev-
>> bounces+bharat.bhushan=freescale.com@lists.ozlabs.org] On Behalf Of Tiejun Chen
>> Sent: Thursday, June 20, 2013 1:23 PM
>> To: benh@kernel.crashing.org
>> Cc: linuxppc-dev@lists.ozlabs.org; linux-kernel@vger.kernel.org
>> Subject: [v2][PATCH 2/7] book3e/kexec/kdump: enable kexec for kernel
>>
>> We need to active KEXEC for book3e and bypass or convert non-book3e stuff
>> in kexec coverage.
>>
>> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
>> ---
>>   arch/powerpc/Kconfig                   |    2 +-
>>   arch/powerpc/kernel/machine_kexec_64.c |    6 ++++++
>>   arch/powerpc/kernel/misc_64.S          |    6 ++++++
>>   3 files changed, 13 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
>> index c33e3ad..6ecf3c9 100644
>> --- a/arch/powerpc/Kconfig
>> +++ b/arch/powerpc/Kconfig
>> @@ -364,7 +364,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
>>
>>   config KEXEC
>>   	bool "kexec system call"
>> -	depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
>> +	depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) || PPC_BOOK3E
>>   	help
>>   	  kexec is a system call that implements the ability to shutdown your
>>   	  current kernel, and to start another kernel.  It is like a reboot
>> diff --git a/arch/powerpc/kernel/machine_kexec_64.c
>> b/arch/powerpc/kernel/machine_kexec_64.c
>> index 611acdf..ef39271 100644
>> --- a/arch/powerpc/kernel/machine_kexec_64.c
>> +++ b/arch/powerpc/kernel/machine_kexec_64.c
>> @@ -33,6 +33,7 @@
>>   int default_machine_kexec_prepare(struct kimage *image)
>>   {
>>   	int i;
>> +#ifndef CONFIG_PPC_BOOK3E
>>   	unsigned long begin, end;	/* limits of segment */
>>   	unsigned long low, high;	/* limits of blocked memory range */
>>   	struct device_node *node;
>> @@ -41,6 +42,7 @@ int default_machine_kexec_prepare(struct kimage *image)
>>
>>   	if (!ppc_md.hpte_clear_all)
>>   		return -ENOENT;
>> +#endif
>
> Do we really need this function for book3e? can we have a separate function rather than multiple confusing ifdef?

I prefer we have a separate function for book3e.

Thanks

Tiejun

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [v2][PATCH 2/7] book3e/kexec/kdump: enable kexec for kernel
@ 2013-07-09  7:50       ` tiejun.chen
  0 siblings, 0 replies; 32+ messages in thread
From: tiejun.chen @ 2013-07-09  7:50 UTC (permalink / raw)
  To: Bhushan Bharat-R65777; +Cc: linuxppc-dev, linux-kernel

On 07/02/2013 01:17 PM, Bhushan Bharat-R65777 wrote:
>
>
>> -----Original Message-----
>> From: Linuxppc-dev [mailto:linuxppc-dev-
>> bounces+bharat.bhushan=freescale.com@lists.ozlabs.org] On Behalf Of Tiejun Chen
>> Sent: Thursday, June 20, 2013 1:23 PM
>> To: benh@kernel.crashing.org
>> Cc: linuxppc-dev@lists.ozlabs.org; linux-kernel@vger.kernel.org
>> Subject: [v2][PATCH 2/7] book3e/kexec/kdump: enable kexec for kernel
>>
>> We need to active KEXEC for book3e and bypass or convert non-book3e stuff
>> in kexec coverage.
>>
>> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
>> ---
>>   arch/powerpc/Kconfig                   |    2 +-
>>   arch/powerpc/kernel/machine_kexec_64.c |    6 ++++++
>>   arch/powerpc/kernel/misc_64.S          |    6 ++++++
>>   3 files changed, 13 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
>> index c33e3ad..6ecf3c9 100644
>> --- a/arch/powerpc/Kconfig
>> +++ b/arch/powerpc/Kconfig
>> @@ -364,7 +364,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
>>
>>   config KEXEC
>>   	bool "kexec system call"
>> -	depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
>> +	depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) || PPC_BOOK3E
>>   	help
>>   	  kexec is a system call that implements the ability to shutdown your
>>   	  current kernel, and to start another kernel.  It is like a reboot
>> diff --git a/arch/powerpc/kernel/machine_kexec_64.c
>> b/arch/powerpc/kernel/machine_kexec_64.c
>> index 611acdf..ef39271 100644
>> --- a/arch/powerpc/kernel/machine_kexec_64.c
>> +++ b/arch/powerpc/kernel/machine_kexec_64.c
>> @@ -33,6 +33,7 @@
>>   int default_machine_kexec_prepare(struct kimage *image)
>>   {
>>   	int i;
>> +#ifndef CONFIG_PPC_BOOK3E
>>   	unsigned long begin, end;	/* limits of segment */
>>   	unsigned long low, high;	/* limits of blocked memory range */
>>   	struct device_node *node;
>> @@ -41,6 +42,7 @@ int default_machine_kexec_prepare(struct kimage *image)
>>
>>   	if (!ppc_md.hpte_clear_all)
>>   		return -ENOENT;
>> +#endif
>
> Do we really need this function for book3e? can we have a separate function rather than multiple confusing ifdef?

I prefer we have a separate function for book3e.

Thanks

Tiejun

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [v2][PATCH 4/7] book3e/kexec/kdump: introduce a kexec kernel flag
  2013-07-02  5:37     ` Bhushan Bharat-R65777
@ 2013-07-09  7:51       ` tiejun.chen
  -1 siblings, 0 replies; 32+ messages in thread
From: tiejun.chen @ 2013-07-09  7:51 UTC (permalink / raw)
  To: Bhushan Bharat-R65777; +Cc: benh, linuxppc-dev, linux-kernel

On 07/02/2013 01:37 PM, Bhushan Bharat-R65777 wrote:
>
>
>> -----Original Message-----
>> From: Linuxppc-dev [mailto:linuxppc-dev-
>> bounces+bharat.bhushan=freescale.com@lists.ozlabs.org] On Behalf Of Tiejun Chen
>> Sent: Thursday, June 20, 2013 1:23 PM
>> To: benh@kernel.crashing.org
>> Cc: linuxppc-dev@lists.ozlabs.org; linux-kernel@vger.kernel.org
>> Subject: [v2][PATCH 4/7] book3e/kexec/kdump: introduce a kexec kernel flag
>>
>> We need to introduce a flag to indicate we're already running
>> a kexec kernel then we can go proper path. For example, We
>> shouldn't access spin_table from the bootloader to up any secondary
>> cpu for kexec kernel, and kexec kernel already know how to jump to
>> generic_secondary_smp_init.
>>
>> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
>> ---

[snip]

>> +++ b/arch/powerpc/platforms/85xx/smp.c
>> @@ -150,6 +150,9 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
>>   	int hw_cpu = get_hard_smp_processor_id(nr);
>>   	int ioremappable;
>>   	int ret = 0;
>> +#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
>> +	unsigned long *ptr;
>> +#endif
>
> What about if we can remove the ifdef around *ptr ...
>
>>
>>   	WARN_ON(nr < 0 || nr >= NR_CPUS);
>>   	WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
>> @@ -238,11 +241,22 @@ out:
>>   #else
>>   	smp_generic_kick_cpu(nr);
>>
>> +#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
>> +	ptr  = (unsigned long *)((unsigned long)&__run_at_kexec);
>
> ... #endif here ...
>
>> +	/* We shouldn't access spin_table from the bootloader to up any
>> +	 * secondary cpu for kexec kernel, and kexec kernel already
>> +	 * know how to jump to generic_secondary_smp_init.
>> +	 */
>> +	if (!*ptr) {
>> +#endif
>
> ... remove #endif ...
>
>>   	flush_spin_table(spin_table);
>>   	out_be32(&spin_table->pir, hw_cpu);
>>   	out_be64((u64 *)(&spin_table->addr_h),
>>   	  __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
>>   	flush_spin_table(spin_table);
>> +#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
>> +	}
>> +#endif
>
> --- remove above 3 lines

I'd like to try to address your comments in the next version.

Thanks

Tiejun

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [v2][PATCH 4/7] book3e/kexec/kdump: introduce a kexec kernel flag
@ 2013-07-09  7:51       ` tiejun.chen
  0 siblings, 0 replies; 32+ messages in thread
From: tiejun.chen @ 2013-07-09  7:51 UTC (permalink / raw)
  To: Bhushan Bharat-R65777; +Cc: linuxppc-dev, linux-kernel

On 07/02/2013 01:37 PM, Bhushan Bharat-R65777 wrote:
>
>
>> -----Original Message-----
>> From: Linuxppc-dev [mailto:linuxppc-dev-
>> bounces+bharat.bhushan=freescale.com@lists.ozlabs.org] On Behalf Of Tiejun Chen
>> Sent: Thursday, June 20, 2013 1:23 PM
>> To: benh@kernel.crashing.org
>> Cc: linuxppc-dev@lists.ozlabs.org; linux-kernel@vger.kernel.org
>> Subject: [v2][PATCH 4/7] book3e/kexec/kdump: introduce a kexec kernel flag
>>
>> We need to introduce a flag to indicate we're already running
>> a kexec kernel then we can go proper path. For example, We
>> shouldn't access spin_table from the bootloader to up any secondary
>> cpu for kexec kernel, and kexec kernel already know how to jump to
>> generic_secondary_smp_init.
>>
>> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
>> ---

[snip]

>> +++ b/arch/powerpc/platforms/85xx/smp.c
>> @@ -150,6 +150,9 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
>>   	int hw_cpu = get_hard_smp_processor_id(nr);
>>   	int ioremappable;
>>   	int ret = 0;
>> +#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
>> +	unsigned long *ptr;
>> +#endif
>
> What about if we can remove the ifdef around *ptr ...
>
>>
>>   	WARN_ON(nr < 0 || nr >= NR_CPUS);
>>   	WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
>> @@ -238,11 +241,22 @@ out:
>>   #else
>>   	smp_generic_kick_cpu(nr);
>>
>> +#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
>> +	ptr  = (unsigned long *)((unsigned long)&__run_at_kexec);
>
> ... #endif here ...
>
>> +	/* We shouldn't access spin_table from the bootloader to up any
>> +	 * secondary cpu for kexec kernel, and kexec kernel already
>> +	 * know how to jump to generic_secondary_smp_init.
>> +	 */
>> +	if (!*ptr) {
>> +#endif
>
> ... remove #endif ...
>
>>   	flush_spin_table(spin_table);
>>   	out_be32(&spin_table->pir, hw_cpu);
>>   	out_be64((u64 *)(&spin_table->addr_h),
>>   	  __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
>>   	flush_spin_table(spin_table);
>> +#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
>> +	}
>> +#endif
>
> --- remove above 3 lines

I'd like to try to address your comments in the next version.

Thanks

Tiejun

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [v2][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE
  2013-07-03 11:52     ` Sethi Varun-B16395
@ 2013-07-09  7:55       ` tiejun.chen
  -1 siblings, 0 replies; 32+ messages in thread
From: tiejun.chen @ 2013-07-09  7:55 UTC (permalink / raw)
  To: Sethi Varun-B16395; +Cc: benh, linuxppc-dev, linux-kernel

On 07/03/2013 07:52 PM, Sethi Varun-B16395 wrote:
>
>
>> -----Original Message-----
>> From: Linuxppc-dev [mailto:linuxppc-dev-
>> bounces+varun.sethi=freescale.com@lists.ozlabs.org] On Behalf Of Tiejun
>> Chen
>> Sent: Thursday, June 20, 2013 1:23 PM
>> To: benh@kernel.crashing.org
>> Cc: linuxppc-dev@lists.ozlabs.org; linux-kernel@vger.kernel.org
>> Subject: [v2][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE
>>
>> book3e is different with book3s since 3s includes the exception vectors
>> code in head_64.S as it relies on absolute addressing which is only
>> possible within this compilation unit. So we have to get that label
>> address with got.
>>
>> And when boot a relocated kernel, we should reset ipvr properly again
>> after .relocate.
>>
>> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
>> ---
>>   arch/powerpc/include/asm/exception-64e.h |    8 ++++++++
>>   arch/powerpc/kernel/exceptions-64e.S     |   15 ++++++++++++++-
>>   arch/powerpc/kernel/head_64.S            |   22 ++++++++++++++++++++++
>>   arch/powerpc/lib/feature-fixups.c        |    7 +++++++
>>   4 files changed, 51 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/powerpc/include/asm/exception-64e.h
>> b/arch/powerpc/include/asm/exception-64e.h
>> index 51fa43e..89e940d 100644
>> --- a/arch/powerpc/include/asm/exception-64e.h
>> +++ b/arch/powerpc/include/asm/exception-64e.h
>> @@ -214,10 +214,18 @@ exc_##label##_book3e:
>>   #define TLB_MISS_STATS_SAVE_INFO_BOLTED  #endif
>>
>> +#ifndef CONFIG_RELOCATABLE
>>   #define SET_IVOR(vector_number, vector_offset)	\
>>   	li	r3,vector_offset@l; 		\
>>   	ori	r3,r3,interrupt_base_book3e@l;	\
>>   	mtspr	SPRN_IVOR##vector_number,r3;
>> +#else
>> +#define SET_IVOR(vector_number, vector_offset)	\
>> +	LOAD_REG_ADDR(r3,interrupt_base_book3e);\
>> +	rlwinm	r3,r3,0,15,0;			\
>> +	ori	r3,r3,vector_offset@l;		\
>> +	mtspr	SPRN_IVOR##vector_number,r3;
>> +#endif
>>
> [Sethi Varun-B16395] Please add a documentation note here.

Okay.

>
>>   #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
>>
>> diff --git a/arch/powerpc/kernel/exceptions-64e.S
>> b/arch/powerpc/kernel/exceptions-64e.S
>> index 645170a..4b23119 100644
>> --- a/arch/powerpc/kernel/exceptions-64e.S
>> +++ b/arch/powerpc/kernel/exceptions-64e.S
>> @@ -1097,7 +1097,15 @@ skpinv:	addi	r6,r6,1
>> 	/* Increment */
>>    * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
>>    */
>>   	/* Now we branch the new virtual address mapped by this entry */
>> +#ifdef CONFIG_RELOCATABLE
>> +	/* We have to find out address from lr. */
>> +	bl	1f		/* Find our address */
>> +1:	mflr	r6
>> +	addi	r6,r6,(2f - 1b)
>> +	tovirt(r6,r6)
>> +#else
>>   	LOAD_REG_IMMEDIATE(r6,2f)
>> +#endif
>>   	lis	r7,MSR_KERNEL@h
>>   	ori	r7,r7,MSR_KERNEL@l
>>   	mtspr	SPRN_SRR0,r6
>> @@ -1348,9 +1356,14 @@ _GLOBAL(book3e_secondary_thread_init)
>>   	mflr	r28
>>   	b	3b
>>
>> -_STATIC(init_core_book3e)
>> +_GLOBAL(init_core_book3e)
>>   	/* Establish the interrupt vector base */
>> +#ifdef CONFIG_RELOCATABLE
>> +	tovirt(r2,r2)
>> +	LOAD_REG_ADDR(r3, interrupt_base_book3e) #else
>>   	LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
>> +#endif
>>   	mtspr	SPRN_IVPR,r3
>>   	sync
>>   	blr
> [Sethi Varun-B16395] Please add a documentation note here as well.

Okay.

>
>> diff --git a/arch/powerpc/kernel/head_64.S
>> b/arch/powerpc/kernel/head_64.S index b61363d..0942f3a 100644
>> --- a/arch/powerpc/kernel/head_64.S
>> +++ b/arch/powerpc/kernel/head_64.S
>> @@ -414,12 +414,22 @@ _STATIC(__after_prom_start)
>>   	/* process relocations for the final address of the kernel */
>>   	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
>>   	sldi	r25,r25,32
>> +#if defined(CONFIG_PPC_BOOK3E)
>> +	tovirt(r26,r26)			/* on booke, we already run at
>> PAGE_OFFSET */
>> +#endif
>>   	lwz	r7,__run_at_load-_stext(r26)
>> +#if defined(CONFIG_PPC_BOOK3E)
>> +	tophys(r26,r26)			/* Restore for the remains. */
>> +#endif
>>   	cmplwi	cr0,r7,1	/* flagged to stay where we are ? */
>>   	bne	1f
>>   	add	r25,r25,r26
>>   1:	mr	r3,r25
>>   	bl	.relocate
>> +#if defined(CONFIG_PPC_BOOK3E)
>> +	/* We should set ivpr again after .relocate. */
>> +	bl	.init_core_book3e
>> +#endif
>>   #endif
>>
> [Sethi Varun-B16395] A more detailed note over here would be useful.

Okay.

>
>>   /*
>> @@ -447,12 +457,24 @@ _STATIC(__after_prom_start)
>>    * variable __run_at_load, if it is set the kernel is treated as
>> relocatable
>>    * kernel, otherwise it will be moved to PHYSICAL_START
>>    */
>> +#if defined(CONFIG_PPC_BOOK3E)
>> +	tovirt(r26,r26)			/* on booke, we already run at
>> PAGE_OFFSET */
>> +#endif
>>   	lwz	r7,__run_at_load-_stext(r26)
>> +#if defined(CONFIG_PPC_BOOK3E)
>> +	tophys(r26,r26)			/* Restore for the remains. */
>> +#endif
>>   	cmplwi	cr0,r7,1
>>   	bne	3f
>>
>> +#ifdef CONFIG_PPC_BOOK3E
>> +	LOAD_REG_ADDR(r5, interrupt_end_book3e)
>> +	LOAD_REG_ADDR(r11, _stext)
>> +	sub	r5,r5,r11
>> +#else
>>   	/* just copy interrupts */
>>   	LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
>> +#endif
>>   	b	5f
>>   3:
>>   #endif
>> diff --git a/arch/powerpc/lib/feature-fixups.c
>> b/arch/powerpc/lib/feature-fixups.c
>> index 7a8a748..13f20ed 100644
>> --- a/arch/powerpc/lib/feature-fixups.c
>> +++ b/arch/powerpc/lib/feature-fixups.c
>> @@ -135,13 +135,20 @@ void do_final_fixups(void)  #if
>> defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
>>   	int *src, *dest;
>>   	unsigned long length;
>> +#ifdef CONFIG_PPC_BOOK3E
>> +	extern char interrupt_end_book3e[];
>> +#endif
> [Sethi Varun-B16395] You can simply move this to sections.h and remove the ifdefs.

I would replace interrupt_end_book3e with __end_interrupts then we can have a 
unique label for book3e and book3s as Bharat mentioned previously.

Thanks

Tiejun


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [v2][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE
@ 2013-07-09  7:55       ` tiejun.chen
  0 siblings, 0 replies; 32+ messages in thread
From: tiejun.chen @ 2013-07-09  7:55 UTC (permalink / raw)
  To: Sethi Varun-B16395; +Cc: linuxppc-dev, linux-kernel

On 07/03/2013 07:52 PM, Sethi Varun-B16395 wrote:
>
>
>> -----Original Message-----
>> From: Linuxppc-dev [mailto:linuxppc-dev-
>> bounces+varun.sethi=freescale.com@lists.ozlabs.org] On Behalf Of Tiejun
>> Chen
>> Sent: Thursday, June 20, 2013 1:23 PM
>> To: benh@kernel.crashing.org
>> Cc: linuxppc-dev@lists.ozlabs.org; linux-kernel@vger.kernel.org
>> Subject: [v2][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE
>>
>> book3e is different with book3s since 3s includes the exception vectors
>> code in head_64.S as it relies on absolute addressing which is only
>> possible within this compilation unit. So we have to get that label
>> address with got.
>>
>> And when boot a relocated kernel, we should reset ipvr properly again
>> after .relocate.
>>
>> Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
>> ---
>>   arch/powerpc/include/asm/exception-64e.h |    8 ++++++++
>>   arch/powerpc/kernel/exceptions-64e.S     |   15 ++++++++++++++-
>>   arch/powerpc/kernel/head_64.S            |   22 ++++++++++++++++++++++
>>   arch/powerpc/lib/feature-fixups.c        |    7 +++++++
>>   4 files changed, 51 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/powerpc/include/asm/exception-64e.h
>> b/arch/powerpc/include/asm/exception-64e.h
>> index 51fa43e..89e940d 100644
>> --- a/arch/powerpc/include/asm/exception-64e.h
>> +++ b/arch/powerpc/include/asm/exception-64e.h
>> @@ -214,10 +214,18 @@ exc_##label##_book3e:
>>   #define TLB_MISS_STATS_SAVE_INFO_BOLTED  #endif
>>
>> +#ifndef CONFIG_RELOCATABLE
>>   #define SET_IVOR(vector_number, vector_offset)	\
>>   	li	r3,vector_offset@l; 		\
>>   	ori	r3,r3,interrupt_base_book3e@l;	\
>>   	mtspr	SPRN_IVOR##vector_number,r3;
>> +#else
>> +#define SET_IVOR(vector_number, vector_offset)	\
>> +	LOAD_REG_ADDR(r3,interrupt_base_book3e);\
>> +	rlwinm	r3,r3,0,15,0;			\
>> +	ori	r3,r3,vector_offset@l;		\
>> +	mtspr	SPRN_IVOR##vector_number,r3;
>> +#endif
>>
> [Sethi Varun-B16395] Please add a documentation note here.

Okay.

>
>>   #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
>>
>> diff --git a/arch/powerpc/kernel/exceptions-64e.S
>> b/arch/powerpc/kernel/exceptions-64e.S
>> index 645170a..4b23119 100644
>> --- a/arch/powerpc/kernel/exceptions-64e.S
>> +++ b/arch/powerpc/kernel/exceptions-64e.S
>> @@ -1097,7 +1097,15 @@ skpinv:	addi	r6,r6,1
>> 	/* Increment */
>>    * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
>>    */
>>   	/* Now we branch the new virtual address mapped by this entry */
>> +#ifdef CONFIG_RELOCATABLE
>> +	/* We have to find out address from lr. */
>> +	bl	1f		/* Find our address */
>> +1:	mflr	r6
>> +	addi	r6,r6,(2f - 1b)
>> +	tovirt(r6,r6)
>> +#else
>>   	LOAD_REG_IMMEDIATE(r6,2f)
>> +#endif
>>   	lis	r7,MSR_KERNEL@h
>>   	ori	r7,r7,MSR_KERNEL@l
>>   	mtspr	SPRN_SRR0,r6
>> @@ -1348,9 +1356,14 @@ _GLOBAL(book3e_secondary_thread_init)
>>   	mflr	r28
>>   	b	3b
>>
>> -_STATIC(init_core_book3e)
>> +_GLOBAL(init_core_book3e)
>>   	/* Establish the interrupt vector base */
>> +#ifdef CONFIG_RELOCATABLE
>> +	tovirt(r2,r2)
>> +	LOAD_REG_ADDR(r3, interrupt_base_book3e) #else
>>   	LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
>> +#endif
>>   	mtspr	SPRN_IVPR,r3
>>   	sync
>>   	blr
> [Sethi Varun-B16395] Please add a documentation note here as well.

Okay.

>
>> diff --git a/arch/powerpc/kernel/head_64.S
>> b/arch/powerpc/kernel/head_64.S index b61363d..0942f3a 100644
>> --- a/arch/powerpc/kernel/head_64.S
>> +++ b/arch/powerpc/kernel/head_64.S
>> @@ -414,12 +414,22 @@ _STATIC(__after_prom_start)
>>   	/* process relocations for the final address of the kernel */
>>   	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
>>   	sldi	r25,r25,32
>> +#if defined(CONFIG_PPC_BOOK3E)
>> +	tovirt(r26,r26)			/* on booke, we already run at
>> PAGE_OFFSET */
>> +#endif
>>   	lwz	r7,__run_at_load-_stext(r26)
>> +#if defined(CONFIG_PPC_BOOK3E)
>> +	tophys(r26,r26)			/* Restore for the remains. */
>> +#endif
>>   	cmplwi	cr0,r7,1	/* flagged to stay where we are ? */
>>   	bne	1f
>>   	add	r25,r25,r26
>>   1:	mr	r3,r25
>>   	bl	.relocate
>> +#if defined(CONFIG_PPC_BOOK3E)
>> +	/* We should set ivpr again after .relocate. */
>> +	bl	.init_core_book3e
>> +#endif
>>   #endif
>>
> [Sethi Varun-B16395] A more detailed note over here would be useful.

Okay.

>
>>   /*
>> @@ -447,12 +457,24 @@ _STATIC(__after_prom_start)
>>    * variable __run_at_load, if it is set the kernel is treated as
>> relocatable
>>    * kernel, otherwise it will be moved to PHYSICAL_START
>>    */
>> +#if defined(CONFIG_PPC_BOOK3E)
>> +	tovirt(r26,r26)			/* on booke, we already run at
>> PAGE_OFFSET */
>> +#endif
>>   	lwz	r7,__run_at_load-_stext(r26)
>> +#if defined(CONFIG_PPC_BOOK3E)
>> +	tophys(r26,r26)			/* Restore for the remains. */
>> +#endif
>>   	cmplwi	cr0,r7,1
>>   	bne	3f
>>
>> +#ifdef CONFIG_PPC_BOOK3E
>> +	LOAD_REG_ADDR(r5, interrupt_end_book3e)
>> +	LOAD_REG_ADDR(r11, _stext)
>> +	sub	r5,r5,r11
>> +#else
>>   	/* just copy interrupts */
>>   	LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
>> +#endif
>>   	b	5f
>>   3:
>>   #endif
>> diff --git a/arch/powerpc/lib/feature-fixups.c
>> b/arch/powerpc/lib/feature-fixups.c
>> index 7a8a748..13f20ed 100644
>> --- a/arch/powerpc/lib/feature-fixups.c
>> +++ b/arch/powerpc/lib/feature-fixups.c
>> @@ -135,13 +135,20 @@ void do_final_fixups(void)  #if
>> defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
>>   	int *src, *dest;
>>   	unsigned long length;
>> +#ifdef CONFIG_PPC_BOOK3E
>> +	extern char interrupt_end_book3e[];
>> +#endif
> [Sethi Varun-B16395] You can simply move this to sections.h and remove the ifdefs.

I would replace interrupt_end_book3e with __end_interrupts then we can have a 
unique label for book3e and book3s as Bharat mentioned previously.

Thanks

Tiejun

^ permalink raw reply	[flat|nested] 32+ messages in thread

end of thread, other threads:[~2013-07-09  7:54 UTC | newest]

Thread overview: 32+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2013-06-20  7:53 [v2][PATCH 0/7] powerpc/book3e: support kexec and kdump Tiejun Chen
2013-06-20  7:53 ` Tiejun Chen
2013-06-20  7:53 ` [v2][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE Tiejun Chen
2013-06-20  7:53   ` Tiejun Chen
2013-07-02  5:00   ` Bhushan Bharat-R65777
2013-07-02  5:00     ` Bhushan Bharat-R65777
2013-07-09  7:49     ` tiejun.chen
2013-07-09  7:49       ` tiejun.chen
2013-07-03 11:52   ` Sethi Varun-B16395
2013-07-03 11:52     ` Sethi Varun-B16395
2013-07-09  7:55     ` tiejun.chen
2013-07-09  7:55       ` tiejun.chen
2013-06-20  7:53 ` [v2][PATCH 2/7] book3e/kexec/kdump: enable kexec for kernel Tiejun Chen
2013-06-20  7:53   ` Tiejun Chen
2013-07-02  5:17   ` Bhushan Bharat-R65777
2013-07-02  5:17     ` Bhushan Bharat-R65777
2013-07-09  7:50     ` tiejun.chen
2013-07-09  7:50       ` tiejun.chen
2013-06-20  7:53 ` [v2][PATCH 3/7] book3e/kexec/kdump: create a 1:1 TLB mapping Tiejun Chen
2013-06-20  7:53   ` Tiejun Chen
2013-06-20  7:53 ` [v2][PATCH 4/7] book3e/kexec/kdump: introduce a kexec kernel flag Tiejun Chen
2013-06-20  7:53   ` Tiejun Chen
2013-07-02  5:37   ` Bhushan Bharat-R65777
2013-07-02  5:37     ` Bhushan Bharat-R65777
2013-07-09  7:51     ` tiejun.chen
2013-07-09  7:51       ` tiejun.chen
2013-06-20  7:53 ` [v2][PATCH 5/7] book3e/kexec/kdump: implement ppc64 kexec specfic Tiejun Chen
2013-06-20  7:53   ` Tiejun Chen
2013-06-20  7:53 ` [v2][PATCH 6/7] book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET Tiejun Chen
2013-06-20  7:53   ` Tiejun Chen
2013-06-20  7:53 ` [v2][PATCH 7/7] book3e/kexec/kdump: recover "r4 = 0" to create the initial TLB Tiejun Chen
2013-06-20  7:53   ` Tiejun Chen

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.