* [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1
@ 2015-11-20  9:13 Chenhui Zhao
  2015-11-20  9:13 ` [PATCH v3 2/6] powerpc/cache: add cache flush operation for various e500 Chenhui Zhao
                   ` (6 more replies)
  0 siblings, 7 replies; 14+ messages in thread
From: Chenhui Zhao @ 2015-11-20  9:13 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: scottwood

On e6500, in the case of CPU hotplug, either thread in a core
may be the first thread to initialize TLB1. The subsequent thread
must not set it up again.

The code is derived from a suggestion by Scott Wood.
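
For illustration, a minimal sketch (not part of the patch, helper name
assumed) of the check this enables; get_tensr() is added by this patch:

	/* True only on the first enabled thread of the core. */
	static inline bool core_first_thread_up(void)
	{
		/*
		 * TENSR has one bit per enabled thread in the core; on
		 * non-SMT parts get_tensr() returns 1, so this holds there.
		 */
		return hweight32(get_tensr()) == 1;
	}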

Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
---
 arch/powerpc/include/asm/cputhreads.h | 7 +++++++
 arch/powerpc/mm/tlb_nohash.c          | 4 +---
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index ba42e46..b56cece 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -94,6 +94,13 @@ static inline int cpu_last_thread_sibling(int cpu)
 	return cpu | (threads_per_core - 1);
 }
 
+static inline u32 get_tensr(void)
+{
+	if (cpu_has_feature(CPU_FTR_SMT))
+		return mfspr(SPRN_TENSR);
+	else
+		return 1;
+}
 
 
 #endif /* _ASM_POWERPC_CPUTHREADS_H */
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index bb04e4d..f466848 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -640,9 +640,7 @@ static void early_init_this_mmu(void)
 		 * transient mapping would cause problems.
 		 */
 #ifdef CONFIG_SMP
-		if (cpu != boot_cpuid &&
-		    (cpu != cpu_first_thread_sibling(cpu) ||
-		     cpu == cpu_first_thread_sibling(boot_cpuid)))
+		if (hweight32(get_tensr()) > 1)
 			map = false;
 #endif
 
-- 
1.9.1


* [PATCH v3 2/6] powerpc/cache: add cache flush operation for various e500
  2015-11-20  9:13 [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1 Chenhui Zhao
@ 2015-11-20  9:13 ` Chenhui Zhao
  2015-11-20  9:13 ` [PATCH v3 3/6] powerpc/rcpm: add RCPM driver Chenhui Zhao
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 14+ messages in thread
From: Chenhui Zhao @ 2015-11-20  9:13 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: scottwood

Various e500 cores have different cache architectures, so they
need different cache flush operations. Therefore, add a callback
function, cpu_down_flush, to struct cpu_spec. The cache flush
operation for the specific kind of e500 is selected at init time.
The callback function will flush all caches inside the current cpu.
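
A minimal usage sketch (not part of the patch, function name assumed) of how
the dying CPU reaches the callback through cur_cpu_spec:

	#include <asm/cputable.h>

	static void example_flush_before_cpu_down(void)
	{
		/*
		 * cpu_down_flush points at e.g. cpu_down_flush_e6500(),
		 * selected when the cpu_spec table was matched at boot.
		 */
		if (cur_cpu_spec->cpu_down_flush)
			cur_cpu_spec->cpu_down_flush();
	}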

Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
Signed-off-by: Tang Yuantian <Yuantian.Tang@freescale.com>
---
Changes for v3:
* remove unnecessary ifdef

 arch/powerpc/include/asm/cacheflush.h     |   2 -
 arch/powerpc/include/asm/cputable.h       |   8 +++
 arch/powerpc/kernel/asm-offsets.c         |   1 +
 arch/powerpc/kernel/cpu_setup_fsl_booke.S | 112 ++++++++++++++++++++++++++++++
 arch/powerpc/kernel/cputable.c            |   4 ++
 arch/powerpc/kernel/head_fsl_booke.S      |  74 --------------------
 arch/powerpc/platforms/85xx/smp.c         |   5 +-
 7 files changed, 128 insertions(+), 78 deletions(-)

diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 6229e6b..47add2e 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -30,8 +30,6 @@ extern void flush_dcache_page(struct page *page);
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
-extern void __flush_disable_L1(void);
-
 extern void flush_icache_range(unsigned long, unsigned long);
 extern void flush_icache_user_range(struct vm_area_struct *vma,
 				    struct page *page, unsigned long addr,
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index b118072..c25de2d 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -43,6 +43,11 @@ extern int machine_check_e500(struct pt_regs *regs);
 extern int machine_check_e200(struct pt_regs *regs);
 extern int machine_check_47x(struct pt_regs *regs);
 
+extern void cpu_down_flush_e500v2(void);
+extern void cpu_down_flush_e500mc(void);
+extern void cpu_down_flush_e5500(void);
+extern void cpu_down_flush_e6500(void);
+
 /* NOTE WELL: Update identify_cpu() if fields are added or removed! */
 struct cpu_spec {
 	/* CPU is matched via (PVR & pvr_mask) == pvr_value */
@@ -59,6 +64,9 @@ struct cpu_spec {
 	unsigned int	icache_bsize;
 	unsigned int	dcache_bsize;
 
+	/* flush caches inside the current cpu */
+	void (*cpu_down_flush)(void);
+
 	/* number of performance monitor counters */
 	unsigned int	num_pmcs;
 	enum powerpc_pmc_type pmc_type;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 221d584..188e433 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -372,6 +372,7 @@ int main(void)
 	DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
 	DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
 	DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore));
+	DEFINE(CPU_DOWN_FLUSH, offsetof(struct cpu_spec, cpu_down_flush));
 
 	DEFINE(pbe_address, offsetof(struct pbe, address));
 	DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index dddba3e..462aed9 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -13,11 +13,13 @@
  *
  */
 
+#include <asm/page.h>
 #include <asm/processor.h>
 #include <asm/cputable.h>
 #include <asm/ppc_asm.h>
 #include <asm/mmu-book3e.h>
 #include <asm/asm-offsets.h>
+#include <asm/mpc85xx.h>
 
 _GLOBAL(__e500_icache_setup)
 	mfspr	r0, SPRN_L1CSR1
@@ -233,3 +235,113 @@ _GLOBAL(__setup_cpu_e5500)
 	mtlr	r5
 	blr
 #endif
+
+/* flush the L1 data cache; applies to e500v2, e500mc and e5500 */
+_GLOBAL(flush_dcache_L1)
+	mfmsr	r10
+	wrteei	0
+
+	mfspr	r3,SPRN_L1CFG0
+	rlwinm	r5,r3,9,3	/* Extract cache block size */
+	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
+				 * are currently defined.
+				 */
+	li	r4,32
+	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
+				 *      log2(number of ways)
+				 */
+	slw	r5,r4,r5	/* r5 = cache block size */
+
+	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
+	mulli	r7,r7,13	/* An 8-way cache will require 13
+				 * loads per set.
+				 */
+	slw	r7,r7,r6
+
+	/* save off HID0 and set DCFA */
+	mfspr	r8,SPRN_HID0
+	ori	r9,r8,HID0_DCFA@l
+	mtspr	SPRN_HID0,r9
+	isync
+
+	LOAD_REG_IMMEDIATE(r6, KERNELBASE)
+	mr	r4, r6
+	mtctr	r7
+
+1:	lwz	r3,0(r4)	/* Load... */
+	add	r4,r4,r5
+	bdnz	1b
+
+	msync
+	mr	r4, r6
+	mtctr	r7
+
+1:	dcbf	0,r4		/* ...and flush. */
+	add	r4,r4,r5
+	bdnz	1b
+
+	/* restore HID0 */
+	mtspr	SPRN_HID0,r8
+	isync
+
+	wrtee r10
+
+	blr
+
+has_L2_cache:
+	/* skip L2 cache on P2040/P2040E as they have no L2 cache */
+	mfspr	r3, SPRN_SVR
+	/* shift right by 8 bits and clear E bit of SVR */
+	rlwinm	r4, r3, 24, ~0x800
+
+	lis	r3, SVR_P2040@h
+	ori	r3, r3, SVR_P2040@l
+	cmpw	r4, r3
+	beq	1f
+
+	li	r3, 1
+	blr
+1:
+	li	r3, 0
+	blr
+
+/* flush backside L2 cache */
+flush_backside_L2_cache:
+	mflr	r10
+	bl	has_L2_cache
+	mtlr	r10
+	cmpwi	r3, 0
+	beq	2f
+
+	/* Flush the L2 cache */
+	mfspr	r3, SPRN_L2CSR0
+	ori	r3, r3, L2CSR0_L2FL@l
+	msync
+	isync
+	mtspr	SPRN_L2CSR0,r3
+	isync
+
+	/* check if it is complete */
+1:	mfspr	r3,SPRN_L2CSR0
+	andi.	r3, r3, L2CSR0_L2FL@l
+	bne	1b
+2:
+	blr
+
+_GLOBAL(cpu_down_flush_e500v2)
+	mflr r0
+	bl	flush_dcache_L1
+	mtlr r0
+	blr
+
+_GLOBAL(cpu_down_flush_e500mc)
+_GLOBAL(cpu_down_flush_e5500)
+	mflr r0
+	bl	flush_dcache_L1
+	bl	flush_backside_L2_cache
+	mtlr r0
+	blr
+
+/* L1 Data Cache of e6500 contains no modified data, no flush is required */
+_GLOBAL(cpu_down_flush_e6500)
+	blr
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 7d80bfd..d65b45a 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2023,6 +2023,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_setup		= __setup_cpu_e500v2,
 		.machine_check		= machine_check_e500,
 		.platform		= "ppc8548",
+		.cpu_down_flush		= cpu_down_flush_e500v2,
 	},
 #else
 	{	/* e500mc */
@@ -2042,6 +2043,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_setup		= __setup_cpu_e500mc,
 		.machine_check		= machine_check_e500mc,
 		.platform		= "ppce500mc",
+		.cpu_down_flush		= cpu_down_flush_e500mc,
 	},
 #endif /* CONFIG_PPC_E500MC */
 #endif /* CONFIG_PPC32 */
@@ -2066,6 +2068,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 #endif
 		.machine_check		= machine_check_e500mc,
 		.platform		= "ppce5500",
+		.cpu_down_flush		= cpu_down_flush_e5500,
 	},
 	{	/* e6500 */
 		.pvr_mask		= 0xffff0000,
@@ -2088,6 +2091,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 #endif
 		.machine_check		= machine_check_e500mc,
 		.platform		= "ppce6500",
+		.cpu_down_flush		= cpu_down_flush_e6500,
 	},
 #endif /* CONFIG_PPC_E500MC */
 #ifdef CONFIG_PPC32
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index fffd1f9..709bc50 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -1075,80 +1075,6 @@ _GLOBAL(set_context)
 	isync			/* Force context change */
 	blr
 
-_GLOBAL(flush_dcache_L1)
-	mfspr	r3,SPRN_L1CFG0
-
-	rlwinm	r5,r3,9,3	/* Extract cache block size */
-	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
-				 * are currently defined.
-				 */
-	li	r4,32
-	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
-				 *      log2(number of ways)
-				 */
-	slw	r5,r4,r5	/* r5 = cache block size */
-
-	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
-	mulli	r7,r7,13	/* An 8-way cache will require 13
-				 * loads per set.
-				 */
-	slw	r7,r7,r6
-
-	/* save off HID0 and set DCFA */
-	mfspr	r8,SPRN_HID0
-	ori	r9,r8,HID0_DCFA@l
-	mtspr	SPRN_HID0,r9
-	isync
-
-	lis	r4,KERNELBASE@h
-	mtctr	r7
-
-1:	lwz	r3,0(r4)	/* Load... */
-	add	r4,r4,r5
-	bdnz	1b
-
-	msync
-	lis	r4,KERNELBASE@h
-	mtctr	r7
-
-1:	dcbf	0,r4		/* ...and flush. */
-	add	r4,r4,r5
-	bdnz	1b
-	
-	/* restore HID0 */
-	mtspr	SPRN_HID0,r8
-	isync
-
-	blr
-
-/* Flush L1 d-cache, invalidate and disable d-cache and i-cache */
-_GLOBAL(__flush_disable_L1)
-	mflr	r10
-	bl	flush_dcache_L1	/* Flush L1 d-cache */
-	mtlr	r10
-
-	mfspr	r4, SPRN_L1CSR0	/* Invalidate and disable d-cache */
-	li	r5, 2
-	rlwimi	r4, r5, 0, 3
-
-	msync
-	isync
-	mtspr	SPRN_L1CSR0, r4
-	isync
-
-1:	mfspr	r4, SPRN_L1CSR0	/* Wait for the invalidate to finish */
-	andi.	r4, r4, 2
-	bne	1b
-
-	mfspr	r4, SPRN_L1CSR1	/* Invalidate and disable i-cache */
-	li	r5, 2
-	rlwimi	r4, r5, 0, 3
-
-	mtspr	SPRN_L1CSR1, r4
-	isync
-
-	blr
-
 #ifdef CONFIG_SMP
 /* When we get here, r24 needs to hold the CPU # */
 	.globl __secondary_start
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 6b107ce..4a78416 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -139,7 +139,8 @@ static void smp_85xx_mach_cpu_die(void)
 
 	mtspr(SPRN_TCR, 0);
 
-	__flush_disable_L1();
+	cur_cpu_spec->cpu_down_flush();
+
 	tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
 	mtspr(SPRN_HID0, tmp);
 	isync();
@@ -359,7 +360,7 @@ void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
 	local_irq_disable();
 
 	if (secondary) {
-		__flush_disable_L1();
+		cur_cpu_spec->cpu_down_flush();
 		atomic_inc(&kexec_down_cpus);
 		/* loop forever */
 		while (1);
-- 
1.9.1


* [PATCH v3 3/6] powerpc/rcpm: add RCPM driver
  2015-11-20  9:13 [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1 Chenhui Zhao
  2015-11-20  9:13 ` [PATCH v3 2/6] powerpc/cache: add cache flush operation for various e500 Chenhui Zhao
@ 2015-11-20  9:13 ` Chenhui Zhao
  2015-11-20  9:14 ` [PATCH v3 4/6] powerpc/mpc85xx: refactor the PM operations Chenhui Zhao
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 14+ messages in thread
From: Chenhui Zhao @ 2015-11-20  9:13 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: scottwood

There is an RCPM (Run Control/Power Management) block in Freescale QorIQ
series processors. The device performs tasks associated with device
run control and power management.

The driver implements several features: masking/unmasking IRQs,
entering/exiting low power states, freezing the time base, etc.
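
A rough usage sketch (not part of the patch, function name assumed), given
that fsl_rcpm_init() has populated qoriq_pm_ops:

	#include <asm/fsl_pm.h>

	static void example_offline_path(int cpu)
	{
		qoriq_pm_ops->irq_mask(cpu);	/* keep wakeup interrupts away */
		qoriq_pm_ops->cpu_die(cpu);	/* PH15/PH20, or stop the thread */
	}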

Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
Signed-off-by: Tang Yuantian <Yuantian.Tang@freescale.com>
---
Changes for v3
* remove CONFIG_PPC_BOOK3E in cputhreads.h
* delete Documentation/devicetree/bindings/soc/fsl/rcpm.txt, as it has already been sent by Dongsheng
* move data structure definitions from arch/powerpc/include/asm/fsl_guts.h
  to include/linux/fsl/guts.h
* call the asm code in qoriq_disable_thread()
* just return 0 in fsl_rcpm_init() when there is no rcpm node
* remove isync in book3e_stop_thread()
* hw_cpu passed to cpu_core_index_of_thread() is replaced by cpu
* CONFIG_PPC_BOOK3E is replaced by CONFIG_PPC64
* removed unnecessary return
* removed unnecessary casts (void *)
* added the declaration of fsl_rcpm_init()

major changes for v2:
* rcpm_v1_cpu_die() and rcpm_v2_cpu_die() will be executed by the dying cpu; this way is more stable

 arch/powerpc/include/asm/cputhreads.h |   1 +
 arch/powerpc/include/asm/fsl_pm.h     |  52 +++++
 arch/powerpc/kernel/head_64.S         |  19 ++
 arch/powerpc/platforms/85xx/Kconfig   |   1 +
 arch/powerpc/platforms/85xx/common.c  |   3 +
 arch/powerpc/sysdev/Kconfig           |   5 +
 arch/powerpc/sysdev/Makefile          |   1 +
 arch/powerpc/sysdev/fsl_rcpm.c        | 385 ++++++++++++++++++++++++++++++++++
 include/linux/fsl/guts.h              | 105 ++++++++++
 9 files changed, 572 insertions(+)
 create mode 100644 arch/powerpc/include/asm/fsl_pm.h
 create mode 100644 arch/powerpc/sysdev/fsl_rcpm.c

diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index b56cece..e5a769d 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -102,6 +102,7 @@ static inline u32 get_tensr(void)
 		return 1;
 }
 
+void book3e_stop_thread(int thread);
 
 #endif /* _ASM_POWERPC_CPUTHREADS_H */
 
diff --git a/arch/powerpc/include/asm/fsl_pm.h b/arch/powerpc/include/asm/fsl_pm.h
new file mode 100644
index 0000000..d168c3d
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_pm.h
@@ -0,0 +1,52 @@
+/*
+ * Support Power Management
+ *
+ * Copyright 2014-2015 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef __PPC_FSL_PM_H
+#define __PPC_FSL_PM_H
+#ifdef __KERNEL__
+
+#define E500_PM_PH10	1
+#define E500_PM_PH15	2
+#define E500_PM_PH20	3
+#define E500_PM_PH30	4
+#define E500_PM_DOZE	E500_PM_PH10
+#define E500_PM_NAP	E500_PM_PH15
+
+#define PLAT_PM_SLEEP	20
+#define PLAT_PM_LPM20	30
+
+#define FSL_PM_SLEEP		(1 << 0)
+#define FSL_PM_DEEP_SLEEP	(1 << 1)
+
+struct fsl_pm_ops {
+	/* mask pending interrupts to the RCPM from MPIC */
+	void (*irq_mask)(int cpu);
+
+	/* unmask pending interrupts to the RCPM from MPIC */
+	void (*irq_unmask)(int cpu);
+	void (*cpu_enter_state)(int cpu, int state);
+	void (*cpu_exit_state)(int cpu, int state);
+	void (*cpu_up_prepare)(int cpu);
+	void (*cpu_die)(int cpu);
+	int (*plat_enter_sleep)(void);
+	void (*freeze_time_base)(bool freeze);
+
+	/* keep the power of IP blocks during sleep/deep sleep */
+	void (*set_ip_power)(bool enable, u32 mask);
+
+	/* get platform supported power management modes */
+	unsigned int (*get_pm_modes)(void);
+};
+
+extern const struct fsl_pm_ops *qoriq_pm_ops;
+
+int __init fsl_rcpm_init(void);
+#endif /* __KERNEL__ */
+#endif /* __PPC_FSL_PM_H */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 1b77956..6036253 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -181,6 +181,25 @@ exception_marker:
 #endif
 
 #ifdef CONFIG_PPC_BOOK3E
+/*
+ * stop a thread in the same core
+ * input parameter:
+ * r3 = the thread physical id
+ */
+_GLOBAL(book3e_stop_thread)
+	cmpi	0, r3, 0
+	beq	10f
+	cmpi	0, r3, 1
+	beq	10f
+	/* If the thread id is invalid, just exit. */
+	b	13f
+10:
+	li	r4, 1
+	sld	r4, r4, r3
+	mtspr	SPRN_TENC, r4
+13:
+	blr
+
 _GLOBAL(fsl_secondary_thread_init)
 	mfspr	r4,SPRN_BUCSR
 
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 97915fe..e626461 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -8,6 +8,7 @@ menuconfig FSL_SOC_BOOKE
 	select FSL_PCI if PCI
 	select SERIAL_8250_EXTENDED if SERIAL_8250
 	select SERIAL_8250_SHARE_IRQ if SERIAL_8250
+	select FSL_CORENET_RCPM if PPC_E500MC
 	default y
 
 if FSL_SOC_BOOKE
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index 23791de..d1d736d 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -10,10 +10,13 @@
 #include <linux/of_platform.h>
 
 #include <asm/qe.h>
+#include <asm/fsl_pm.h>
 #include <sysdev/cpm2_pic.h>
 
 #include "mpc85xx.h"
 
+const struct fsl_pm_ops *qoriq_pm_ops;
+
 static const struct of_device_id mpc85xx_common_ids[] __initconst = {
 	{ .type = "soc", },
 	{ .compatible = "soc", },
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index a19332a..52dc165 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -40,3 +40,8 @@ config SCOM_DEBUGFS
 config GE_FPGA
 	bool
 	default n
+
+config FSL_CORENET_RCPM
+	bool
+	help
+	  This option enables support for RCPM (Run Control/Power Management).
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 5b492a6..d0e8a43 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_MMIO_NVRAM)	+= mmio_nvram.o
 obj-$(CONFIG_FSL_SOC)		+= fsl_soc.o fsl_mpic_err.o
 obj-$(CONFIG_FSL_PCI)		+= fsl_pci.o $(fsl-msi-obj-y)
 obj-$(CONFIG_FSL_PMC)		+= fsl_pmc.o
+obj-$(CONFIG_FSL_CORENET_RCPM)	+= fsl_rcpm.o
 obj-$(CONFIG_FSL_LBC)		+= fsl_lbc.o
 obj-$(CONFIG_FSL_GTM)		+= fsl_gtm.o
 obj-$(CONFIG_FSL_85XX_CACHE_SRAM)	+= fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o
diff --git a/arch/powerpc/sysdev/fsl_rcpm.c b/arch/powerpc/sysdev/fsl_rcpm.c
new file mode 100644
index 0000000..656d9ca
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_rcpm.c
@@ -0,0 +1,385 @@
+/*
+ * RCPM(Run Control/Power Management) support
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+ * Author: Chenhui Zhao <chenhui.zhao@freescale.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/of_address.h>
+#include <linux/export.h>
+
+#include <asm/io.h>
+#include <linux/fsl/guts.h>
+#include <asm/cputhreads.h>
+#include <asm/fsl_pm.h>
+
+static struct ccsr_rcpm_v1 __iomem *rcpm_v1_regs;
+static struct ccsr_rcpm_v2 __iomem *rcpm_v2_regs;
+static unsigned int fsl_supported_pm_modes;
+
+static void rcpm_v1_irq_mask(int cpu)
+{
+	int hw_cpu = get_hard_smp_processor_id(cpu);
+	unsigned int mask = 1 << hw_cpu;
+
+	setbits32(&rcpm_v1_regs->cpmimr, mask);
+	setbits32(&rcpm_v1_regs->cpmcimr, mask);
+	setbits32(&rcpm_v1_regs->cpmmcmr, mask);
+	setbits32(&rcpm_v1_regs->cpmnmimr, mask);
+}
+
+static void rcpm_v2_irq_mask(int cpu)
+{
+	int hw_cpu = get_hard_smp_processor_id(cpu);
+	unsigned int mask = 1 << hw_cpu;
+
+	setbits32(&rcpm_v2_regs->tpmimr0, mask);
+	setbits32(&rcpm_v2_regs->tpmcimr0, mask);
+	setbits32(&rcpm_v2_regs->tpmmcmr0, mask);
+	setbits32(&rcpm_v2_regs->tpmnmimr0, mask);
+}
+
+static void rcpm_v1_irq_unmask(int cpu)
+{
+	int hw_cpu = get_hard_smp_processor_id(cpu);
+	unsigned int mask = 1 << hw_cpu;
+
+	clrbits32(&rcpm_v1_regs->cpmimr, mask);
+	clrbits32(&rcpm_v1_regs->cpmcimr, mask);
+	clrbits32(&rcpm_v1_regs->cpmmcmr, mask);
+	clrbits32(&rcpm_v1_regs->cpmnmimr, mask);
+}
+
+static void rcpm_v2_irq_unmask(int cpu)
+{
+	int hw_cpu = get_hard_smp_processor_id(cpu);
+	unsigned int mask = 1 << hw_cpu;
+
+	clrbits32(&rcpm_v2_regs->tpmimr0, mask);
+	clrbits32(&rcpm_v2_regs->tpmcimr0, mask);
+	clrbits32(&rcpm_v2_regs->tpmmcmr0, mask);
+	clrbits32(&rcpm_v2_regs->tpmnmimr0, mask);
+}
+
+static void rcpm_v1_set_ip_power(bool enable, u32 mask)
+{
+	if (enable)
+		setbits32(&rcpm_v1_regs->ippdexpcr, mask);
+	else
+		clrbits32(&rcpm_v1_regs->ippdexpcr, mask);
+}
+
+static void rcpm_v2_set_ip_power(bool enable, u32 mask)
+{
+	if (enable)
+		setbits32(&rcpm_v2_regs->ippdexpcr[0], mask);
+	else
+		clrbits32(&rcpm_v2_regs->ippdexpcr[0], mask);
+}
+
+static void rcpm_v1_cpu_enter_state(int cpu, int state)
+{
+	int hw_cpu = get_hard_smp_processor_id(cpu);
+	unsigned int mask = 1 << hw_cpu;
+
+	switch (state) {
+	case E500_PM_PH10:
+		setbits32(&rcpm_v1_regs->cdozcr, mask);
+		break;
+	case E500_PM_PH15:
+		setbits32(&rcpm_v1_regs->cnapcr, mask);
+		break;
+	default:
+		pr_warn("Unknown cpu PM state (%d)\n", state);
+		break;
+	}
+}
+
+static void rcpm_v2_cpu_enter_state(int cpu, int state)
+{
+	int hw_cpu = get_hard_smp_processor_id(cpu);
+	u32 mask = 1 << cpu_core_index_of_thread(cpu);
+
+	switch (state) {
+	case E500_PM_PH10:
+		/* one bit corresponds to one thread for PH10 of e6500 */
+		setbits32(&rcpm_v2_regs->tph10setr0, 1 << hw_cpu);
+		break;
+	case E500_PM_PH15:
+		setbits32(&rcpm_v2_regs->pcph15setr, mask);
+		break;
+	case E500_PM_PH20:
+		setbits32(&rcpm_v2_regs->pcph20setr, mask);
+		break;
+	case E500_PM_PH30:
+		setbits32(&rcpm_v2_regs->pcph30setr, mask);
+		break;
+	default:
+		pr_warn("Unknown cpu PM state (%d)\n", state);
+	}
+}
+
+static void rcpm_v1_cpu_die(int cpu)
+{
+	rcpm_v1_cpu_enter_state(cpu, E500_PM_PH15);
+}
+
+#ifdef CONFIG_PPC64
+static void qoriq_disable_thread(int cpu)
+{
+	int thread = cpu_thread_in_core(cpu);
+
+	book3e_stop_thread(thread);
+}
+#endif
+
+static void rcpm_v2_cpu_die(int cpu)
+{
+#ifdef CONFIG_PPC64
+	int primary;
+
+	if (threads_per_core == 2) {
+		primary = cpu_first_thread_sibling(cpu);
+		if (cpu_is_offline(primary) && cpu_is_offline(primary + 1)) {
+			/* if both threads are offline, put the cpu in PH20 */
+			rcpm_v2_cpu_enter_state(cpu, E500_PM_PH20);
+		} else {
+			/* if only one thread is offline, disable the thread */
+			qoriq_disable_thread(cpu);
+		}
+	}
+#endif
+
+	if (threads_per_core == 1)
+		rcpm_v2_cpu_enter_state(cpu, E500_PM_PH20);
+}
+
+static void rcpm_v1_cpu_exit_state(int cpu, int state)
+{
+	int hw_cpu = get_hard_smp_processor_id(cpu);
+	unsigned int mask = 1 << hw_cpu;
+
+	switch (state) {
+	case E500_PM_PH10:
+		clrbits32(&rcpm_v1_regs->cdozcr, mask);
+		break;
+	case E500_PM_PH15:
+		clrbits32(&rcpm_v1_regs->cnapcr, mask);
+		break;
+	default:
+		pr_warn("Unknown cpu PM state (%d)\n", state);
+		break;
+	}
+}
+
+static void rcpm_v1_cpu_up_prepare(int cpu)
+{
+	rcpm_v1_cpu_exit_state(cpu, E500_PM_PH15);
+	rcpm_v1_irq_unmask(cpu);
+}
+
+static void rcpm_v2_cpu_exit_state(int cpu, int state)
+{
+	int hw_cpu = get_hard_smp_processor_id(cpu);
+	u32 mask = 1 << cpu_core_index_of_thread(cpu);
+
+	switch (state) {
+	case E500_PM_PH10:
+		setbits32(&rcpm_v2_regs->tph10clrr0, 1 << hw_cpu);
+		break;
+	case E500_PM_PH15:
+		setbits32(&rcpm_v2_regs->pcph15clrr, mask);
+		break;
+	case E500_PM_PH20:
+		setbits32(&rcpm_v2_regs->pcph20clrr, mask);
+		break;
+	case E500_PM_PH30:
+		setbits32(&rcpm_v2_regs->pcph30clrr, mask);
+		break;
+	default:
+		pr_warn("Unknown cpu PM state (%d)\n", state);
+	}
+}
+
+static void rcpm_v2_cpu_up_prepare(int cpu)
+{
+	rcpm_v2_cpu_exit_state(cpu, E500_PM_PH20);
+	rcpm_v2_irq_unmask(cpu);
+}
+
+static int rcpm_v1_plat_enter_state(int state)
+{
+	u32 *pmcsr_reg = &rcpm_v1_regs->powmgtcsr;
+	int ret = 0;
+	int result;
+
+	switch (state) {
+	case PLAT_PM_SLEEP:
+		setbits32(pmcsr_reg, RCPM_POWMGTCSR_SLP);
+
+		/* Upon resume, wait for RCPM_POWMGTCSR_SLP bit to be clear. */
+		result = spin_event_timeout(
+		  !(in_be32(pmcsr_reg) & RCPM_POWMGTCSR_SLP), 10000, 10);
+		if (!result) {
+			pr_err("timeout waiting for SLP bit to be cleared\n");
+			ret = -ETIMEDOUT;
+		}
+		break;
+	default:
+		pr_warn("Unknown platform PM state (%d)", state);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int rcpm_v2_plat_enter_state(int state)
+{
+	u32 *pmcsr_reg = &rcpm_v2_regs->powmgtcsr;
+	int ret = 0;
+	int result;
+
+	switch (state) {
+	case PLAT_PM_LPM20:
+		/* clear previous LPM20 status */
+		setbits32(pmcsr_reg, RCPM_POWMGTCSR_P_LPM20_ST);
+		/* enter LPM20 status */
+		setbits32(pmcsr_reg, RCPM_POWMGTCSR_LPM20_RQ);
+
+		/* At this point, the device is in LPM20 status. */
+
+		/* resume ... */
+		result = spin_event_timeout(
+		  !(in_be32(pmcsr_reg) & RCPM_POWMGTCSR_LPM20_ST), 10000, 10);
+		if (!result) {
+			pr_err("timeout waiting for LPM20 bit to be cleared\n");
+			ret = -ETIMEDOUT;
+		}
+		break;
+	default:
+		pr_warn("Unknown platform PM state (%d)\n", state);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int rcpm_v1_plat_enter_sleep(void)
+{
+	return rcpm_v1_plat_enter_state(PLAT_PM_SLEEP);
+}
+
+static int rcpm_v2_plat_enter_sleep(void)
+{
+	return rcpm_v2_plat_enter_state(PLAT_PM_LPM20);
+}
+
+static void rcpm_common_freeze_time_base(u32 *tben_reg, int freeze)
+{
+	static u32 mask;
+
+	if (freeze) {
+		mask = in_be32(tben_reg);
+		clrbits32(tben_reg, mask);
+	} else {
+		setbits32(tben_reg, mask);
+	}
+
+	/* read back to push the previous write */
+	in_be32(tben_reg);
+}
+
+static void rcpm_v1_freeze_time_base(bool freeze)
+{
+	rcpm_common_freeze_time_base(&rcpm_v1_regs->ctbenr, freeze);
+}
+
+static void rcpm_v2_freeze_time_base(bool freeze)
+{
+	rcpm_common_freeze_time_base(&rcpm_v2_regs->pctbenr, freeze);
+}
+
+static unsigned int rcpm_get_pm_modes(void)
+{
+	return fsl_supported_pm_modes;
+}
+
+static const struct fsl_pm_ops qoriq_rcpm_v1_ops = {
+	.irq_mask = rcpm_v1_irq_mask,
+	.irq_unmask = rcpm_v1_irq_unmask,
+	.cpu_enter_state = rcpm_v1_cpu_enter_state,
+	.cpu_exit_state = rcpm_v1_cpu_exit_state,
+	.cpu_up_prepare = rcpm_v1_cpu_up_prepare,
+	.cpu_die = rcpm_v1_cpu_die,
+	.plat_enter_sleep = rcpm_v1_plat_enter_sleep,
+	.set_ip_power = rcpm_v1_set_ip_power,
+	.freeze_time_base = rcpm_v1_freeze_time_base,
+	.get_pm_modes = rcpm_get_pm_modes,
+};
+
+static const struct fsl_pm_ops qoriq_rcpm_v2_ops = {
+	.irq_mask = rcpm_v2_irq_mask,
+	.irq_unmask = rcpm_v2_irq_unmask,
+	.cpu_enter_state = rcpm_v2_cpu_enter_state,
+	.cpu_exit_state = rcpm_v2_cpu_exit_state,
+	.cpu_up_prepare = rcpm_v2_cpu_up_prepare,
+	.cpu_die = rcpm_v2_cpu_die,
+	.plat_enter_sleep = rcpm_v2_plat_enter_sleep,
+	.set_ip_power = rcpm_v2_set_ip_power,
+	.freeze_time_base = rcpm_v2_freeze_time_base,
+	.get_pm_modes = rcpm_get_pm_modes,
+};
+
+static const struct of_device_id rcpm_matches[] = {
+	{
+		.compatible = "fsl,qoriq-rcpm-1.0",
+		.data = &qoriq_rcpm_v1_ops,
+	},
+	{
+		.compatible = "fsl,qoriq-rcpm-2.0",
+		.data = &qoriq_rcpm_v2_ops,
+	},
+	{
+		.compatible = "fsl,qoriq-rcpm-2.1",
+		.data = &qoriq_rcpm_v2_ops,
+	},
+	{},
+};
+
+int __init fsl_rcpm_init(void)
+{
+	struct device_node *np;
+	const struct of_device_id *match;
+	void __iomem *base;
+
+	np = of_find_matching_node_and_match(NULL, rcpm_matches, &match);
+	if (!np)
+		return 0;
+
+	base = of_iomap(np, 0);
+	of_node_put(np);
+	if (!base) {
+		pr_err("of_iomap() error.\n");
+		return -ENOMEM;
+	}
+
+	rcpm_v1_regs = base;
+	rcpm_v2_regs = base;
+
+	/* support sleep by default */
+	fsl_supported_pm_modes = FSL_PM_SLEEP;
+
+	qoriq_pm_ops = match->data;
+
+	return 0;
+}
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
index 84d971f..649e917 100644
--- a/include/linux/fsl/guts.h
+++ b/include/linux/fsl/guts.h
@@ -189,4 +189,109 @@ static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts,
 
 #endif
 
+struct ccsr_rcpm_v1 {
+	u8	res0000[4];
+	__be32	cdozsr;	    /* 0x0004 Core Doze Status Register */
+	u8	res0008[4];
+	__be32	cdozcr;	    /* 0x000c Core Doze Control Register */
+	u8	res0010[4];
+	__be32	cnapsr;	    /* 0x0014 Core Nap Status Register */
+	u8	res0018[4];
+	__be32	cnapcr;	    /* 0x001c Core Nap Control Register */
+	u8	res0020[4];
+	__be32	cdozpsr;    /* 0x0024 Core Doze Previous Status Register */
+	u8	res0028[4];
+	__be32	cnappsr;    /* 0x002c Core Nap Previous Status Register */
+	u8	res0030[4];
+	__be32	cwaitsr;    /* 0x0034 Core Wait Status Register */
+	u8	res0038[4];
+	__be32	cwdtdsr;    /* 0x003c Core Watchdog Detect Status Register */
+	__be32	powmgtcsr;  /* 0x0040 PM Control&Status Register */
+#define RCPM_POWMGTCSR_SLP	0x00020000
+	u8	res0044[12];
+	__be32	ippdexpcr;  /* 0x0050 IP Powerdown Exception Control Register */
+	u8	res0054[16];
+	__be32	cpmimr;	    /* 0x0064 Core PM IRQ Mask Register */
+	u8	res0068[4];
+	__be32	cpmcimr;    /* 0x006c Core PM Critical IRQ Mask Register */
+	u8	res0070[4];
+	__be32	cpmmcmr;    /* 0x0074 Core PM Machine Check Mask Register */
+	u8	res0078[4];
+	__be32	cpmnmimr;   /* 0x007c Core PM NMI Mask Register */
+	u8	res0080[4];
+	__be32	ctbenr;	    /* 0x0084 Core Time Base Enable Register */
+	u8	res0088[4];
+	__be32	ctbckselr;  /* 0x008c Core Time Base Clock Select Register */
+	u8	res0090[4];
+	__be32	ctbhltcr;   /* 0x0094 Core Time Base Halt Control Register */
+	u8	res0098[4];
+	__be32	cmcpmaskcr; /* 0x00a4 Core Machine Check Mask Register */
+};
+
+struct ccsr_rcpm_v2 {
+	u8	res_00[12];
+	__be32	tph10sr0;	/* Thread PH10 Status Register */
+	u8	res_10[12];
+	__be32	tph10setr0;	/* Thread PH10 Set Control Register */
+	u8	res_20[12];
+	__be32	tph10clrr0;	/* Thread PH10 Clear Control Register */
+	u8	res_30[12];
+	__be32	tph10psr0;	/* Thread PH10 Previous Status Register */
+	u8	res_40[12];
+	__be32	twaitsr0;	/* Thread Wait Status Register */
+	u8	res_50[96];
+	__be32	pcph15sr;	/* Physical Core PH15 Status Register */
+	__be32	pcph15setr;	/* Physical Core PH15 Set Control Register */
+	__be32	pcph15clrr;	/* Physical Core PH15 Clear Control Register */
+	__be32	pcph15psr;	/* Physical Core PH15 Prev Status Register */
+	u8	res_c0[16];
+	__be32	pcph20sr;	/* Physical Core PH20 Status Register */
+	__be32	pcph20setr;	/* Physical Core PH20 Set Control Register */
+	__be32	pcph20clrr;	/* Physical Core PH20 Clear Control Register */
+	__be32	pcph20psr;	/* Physical Core PH20 Prev Status Register */
+	__be32	pcpw20sr;	/* Physical Core PW20 Status Register */
+	u8	res_e0[12];
+	__be32	pcph30sr;	/* Physical Core PH30 Status Register */
+	__be32	pcph30setr;	/* Physical Core PH30 Set Control Register */
+	__be32	pcph30clrr;	/* Physical Core PH30 Clear Control Register */
+	__be32	pcph30psr;	/* Physical Core PH30 Prev Status Register */
+	u8	res_100[32];
+	__be32	ippwrgatecr;	/* IP Power Gating Control Register */
+	u8	res_124[12];
+	__be32	powmgtcsr;	/* Power Management Control & Status Reg */
+#define RCPM_POWMGTCSR_LPM20_RQ		0x00100000
+#define RCPM_POWMGTCSR_LPM20_ST		0x00000200
+#define RCPM_POWMGTCSR_P_LPM20_ST	0x00000100
+	u8	res_134[12];
+	__be32	ippdexpcr[4];	/* IP Powerdown Exception Control Reg */
+	u8	res_150[12];
+	__be32	tpmimr0;	/* Thread PM Interrupt Mask Reg */
+	u8	res_160[12];
+	__be32	tpmcimr0;	/* Thread PM Crit Interrupt Mask Reg */
+	u8	res_170[12];
+	__be32	tpmmcmr0;	/* Thread PM Machine Check Interrupt Mask Reg */
+	u8	res_180[12];
+	__be32	tpmnmimr0;	/* Thread PM NMI Mask Reg */
+	u8	res_190[12];
+	__be32	tmcpmaskcr0;	/* Thread Machine Check Mask Control Reg */
+	__be32	pctbenr;	/* Physical Core Time Base Enable Reg */
+	__be32	pctbclkselr;	/* Physical Core Time Base Clock Select */
+	__be32	tbclkdivr;	/* Time Base Clock Divider Register */
+	u8	res_1ac[4];
+	__be32	ttbhltcr[4];	/* Thread Time Base Halt Control Register */
+	__be32	clpcl10sr;	/* Cluster PCL10 Status Register */
+	__be32	clpcl10setr;	/* Cluster PCL30 Set Control Register */
+	__be32	clpcl10clrr;	/* Cluster PCL30 Clear Control Register */
+	__be32	clpcl10psr;	/* Cluster PCL30 Prev Status Register */
+	__be32	cddslpsetr;	/* Core Domain Deep Sleep Set Register */
+	__be32	cddslpclrr;	/* Core Domain Deep Sleep Clear Register */
+	__be32	cdpwroksetr;	/* Core Domain Power OK Set Register */
+	__be32	cdpwrokclrr;	/* Core Domain Power OK Clear Register */
+	__be32	cdpwrensr;	/* Core Domain Power Enable Status Register */
+	__be32	cddslsr;	/* Core Domain Deep Sleep Status Register */
+	u8	res_1e8[8];
+	__be32	dslpcntcr[8];	/* Deep Sleep Counter Cfg Register */
+	u8	res_300[3568];
+};
+
 #endif
-- 
1.9.1


* [PATCH v3 4/6] powerpc/mpc85xx: refactor the PM operations
  2015-11-20  9:13 [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1 Chenhui Zhao
  2015-11-20  9:13 ` [PATCH v3 2/6] powerpc/cache: add cache flush operation for various e500 Chenhui Zhao
  2015-11-20  9:13 ` [PATCH v3 3/6] powerpc/rcpm: add RCPM driver Chenhui Zhao
@ 2015-11-20  9:14 ` Chenhui Zhao
  2015-11-20  9:14 ` [PATCH v3 5/6] powerpc/mpc85xx: Add hotplug support on E5500 and E500MC cores Chenhui Zhao
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 14+ messages in thread
From: Chenhui Zhao @ 2015-11-20  9:14 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: scottwood

Freescale CoreNet-based and non-CoreNet-based platforms require
different PM operations. This patch extracts the existing PM operations
used on non-CoreNet-based platforms into a new file which can accommodate
both platforms. In this way, the PM operation code is structurally clearer.
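
A simplified sketch (assumed fragment, not part of the patch) of the
init-time selection this split allows; both backends sit behind the same
qoriq_pm_ops pointer:

	#ifdef CONFIG_FSL_CORENET_RCPM
		fsl_rcpm_init();	/* CoreNet parts: RCPM-backed fsl_pm_ops */
	#endif
	#ifdef CONFIG_FSL_PMC
		mpc85xx_setup_pmc();	/* non-CoreNet parts: guts/PMC-backed fsl_pm_ops */
	#endif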

Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
Signed-off-by: Tang Yuantian <Yuantian.Tang@freescale.com>
---
 arch/powerpc/platforms/85xx/Makefile         |   1 +
 arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c | 106 +++++++++++++++++++++++++++
 arch/powerpc/platforms/85xx/smp.c            |  73 +++++-------------
 arch/powerpc/platforms/85xx/smp.h            |   1 +
 4 files changed, 127 insertions(+), 54 deletions(-)
 create mode 100644 arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c

diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index 1fe7fb9..7bc86da 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -2,6 +2,7 @@
 # Makefile for the PowerPC 85xx linux kernel.
 #
 obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_FSL_PMC)		  += mpc85xx_pm_ops.o
 
 obj-y += common.o
 
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c b/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c
new file mode 100644
index 0000000..f05325f
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c
@@ -0,0 +1,106 @@
+/*
+ * MPC85xx PM operators
+ *
+ * Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/fsl/guts.h>
+
+#include <asm/io.h>
+#include <asm/fsl_pm.h>
+
+static struct ccsr_guts __iomem *guts;
+
+static void mpc85xx_irq_mask(int cpu)
+{
+
+}
+
+static void mpc85xx_irq_unmask(int cpu)
+{
+
+}
+
+static void mpc85xx_cpu_die(int cpu)
+{
+	u32 tmp;
+
+	tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
+	mtspr(SPRN_HID0, tmp);
+
+	/* Enter NAP mode. */
+	tmp = mfmsr();
+	tmp |= MSR_WE;
+	asm volatile(
+		"msync\n"
+		"mtmsr %0\n"
+		"isync\n"
+		:
+		: "r" (tmp));
+}
+
+static void mpc85xx_cpu_up_prepare(int cpu)
+{
+
+}
+
+static void mpc85xx_freeze_time_base(bool freeze)
+{
+	uint32_t mask;
+
+	mask = CCSR_GUTS_DEVDISR_TB0 | CCSR_GUTS_DEVDISR_TB1;
+	if (freeze)
+		setbits32(&guts->devdisr, mask);
+	else
+		clrbits32(&guts->devdisr, mask);
+
+	in_be32(&guts->devdisr);
+}
+
+static const struct of_device_id mpc85xx_smp_guts_ids[] = {
+	{ .compatible = "fsl,mpc8572-guts", },
+	{ .compatible = "fsl,p1020-guts", },
+	{ .compatible = "fsl,p1021-guts", },
+	{ .compatible = "fsl,p1022-guts", },
+	{ .compatible = "fsl,p1023-guts", },
+	{ .compatible = "fsl,p2020-guts", },
+	{ .compatible = "fsl,bsc9132-guts", },
+	{},
+};
+
+static const struct fsl_pm_ops mpc85xx_pm_ops = {
+	.freeze_time_base = mpc85xx_freeze_time_base,
+	.irq_mask = mpc85xx_irq_mask,
+	.irq_unmask = mpc85xx_irq_unmask,
+	.cpu_die = mpc85xx_cpu_die,
+	.cpu_up_prepare = mpc85xx_cpu_up_prepare,
+};
+
+int __init mpc85xx_setup_pmc(void)
+{
+	struct device_node *np;
+
+	np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
+	if (np) {
+		guts = of_iomap(np, 0);
+		of_node_put(np);
+		if (!guts) {
+			pr_err("Could not map guts node address\n");
+			return -ENOMEM;
+		}
+	}
+
+	qoriq_pm_ops = &mpc85xx_pm_ops;
+
+	return 0;
+}
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 4a78416..ab0459d 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -2,7 +2,7 @@
  * Author: Andy Fleming <afleming@freescale.com>
  * 	   Kumar Gala <galak@kernel.crashing.org>
  *
- * Copyright 2006-2008, 2011-2012 Freescale Semiconductor Inc.
+ * Copyright 2006-2008, 2011-2012, 2015 Freescale Semiconductor Inc.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
@@ -15,7 +15,6 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/of.h>
-#include <linux/of_address.h>
 #include <linux/kexec.h>
 #include <linux/highmem.h>
 #include <linux/cpu.h>
@@ -29,6 +28,7 @@
 #include <asm/dbell.h>
 #include <asm/code-patching.h>
 #include <asm/cputhreads.h>
+#include <asm/fsl_pm.h>
 
 #include <sysdev/fsl_soc.h>
 #include <sysdev/mpic.h>
@@ -43,24 +43,11 @@ struct epapr_spin_table {
 	u32	pir;
 };
 
-static struct ccsr_guts __iomem *guts;
+#ifdef CONFIG_HOTPLUG_CPU
 static u64 timebase;
 static int tb_req;
 static int tb_valid;
 
-static void mpc85xx_timebase_freeze(int freeze)
-{
-	uint32_t mask;
-
-	mask = CCSR_GUTS_DEVDISR_TB0 | CCSR_GUTS_DEVDISR_TB1;
-	if (freeze)
-		setbits32(&guts->devdisr, mask);
-	else
-		clrbits32(&guts->devdisr, mask);
-
-	in_be32(&guts->devdisr);
-}
-
 static void mpc85xx_give_timebase(void)
 {
 	unsigned long flags;
@@ -71,7 +58,7 @@ static void mpc85xx_give_timebase(void)
 		barrier();
 	tb_req = 0;
 
-	mpc85xx_timebase_freeze(1);
+	qoriq_pm_ops->freeze_time_base(true);
 #ifdef CONFIG_PPC64
 	/*
 	 * e5500/e6500 have a workaround for erratum A-006958 in place
@@ -104,7 +91,7 @@ static void mpc85xx_give_timebase(void)
 	while (tb_valid)
 		barrier();
 
-	mpc85xx_timebase_freeze(0);
+	qoriq_pm_ops->freeze_time_base(false);
 
 	local_irq_restore(flags);
 }
@@ -126,31 +113,25 @@ static void mpc85xx_take_timebase(void)
 	local_irq_restore(flags);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static void smp_85xx_mach_cpu_die(void)
 {
 	unsigned int cpu = smp_processor_id();
-	u32 tmp;
 
 	local_irq_disable();
+	hard_irq_disable();
+	/* mask all irqs to prevent cpu wakeup */
+	qoriq_pm_ops->irq_mask(cpu);
+
 	idle_task_exit();
-	generic_set_cpu_dead(cpu);
-	mb();
 
 	mtspr(SPRN_TCR, 0);
+	mtspr(SPRN_TSR, mfspr(SPRN_TSR));
 
-	cur_cpu_spec->cpu_down_flush();
+	generic_set_cpu_dead(cpu);
 
-	tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
-	mtspr(SPRN_HID0, tmp);
-	isync();
+	cur_cpu_spec->cpu_down_flush();
 
-	/* Enter NAP mode. */
-	tmp = mfmsr();
-	tmp |= MSR_WE;
-	mb();
-	mtmsr(tmp);
-	isync();
+	qoriq_pm_ops->cpu_die(cpu);
 
 	while (1)
 		;
@@ -468,16 +449,6 @@ static void smp_85xx_setup_cpu(int cpu_nr)
 	smp_85xx_basic_setup(cpu_nr);
 }
 
-static const struct of_device_id mpc85xx_smp_guts_ids[] = {
-	{ .compatible = "fsl,mpc8572-guts", },
-	{ .compatible = "fsl,p1020-guts", },
-	{ .compatible = "fsl,p1021-guts", },
-	{ .compatible = "fsl,p1022-guts", },
-	{ .compatible = "fsl,p1023-guts", },
-	{ .compatible = "fsl,p2020-guts", },
-	{},
-};
-
 void __init mpc85xx_smp_init(void)
 {
 	struct device_node *np;
@@ -501,22 +472,16 @@ void __init mpc85xx_smp_init(void)
 		smp_85xx_ops.probe = NULL;
 	}
 
-	np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
-	if (np) {
-		guts = of_iomap(np, 0);
-		of_node_put(np);
-		if (!guts) {
-			pr_err("%s: Could not map guts node address\n",
-								__func__);
-			return;
-		}
+#ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_FSL_PMC
+	mpc85xx_setup_pmc();
+#endif
+	if (qoriq_pm_ops) {
 		smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
 		smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
-#ifdef CONFIG_HOTPLUG_CPU
 		ppc_md.cpu_die = smp_85xx_mach_cpu_die;
-#endif
 	}
-
+#endif
 	smp_ops = &smp_85xx_ops;
 
 #ifdef CONFIG_KEXEC
diff --git a/arch/powerpc/platforms/85xx/smp.h b/arch/powerpc/platforms/85xx/smp.h
index e2b4493..0b20ae3 100644
--- a/arch/powerpc/platforms/85xx/smp.h
+++ b/arch/powerpc/platforms/85xx/smp.h
@@ -5,6 +5,7 @@
 
 #ifdef CONFIG_SMP
 void __init mpc85xx_smp_init(void);
+int __init mpc85xx_setup_pmc(void);
 #else
 static inline void mpc85xx_smp_init(void)
 {
-- 
1.9.1


* [PATCH v3 5/6] powerpc/mpc85xx: Add hotplug support on E5500 and E500MC cores
  2015-11-20  9:13 [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1 Chenhui Zhao
                   ` (2 preceding siblings ...)
  2015-11-20  9:14 ` [PATCH v3 4/6] powerpc/mpc85xx: refactor the PM operations Chenhui Zhao
@ 2015-11-20  9:14 ` Chenhui Zhao
  2015-11-20  9:14 ` [PATCH v3 6/6] powerpc/mpc85xx: Add CPU hotplug support for E6500 Chenhui Zhao
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 14+ messages in thread
From: Chenhui Zhao @ 2015-11-20  9:14 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: scottwood

Freescale E500MC and E5500 core-based platforms, like P4080 and T1040,
support disabling/enabling CPUs dynamically.
This patch adds this feature on those platforms.

Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
Signed-off-by: Tang Yuantian <Yuantian.Tang@freescale.com>
---

changes for v3
* move changes in arch/powerpc/platforms/85xx/smp.h to patch 2
* get rid of unused primary_hw
* check_cpu_dead() renamed to is_cpu_dead()
* move the declaration of fsl_rcpm_init() to patch 2/5

major changes for v2:
* factor out smp_85xx_start_cpu()
* move fsl_rcpm_init() into mpc85xx_smp_init() due to the init sequence
* add hard_irq_disable() after local_irq_save(); on platforms that
  implement lazy enabling/disabling of interrupts, this ensures
  interrupts are physically disabled (see the sketch below)
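
A minimal sketch of that point (not part of the patch; "flags" is the usual
local variable): on 64-bit, local_irq_save() may leave interrupts only
soft-disabled, so the hard disable follows it wherever the CPU must not be
woken by a hardware interrupt:

	local_irq_save(flags);
	hard_irq_disable();	/* physically mask interrupts, not just soft-disable */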

 arch/powerpc/Kconfig              |   2 +-
 arch/powerpc/include/asm/smp.h    |   3 +
 arch/powerpc/kernel/smp.c         |   7 +-
 arch/powerpc/platforms/85xx/smp.c | 198 +++++++++++++++++++++-----------------
 4 files changed, 120 insertions(+), 90 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index db49e0d..1093143 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -391,7 +391,7 @@ config SWIOTLB
 config HOTPLUG_CPU
 	bool "Support for enabling/disabling CPUs"
 	depends on SMP && (PPC_PSERIES || \
-	PPC_PMAC || PPC_POWERNV || (PPC_85xx && !PPC_E500MC))
+	PPC_PMAC || PPC_POWERNV || FSL_SOC_BOOKE)
 	---help---
 	  Say Y here to be able to disable and re-enable individual
 	  CPUs at runtime on SMP machines.
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 825663c..bdb8111 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -67,6 +67,9 @@ void generic_cpu_die(unsigned int cpu);
 void generic_set_cpu_dead(unsigned int cpu);
 void generic_set_cpu_up(unsigned int cpu);
 int generic_check_cpu_restart(unsigned int cpu);
+int is_cpu_dead(unsigned int cpu);
+#else
+#define generic_set_cpu_up(i)	do { } while (0)
 #endif
 
 #ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index ec9ec20..8575d04 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -427,7 +427,7 @@ void generic_cpu_die(unsigned int cpu)
 
 	for (i = 0; i < 100; i++) {
 		smp_rmb();
-		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
+		if (is_cpu_dead(cpu))
 			return;
 		msleep(100);
 	}
@@ -454,6 +454,11 @@ int generic_check_cpu_restart(unsigned int cpu)
 	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
 }
 
+int is_cpu_dead(unsigned int cpu)
+{
+	return per_cpu(cpu_state, cpu) == CPU_DEAD;
+}
+
 static bool secondaries_inhibited(void)
 {
 	return kvm_hv_mode_active();
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index ab0459d..0f56dd5 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -10,6 +10,8 @@
  * option) any later version.
  */
 
+#define pr_fmt(fmt) "smp: %s: " fmt, __func__
+
 #include <linux/stddef.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -53,6 +55,7 @@ static void mpc85xx_give_timebase(void)
 	unsigned long flags;
 
 	local_irq_save(flags);
+	hard_irq_disable();
 
 	while (!tb_req)
 		barrier();
@@ -101,6 +104,7 @@ static void mpc85xx_take_timebase(void)
 	unsigned long flags;
 
 	local_irq_save(flags);
+	hard_irq_disable();
 
 	tb_req = 1;
 	while (!tb_valid)
@@ -136,8 +140,31 @@ static void smp_85xx_mach_cpu_die(void)
 	while (1)
 		;
 }
+
+static void qoriq_cpu_kill(unsigned int cpu)
+{
+	int i;
+
+	for (i = 0; i < 500; i++) {
+		if (is_cpu_dead(cpu)) {
+#ifdef CONFIG_PPC64
+			paca[cpu].cpu_start = 0;
+#endif
+			return;
+		}
+		msleep(20);
+	}
+	pr_err("CPU%d didn't die...\n", cpu);
+}
 #endif
 
+/*
+ * To keep it compatible with old boot program which uses
+ * cache-inhibit spin table, we need to flush the cache
+ * before accessing spin table to invalidate any staled data.
+ * We also need to flush the cache after writing to spin
+ * table to push data out.
+ */
 static inline void flush_spin_table(void *spin_table)
 {
 	flush_dcache_range((ulong)spin_table,
@@ -176,57 +203,20 @@ static void wake_hw_thread(void *info)
 }
 #endif
 
-static int smp_85xx_kick_cpu(int nr)
+static int smp_85xx_start_cpu(int cpu)
 {
-	unsigned long flags;
-	const u64 *cpu_rel_addr;
-	__iomem struct epapr_spin_table *spin_table;
+	int ret = 0;
 	struct device_node *np;
-	int hw_cpu = get_hard_smp_processor_id(nr);
+	const u64 *cpu_rel_addr;
+	unsigned long flags;
 	int ioremappable;
-	int ret = 0;
-
-	WARN_ON(nr < 0 || nr >= NR_CPUS);
-	WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
-
-	pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);
-
-#ifdef CONFIG_PPC64
-	/* Threads don't use the spin table */
-	if (cpu_thread_in_core(nr) != 0) {
-		int primary = cpu_first_thread_sibling(nr);
-
-		if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
-			return -ENOENT;
+	int hw_cpu = get_hard_smp_processor_id(cpu);
+	struct epapr_spin_table __iomem *spin_table;
 
-		if (cpu_thread_in_core(nr) != 1) {
-			pr_err("%s: cpu %d: invalid hw thread %d\n",
-			       __func__, nr, cpu_thread_in_core(nr));
-			return -ENOENT;
-		}
-
-		if (!cpu_online(primary)) {
-			pr_err("%s: cpu %d: primary %d not online\n",
-			       __func__, nr, primary);
-			return -ENOENT;
-		}
-
-		smp_call_function_single(primary, wake_hw_thread, &nr, 0);
-		return 0;
-	} else if (cpu_thread_in_core(boot_cpuid) != 0 &&
-		   cpu_first_thread_sibling(boot_cpuid) == nr) {
-		if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
-			return -ENOENT;
-
-		smp_call_function_single(boot_cpuid, wake_hw_thread, &nr, 0);
-	}
-#endif
-
-	np = of_get_cpu_node(nr, NULL);
+	np = of_get_cpu_node(cpu, NULL);
 	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);
-
-	if (cpu_rel_addr == NULL) {
-		printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
+	if (!cpu_rel_addr) {
+		pr_err("No cpu-release-addr for cpu %d\n", cpu);
 		return -ENOENT;
 	}
 
@@ -246,28 +236,18 @@ static int smp_85xx_kick_cpu(int nr)
 		spin_table = phys_to_virt(*cpu_rel_addr);
 
 	local_irq_save(flags);
-#ifdef CONFIG_PPC32
-#ifdef CONFIG_HOTPLUG_CPU
-	/* Corresponding to generic_set_cpu_dead() */
-	generic_set_cpu_up(nr);
+	hard_irq_disable();
 
-	if (system_state == SYSTEM_RUNNING) {
-		/*
-		 * To keep it compatible with old boot program which uses
-		 * cache-inhibit spin table, we need to flush the cache
-		 * before accessing spin table to invalidate any staled data.
-		 * We also need to flush the cache after writing to spin
-		 * table to push data out.
-		 */
-		flush_spin_table(spin_table);
-		out_be32(&spin_table->addr_l, 0);
-		flush_spin_table(spin_table);
+	if (qoriq_pm_ops)
+		qoriq_pm_ops->cpu_up_prepare(cpu);
 
+	/* if cpu is not spinning, reset it */
+	if (read_spin_table_addr_l(spin_table) != 1) {
 		/*
 		 * We don't set the BPTR register here since it already points
 		 * to the boot page properly.
 		 */
-		mpic_reset_core(nr);
+		mpic_reset_core(cpu);
 
 		/*
 		 * wait until core is ready...
@@ -277,40 +257,23 @@ static int smp_85xx_kick_cpu(int nr)
 		if (!spin_event_timeout(
 				read_spin_table_addr_l(spin_table) == 1,
 				10000, 100)) {
-			pr_err("%s: timeout waiting for core %d to reset\n",
-							__func__, hw_cpu);
-			ret = -ENOENT;
-			goto out;
+			pr_err("timeout waiting for cpu %d to reset\n",
+				hw_cpu);
+			ret = -EAGAIN;
+			goto err;
 		}
-
-		/*  clear the acknowledge status */
-		__secondary_hold_acknowledge = -1;
 	}
-#endif
-	flush_spin_table(spin_table);
-	out_be32(&spin_table->pir, hw_cpu);
-	out_be32(&spin_table->addr_l, __pa(__early_start));
-	flush_spin_table(spin_table);
-
-	/* Wait a bit for the CPU to ack. */
-	if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
-					10000, 100)) {
-		pr_err("%s: timeout waiting for core %d to ack\n",
-						__func__, hw_cpu);
-		ret = -ENOENT;
-		goto out;
-	}
-out:
-#else
-	smp_generic_kick_cpu(nr);
 
 	flush_spin_table(spin_table);
 	out_be32(&spin_table->pir, hw_cpu);
+#ifdef CONFIG_PPC64
 	out_be64((u64 *)(&spin_table->addr_h),
 		__pa(ppc_function_entry(generic_secondary_smp_init)));
-	flush_spin_table(spin_table);
+#else
+	out_be32(&spin_table->addr_l, __pa(__early_start));
 #endif
-
+	flush_spin_table(spin_table);
+err:
 	local_irq_restore(flags);
 
 	if (ioremappable)
@@ -319,6 +282,60 @@ out:
 	return ret;
 }
 
+static int smp_85xx_kick_cpu(int nr)
+{
+	int ret = 0;
+#ifdef CONFIG_PPC64
+	int primary = nr;
+#endif
+
+	WARN_ON(nr < 0 || nr >= num_possible_cpus());
+
+	pr_debug("kick CPU #%d\n", nr);
+
+#ifdef CONFIG_PPC64
+	/* Threads don't use the spin table */
+	if (cpu_thread_in_core(nr) != 0) {
+		int primary = cpu_first_thread_sibling(nr);
+
+		if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
+			return -ENOENT;
+
+		if (cpu_thread_in_core(nr) != 1) {
+			pr_err("%s: cpu %d: invalid hw thread %d\n",
+			       __func__, nr, cpu_thread_in_core(nr));
+			return -ENOENT;
+		}
+
+		if (!cpu_online(primary)) {
+			pr_err("%s: cpu %d: primary %d not online\n",
+			       __func__, nr, primary);
+			return -ENOENT;
+		}
+
+		smp_call_function_single(primary, wake_hw_thread, &nr, 0);
+		return 0;
+	}
+
+	ret = smp_85xx_start_cpu(primary);
+	if (ret)
+		return ret;
+
+	paca[nr].cpu_start = 1;
+	generic_set_cpu_up(nr);
+
+	return ret;
+#else
+	ret = smp_85xx_start_cpu(nr);
+	if (ret)
+		return ret;
+
+	generic_set_cpu_up(nr);
+
+	return ret;
+#endif
+}
+
 struct smp_ops_t smp_85xx_ops = {
 	.kick_cpu = smp_85xx_kick_cpu,
 	.cpu_bootable = smp_generic_cpu_bootable,
@@ -473,6 +490,10 @@ void __init mpc85xx_smp_init(void)
 	}
 
 #ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_FSL_CORENET_RCPM
+	fsl_rcpm_init();
+#endif
+
 #ifdef CONFIG_FSL_PMC
 	mpc85xx_setup_pmc();
 #endif
@@ -480,6 +501,7 @@ void __init mpc85xx_smp_init(void)
 		smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
 		smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
 		ppc_md.cpu_die = smp_85xx_mach_cpu_die;
+		smp_85xx_ops.cpu_die = qoriq_cpu_kill;
 	}
 #endif
 	smp_ops = &smp_85xx_ops;
-- 
1.9.1


* [PATCH v3 6/6] powerpc/mpc85xx: Add CPU hotplug support for E6500
  2015-11-20  9:13 [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1 Chenhui Zhao
                   ` (3 preceding siblings ...)
  2015-11-20  9:14 ` [PATCH v3 5/6] powerpc/mpc85xx: Add hotplug support on E5500 and E500MC cores Chenhui Zhao
@ 2015-11-20  9:14 ` Chenhui Zhao
  2015-12-02 11:04 ` [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1 Chenhui Zhao
  2015-12-02 12:12 ` Denis Kirjanov
  6 siblings, 0 replies; 14+ messages in thread
From: Chenhui Zhao @ 2015-11-20  9:14 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: scottwood

Support Freescale E6500 core-based platforms, like T4240.
Support disabling/enabling individual CPU threads dynamically.

Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
---

changes for v3
* rebase patches on the latest code
* add const for info in wake_hw_thread()
* use r8 instead of r13 in the assembly code
* added some comments in code
* checked SPRN_BUCSR to see whether the current thread has been initialized
* changed the flow in smp_85xx_kick_cpu()
* changed the code that starts a thread in generic_secondary_smp_init so that it does not
  depend on which thread it is running on

major changes for v2:
* start Thread1 via Thread0 when we want to boot only Thread1, replacing
  the earlier method of changing the cpu physical id (sketched below)
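
A sketch of that v2 approach (not part of the patch; simplified from the
diff below): to boot only Thread1 of a fully offline core, record the wanted
thread and kick Thread0 through the spin table; Thread0 then starts Thread1
from generic_secondary_smp_init() and stops itself:

	booting_thread_hwid = cpu_thread_in_core(nr);		 /* e.g. 1 */
	ret = smp_85xx_start_cpu(cpu_first_thread_sibling(nr)); /* kick Thread0 */
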
 arch/powerpc/include/asm/cputhreads.h |  6 +++
 arch/powerpc/include/asm/smp.h        |  1 +
 arch/powerpc/kernel/head_64.S         | 78 +++++++++++++++++++++++++++++++++++
 arch/powerpc/platforms/85xx/smp.c     | 70 +++++++++++++++++--------------
 4 files changed, 124 insertions(+), 31 deletions(-)

diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index e5a769d..9ec41e5 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_POWERPC_CPUTHREADS_H
 #define _ASM_POWERPC_CPUTHREADS_H
 
+#ifndef __ASSEMBLY__
 #include <linux/cpumask.h>
 
 /*
@@ -102,7 +103,12 @@ static inline u32 get_tensr(void)
 		return 1;
 }
 
+void book3e_start_thread(int thread, unsigned long addr);
 void book3e_stop_thread(int thread);
 
+#endif /* __ASSEMBLY__ */
+
+#define INVALID_THREAD_HWID	0x0fff
+
 #endif /* _ASM_POWERPC_CPUTHREADS_H */
 
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index bdb8111..174271e 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -200,6 +200,7 @@ extern void generic_secondary_thread_init(void);
 extern unsigned long __secondary_hold_spinloop;
 extern unsigned long __secondary_hold_acknowledge;
 extern char __secondary_hold;
+extern unsigned int booting_thread_hwid;
 
 extern void __early_start(void);
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 6036253..2916283 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -40,6 +40,7 @@
 #include <asm/kvm_book3s_asm.h>
 #include <asm/ptrace.h>
 #include <asm/hw_irq.h>
+#include <asm/cputhreads.h>
 
 /* The physical memory is laid out such that the secondary processor
  * spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -182,6 +183,45 @@ exception_marker:
 
 #ifdef CONFIG_PPC_BOOK3E
 /*
+ * The booting_thread_hwid holds the thread id we want to boot in cpu
+ * hotplug case. It is set by cpu hotplug code, and is invalid by default.
+ * The thread id is the same as the initial value of SPRN_PIR[THREAD_ID]
+ * bit field.
+ */
+	.globl	booting_thread_hwid
+booting_thread_hwid:
+	.long  INVALID_THREAD_HWID
+	.align 3
+/*
+ * start a thread in the same core
+ * input parameters:
+ * r3 = the thread physical id
+ * r4 = the entry point where thread starts
+ */
+_GLOBAL(book3e_start_thread)
+	LOAD_REG_IMMEDIATE(r5, MSR_KERNEL)
+	cmpi	0, r3, 0
+	beq	10f
+	cmpi	0, r3, 1
+	beq	11f
+	/* If the thread id is invalid, just exit. */
+	b	13f
+10:
+	mttmr	TMRN_IMSR0, r5
+	mttmr	TMRN_INIA0, r4
+	b	12f
+11:
+	mttmr	TMRN_IMSR1, r5
+	mttmr	TMRN_INIA1, r4
+12:
+	isync
+	li	r6, 1
+	sld	r6, r6, r3
+	mtspr	SPRN_TENS, r6
+13:
+	blr
+
+/*
  * stop a thread in the same core
  * input parameter:
  * r3 = the thread physical id
@@ -280,6 +320,44 @@ _GLOBAL(generic_secondary_smp_init)
 	mr	r3,r24
 	mr	r4,r25
 	bl	book3e_secondary_core_init
+
+/*
+ * After common core init has finished, check if the current thread is the
+ * one we wanted to boot. If not, start the specified thread and stop the
+ * current thread.
+ */
+	LOAD_REG_ADDR(r4, booting_thread_hwid)
+	lwz     r3, 0(r4)
+	li	r5, INVALID_THREAD_HWID
+	cmpw	r3, r5
+	beq	20f
+
+	/*
+	 * The value of booting_thread_hwid has been stored in r3,
+	 * so make it invalid.
+	 */
+	stw	r5, 0(r4)
+
+	/*
+	 * Get the current thread id and check if it is the one we wanted.
+	 * If not, start the one specified in booting_thread_hwid and stop
+	 * the current thread.
+	 */
+	mfspr	r8, SPRN_TIR
+	cmpw	r3, r8
+	beq	20f
+
+	/* start the specified thread */
+	LOAD_REG_ADDR(r5, fsl_secondary_thread_init)
+	ld	r4, 0(r5)
+	bl	book3e_start_thread
+
+	/* stop the current thread */
+	mr	r3, r8
+	bl	book3e_stop_thread
+10:
+	b	10b
+20:
 #endif
 
 generic_secondary_common_init:
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 0f56dd5..cc22737 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -182,24 +182,11 @@ static inline u32 read_spin_table_addr_l(void *spin_table)
 static void wake_hw_thread(void *info)
 {
 	void fsl_secondary_thread_init(void);
-	unsigned long imsr, inia;
-	int nr = *(const int *)info;
+	unsigned long inia;
+	int cpu = *(const int *)info;
 
-	imsr = MSR_KERNEL;
 	inia = *(unsigned long *)fsl_secondary_thread_init;
-
-	if (cpu_thread_in_core(nr) == 0) {
-		/* For when we boot on a secondary thread with kdump */
-		mttmr(TMRN_IMSR0, imsr);
-		mttmr(TMRN_INIA0, inia);
-		mtspr(SPRN_TENS, TEN_THREAD(0));
-	} else {
-		mttmr(TMRN_IMSR1, imsr);
-		mttmr(TMRN_INIA1, inia);
-		mtspr(SPRN_TENS, TEN_THREAD(1));
-	}
-
-	smp_generic_kick_cpu(nr);
+	book3e_start_thread(cpu_thread_in_core(cpu), inia);
 }
 #endif
 
@@ -294,33 +281,54 @@ static int smp_85xx_kick_cpu(int nr)
 	pr_debug("kick CPU #%d\n", nr);
 
 #ifdef CONFIG_PPC64
-	/* Threads don't use the spin table */
-	if (cpu_thread_in_core(nr) != 0) {
-		int primary = cpu_first_thread_sibling(nr);
-
+	if (threads_per_core == 2) {
 		if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
 			return -ENOENT;
 
-		if (cpu_thread_in_core(nr) != 1) {
-			pr_err("%s: cpu %d: invalid hw thread %d\n",
-			       __func__, nr, cpu_thread_in_core(nr));
-			return -ENOENT;
-		}
+		booting_thread_hwid = cpu_thread_in_core(nr);
+		primary = cpu_first_thread_sibling(nr);
 
-		if (!cpu_online(primary)) {
-			pr_err("%s: cpu %d: primary %d not online\n",
-			       __func__, nr, primary);
-			return -ENOENT;
+		if (qoriq_pm_ops)
+			qoriq_pm_ops->cpu_up_prepare(nr);
+
+		/*
+		 * If either thread in the core is online, use it to start
+		 * the other.
+		 */
+		if (cpu_online(primary)) {
+			smp_call_function_single(primary,
+					wake_hw_thread, &nr, 1);
+			goto done;
+		} else if (cpu_online(primary + 1)) {
+			smp_call_function_single(primary + 1,
+					wake_hw_thread, &nr, 1);
+			goto done;
 		}
 
-		smp_call_function_single(primary, wake_hw_thread, &nr, 0);
-		return 0;
+		/*
+		 * If getting here, it means both threads in the core are
+		 * offline. So start the primary thread, then it will start
+		 * the thread specified in booting_thread_hwid, the one
+		 * corresponding to nr.
+		 */
+
+	} else if (threads_per_core == 1) {
+		/*
+		 * If one core has only one thread, set booting_thread_hwid to
+		 * an invalid value.
+		 */
+		booting_thread_hwid = INVALID_THREAD_HWID;
+
+	} else if (threads_per_core > 2) {
+		pr_err("Do not support more than 2 threads per CPU.");
+		return -EINVAL;
 	}
 
 	ret = smp_85xx_start_cpu(primary);
 	if (ret)
 		return ret;
 
+done:
 	paca[nr].cpu_start = 1;
 	generic_set_cpu_up(nr);
 
-- 
1.9.1
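
For orientation, here is a condensed sketch of the kick flow the diff above
implements for e6500 (threads_per_core == 2). It is illustrative only: the
names follow the patch, but error handling and the qoriq_pm_ops hook are
omitted, and the helper name kick_e6500_thread is made up for the sketch.

/* Illustrative sketch of the flow above, not the literal patch code. */
static int kick_e6500_thread(int nr)
{
	int primary = cpu_first_thread_sibling(nr);

	/* Tell generic_secondary_smp_init() which thread we want to boot. */
	booting_thread_hwid = cpu_thread_in_core(nr);

	if (cpu_online(primary) || cpu_online(primary + 1)) {
		/* A live sibling can start the target thread directly. */
		int helper = cpu_online(primary) ? primary : primary + 1;

		smp_call_function_single(helper, wake_hw_thread, &nr, 1);
	} else {
		/*
		 * Both threads are offline: release the core through the
		 * spin table. generic_secondary_smp_init() then reads
		 * booting_thread_hwid and, if it came up on the other
		 * thread, starts the wanted one and stops itself.
		 */
		smp_85xx_start_cpu(primary);
	}

	paca[nr].cpu_start = 1;
	generic_set_cpu_up(nr);
	return 0;
}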

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* Re: [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1
  2015-11-20  9:13 [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1 Chenhui Zhao
                   ` (4 preceding siblings ...)
  2015-11-20  9:14 ` [PATCH v3 6/6] powerpc/mpc85xx: Add CPU hotplug support for E6500 Chenhui Zhao
@ 2015-12-02 11:04 ` Chenhui Zhao
  2015-12-02 12:12 ` Denis Kirjanov
  6 siblings, 0 replies; 14+ messages in thread
From: Chenhui Zhao @ 2015-12-02 11:04 UTC (permalink / raw)
  To: linuxppc-dev, scottwood

Hi Scott,

Any comments on these patches?

Thanks,
Chenhui


On Fri, Nov 20, 2015 at 5:13 PM, Chenhui Zhao 
<chenhui.zhao@freescale.com> wrote:
> On e6500, in the case of cpu hotplug, either thread in one core
> may be the first thread initilzing the TLB1. The subsequent threads
> must not setup it again.
> 
> The code is derived from the comment of Scott Wood.
> 
> Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
> ---
>  arch/powerpc/include/asm/cputhreads.h | 7 +++++++
>  arch/powerpc/mm/tlb_nohash.c          | 4 +---
>  2 files changed, 8 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/cputhreads.h 
> b/arch/powerpc/include/asm/cputhreads.h
> index ba42e46..b56cece 100644
> --- a/arch/powerpc/include/asm/cputhreads.h
> +++ b/arch/powerpc/include/asm/cputhreads.h
> @@ -94,6 +94,13 @@ static inline int cpu_last_thread_sibling(int cpu)
>  	return cpu | (threads_per_core - 1);
>  }
> 
> +static inline u32 get_tensr(void)
> +{
> +	if (cpu_has_feature(CPU_FTR_SMT))
> +		return mfspr(SPRN_TENSR);
> +	else
> +		return 1;
> +}
> 
> 
>  #endif /* _ASM_POWERPC_CPUTHREADS_H */
> diff --git a/arch/powerpc/mm/tlb_nohash.c 
> b/arch/powerpc/mm/tlb_nohash.c
> index bb04e4d..f466848 100644
> --- a/arch/powerpc/mm/tlb_nohash.c
> +++ b/arch/powerpc/mm/tlb_nohash.c
> @@ -640,9 +640,7 @@ static void early_init_this_mmu(void)
>  		 * transient mapping would cause problems.
>  		 */
>  #ifdef CONFIG_SMP
> -		if (cpu != boot_cpuid &&
> -		    (cpu != cpu_first_thread_sibling(cpu) ||
> -		     cpu == cpu_first_thread_sibling(boot_cpuid)))
> +		if (hweight32(get_tensr()) > 1)
>  			map = false;
>  #endif
> 
> --
> 1.9.1
> 

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1
  2015-11-20  9:13 [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1 Chenhui Zhao
                   ` (5 preceding siblings ...)
  2015-12-02 11:04 ` [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1 Chenhui Zhao
@ 2015-12-02 12:12 ` Denis Kirjanov
  2015-12-03 11:27   ` Chenhui Zhao
  2015-12-03 19:26   ` Scott Wood
  6 siblings, 2 replies; 14+ messages in thread
From: Denis Kirjanov @ 2015-12-02 12:12 UTC (permalink / raw)
  To: Chenhui Zhao; +Cc: linuxppc-dev, scottwood

On 11/20/15, Chenhui Zhao <chenhui.zhao@freescale.com> wrote:
> On e6500, in the case of cpu hotplug, either thread in one core
> may be the first thread initilzing the TLB1. The subsequent threads
> must not setup it again.
>
> The code is derived from the comment of Scott Wood.
>
> Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
> ---
>  arch/powerpc/include/asm/cputhreads.h | 7 +++++++
>  arch/powerpc/mm/tlb_nohash.c          | 4 +---
>  2 files changed, 8 insertions(+), 3 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/cputhreads.h
> b/arch/powerpc/include/asm/cputhreads.h
> index ba42e46..b56cece 100644
> --- a/arch/powerpc/include/asm/cputhreads.h
> +++ b/arch/powerpc/include/asm/cputhreads.h
> @@ -94,6 +94,13 @@ static inline int cpu_last_thread_sibling(int cpu)
>  	return cpu | (threads_per_core - 1);
>  }
>
> +static inline u32 get_tensr(void)
> +{
> +	if (cpu_has_feature(CPU_FTR_SMT))
> +		return mfspr(SPRN_TENSR);
> +	else
> +		return 1;
> +}
If I get it right, SPRN_TENSR is used in the code only if CONFIG_PPC64
is defined. Then we can make it a no-op on ppc32.

Thanks!

>
>  #endif /* _ASM_POWERPC_CPUTHREADS_H */
> diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
> index bb04e4d..f466848 100644
> --- a/arch/powerpc/mm/tlb_nohash.c
> +++ b/arch/powerpc/mm/tlb_nohash.c
> @@ -640,9 +640,7 @@ static void early_init_this_mmu(void)
>  		 * transient mapping would cause problems.
>  		 */
>  #ifdef CONFIG_SMP
> -		if (cpu != boot_cpuid &&
> -		    (cpu != cpu_first_thread_sibling(cpu) ||
> -		     cpu == cpu_first_thread_sibling(boot_cpuid)))
> +		if (hweight32(get_tensr()) > 1)
>  			map = false;
>  #endif
>
> --
> 1.9.1
>
> _______________________________________________
> Linuxppc-dev mailing list
> Linuxppc-dev@lists.ozlabs.org
> https://lists.ozlabs.org/listinfo/linuxppc-dev

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1
  2015-12-02 12:12 ` Denis Kirjanov
@ 2015-12-03 11:27   ` Chenhui Zhao
  2015-12-23 21:27     ` Scott Wood
  2015-12-03 19:26   ` Scott Wood
  1 sibling, 1 reply; 14+ messages in thread
From: Chenhui Zhao @ 2015-12-03 11:27 UTC (permalink / raw)
  To: Denis Kirjanov; +Cc: linuxppc-dev, scottwood



On Wed, Dec 2, 2015 at 8:12 PM, Denis Kirjanov <kda@linux-powerpc.org> 
wrote:
> On 11/20/15, Chenhui Zhao <chenhui.zhao@freescale.com> wrote:
>>  On e6500, in the case of cpu hotplug, either thread in one core
>>  may be the first thread initilzing the TLB1. The subsequent threads
>>  must not setup it again.
>> 
>>  The code is derived from the comment of Scott Wood.
>> 
>>  Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
>>  ---
>>   arch/powerpc/include/asm/cputhreads.h | 7 +++++++
>>   arch/powerpc/mm/tlb_nohash.c          | 4 +---
>>   2 files changed, 8 insertions(+), 3 deletions(-)
>> 
>>  diff --git a/arch/powerpc/include/asm/cputhreads.h
>>  b/arch/powerpc/include/asm/cputhreads.h
>>  index ba42e46..b56cece 100644
>>  --- a/arch/powerpc/include/asm/cputhreads.h
>>  +++ b/arch/powerpc/include/asm/cputhreads.h
>>  @@ -94,6 +94,13 @@ static inline int cpu_last_thread_sibling(int 
>> cpu)
>>   	return cpu | (threads_per_core - 1);
>>   }
>> 
>>  +static inline u32 get_tensr(void)
>>  +{
>>  +	if (cpu_has_feature(CPU_FTR_SMT))
>>  +		return mfspr(SPRN_TENSR);
>>  +	else
>>  +		return 1;
>>  +}
> If i get it right, SPRN_TENSR used in the code only if CONFIG_PPC64
> is defined. Then we can make it noop on ppc32.
> 
> Thanks!

Yeah, SPRN_TENSR is defined when CONFIG_BOOKE or CONFIG_40x is enabled. 
I'd like to change the code like this:

static inline u32 get_tensr(void)
{
#ifdef CONFIG_BOOKE
        if (cpu_has_feature(CPU_FTR_SMT))
                return mfspr(SPRN_TENSR);
#endif
        return 1;
}

Thanks,
Chenhui

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1
  2015-12-02 12:12 ` Denis Kirjanov
  2015-12-03 11:27   ` Chenhui Zhao
@ 2015-12-03 19:26   ` Scott Wood
  2015-12-04  8:04     ` Denis Kirjanov
  1 sibling, 1 reply; 14+ messages in thread
From: Scott Wood @ 2015-12-03 19:26 UTC (permalink / raw)
  To: Denis Kirjanov, Chenhui Zhao; +Cc: linuxppc-dev

On Wed, 2015-12-02 at 15:12 +0300, Denis Kirjanov wrote:
> On 11/20/15, Chenhui Zhao <chenhui.zhao@freescale.com> wrote:
> > On e6500, in the case of cpu hotplug, either thread in one core
> > may be the first thread initilzing the TLB1. The subsequent threads
> > must not setup it again.
> > 
> > The code is derived from the comment of Scott Wood.
> > 
> > Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
> > ---
> >  arch/powerpc/include/asm/cputhreads.h | 7 +++++++
> >  arch/powerpc/mm/tlb_nohash.c          | 4 +---
> >  2 files changed, 8 insertions(+), 3 deletions(-)
> > 
> > diff --git a/arch/powerpc/include/asm/cputhreads.h
> > b/arch/powerpc/include/asm/cputhreads.h
> > index ba42e46..b56cece 100644
> > --- a/arch/powerpc/include/asm/cputhreads.h
> > +++ b/arch/powerpc/include/asm/cputhreads.h
> > @@ -94,6 +94,13 @@ static inline int cpu_last_thread_sibling(int cpu)
> >  	return cpu | (threads_per_core - 1);
> >  }
> > 
> > +static inline u32 get_tensr(void)
> > +{
> > +	if (cpu_has_feature(CPU_FTR_SMT))
> > +		return mfspr(SPRN_TENSR);
> > +	else
> > +		return 1;
> > +}
> If i get it right, SPRN_TENSR used in the code only if CONFIG_PPC64
> is defined. Then we can make it noop on ppc32.

Please don't.  It accomplishes nothing other than adding an obstacle to
supporting this on ppc32.

-Scott

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1
  2015-12-03 19:26   ` Scott Wood
@ 2015-12-04  8:04     ` Denis Kirjanov
  2015-12-04 19:48       ` Scott Wood
  0 siblings, 1 reply; 14+ messages in thread
From: Denis Kirjanov @ 2015-12-04  8:04 UTC (permalink / raw)
  To: Scott Wood; +Cc: Chenhui Zhao, linuxppc-dev

On 12/3/15, Scott Wood <scottwood@freescale.com> wrote:
> On Wed, 2015-12-02 at 15:12 +0300, Denis Kirjanov wrote:
>> On 11/20/15, Chenhui Zhao <chenhui.zhao@freescale.com> wrote:
>> > On e6500, in the case of cpu hotplug, either thread in one core
>> > may be the first thread initilzing the TLB1. The subsequent threads
>> > must not setup it again.
>> >
>> > The code is derived from the comment of Scott Wood.
>> >
>> > Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
>> > ---
>> >  arch/powerpc/include/asm/cputhreads.h | 7 +++++++
>> >  arch/powerpc/mm/tlb_nohash.c          | 4 +---
>> >  2 files changed, 8 insertions(+), 3 deletions(-)
>> >
>> > diff --git a/arch/powerpc/include/asm/cputhreads.h
>> > b/arch/powerpc/include/asm/cputhreads.h
>> > index ba42e46..b56cece 100644
>> > --- a/arch/powerpc/include/asm/cputhreads.h
>> > +++ b/arch/powerpc/include/asm/cputhreads.h
>> > @@ -94,6 +94,13 @@ static inline int cpu_last_thread_sibling(int cpu)
>> >  	return cpu | (threads_per_core - 1);
>> >  }
>> >
>> > +static inline u32 get_tensr(void)
>> > +{
>> > +	if (cpu_has_feature(CPU_FTR_SMT))
>> > +		return mfspr(SPRN_TENSR);
>> > +	else
>> > +		return 1;
>> > +}
>> If i get it right, SPRN_TENSR used in the code only if CONFIG_PPC64
>> is defined. Then we can make it noop on ppc32.
>
> Please don't.  It accomplishes nothing other than adding an obstacle to
> supporting this on ppc32.

The idea is to make it a no-op, since the function is defined in a header
file and some core parts include it, for example (a rough sketch of the
suggested stub follows the file list):

arch/powerpc/kernel/smp.c
arch/powerpc/kernel/setup-common.c
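
Concretely, one reading of the suggestion is a stub along these lines. This
is a rough sketch only, not code from the series; it gates on CONFIG_PPC64 as
discussed here, whereas Chenhui's reply elsewhere in the thread proposes
CONFIG_BOOKE as the guard instead.

static inline u32 get_tensr(void)
{
#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SMT))
		return mfspr(SPRN_TENSR);
#endif
	/*
	 * 32-bit builds fold this to a constant, so callers doing
	 * hweight32(get_tensr()) > 1 always see it as false.
	 */
	return 1;
}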


>
> -Scott
>
>

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1
  2015-12-04  8:04     ` Denis Kirjanov
@ 2015-12-04 19:48       ` Scott Wood
  0 siblings, 0 replies; 14+ messages in thread
From: Scott Wood @ 2015-12-04 19:48 UTC (permalink / raw)
  To: Denis Kirjanov; +Cc: Chenhui Zhao, linuxppc-dev

On Fri, 2015-12-04 at 11:04 +0300, Denis Kirjanov wrote:
> On 12/3/15, Scott Wood <scottwood@freescale.com> wrote:
> > On Wed, 2015-12-02 at 15:12 +0300, Denis Kirjanov wrote:
> > > On 11/20/15, Chenhui Zhao <chenhui.zhao@freescale.com> wrote:
> > > > On e6500, in the case of cpu hotplug, either thread in one core
> > > > may be the first thread initilzing the TLB1. The subsequent threads
> > > > must not setup it again.
> > > > 
> > > > The code is derived from the comment of Scott Wood.
> > > > 
> > > > Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
> > > > ---
> > > >  arch/powerpc/include/asm/cputhreads.h | 7 +++++++
> > > >  arch/powerpc/mm/tlb_nohash.c          | 4 +---
> > > >  2 files changed, 8 insertions(+), 3 deletions(-)
> > > > 
> > > > diff --git a/arch/powerpc/include/asm/cputhreads.h
> > > > b/arch/powerpc/include/asm/cputhreads.h
> > > > index ba42e46..b56cece 100644
> > > > --- a/arch/powerpc/include/asm/cputhreads.h
> > > > +++ b/arch/powerpc/include/asm/cputhreads.h
> > > > @@ -94,6 +94,13 @@ static inline int cpu_last_thread_sibling(int cpu)
> > > >  	return cpu | (threads_per_core - 1);
> > > >  }
> > > > 
> > > > +static inline u32 get_tensr(void)
> > > > +{
> > > > +	if (cpu_has_feature(CPU_FTR_SMT))
> > > > +		return mfspr(SPRN_TENSR);
> > > > +	else
> > > > +		return 1;
> > > > +}
> > > If i get it right, SPRN_TENSR used in the code only if CONFIG_PPC64
> > > is defined. Then we can make it noop on ppc32.
> > 
> > Please don't.  It accomplishes nothing other than adding an obstacle to
> > supporting this on ppc32.
> 
> The idea is make it noop since the function defined in header file and
> some core parts include it like:
> 
> arch/powerpc/kernel/smp.c
> arch/powerpc/kernel/setup-common.c

What does that have to do with making it a no-op on 32-bit?  I understand
ifdeffing on CONFIG_BOOKE due to the build issue, but not CONFIG_PPC64.

-Scott

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1
  2015-12-03 11:27   ` Chenhui Zhao
@ 2015-12-23 21:27     ` Scott Wood
  2015-12-24  0:47       ` Zhao C.H.
  0 siblings, 1 reply; 14+ messages in thread
From: Scott Wood @ 2015-12-23 21:27 UTC (permalink / raw)
  To: Chenhui Zhao, Denis Kirjanov; +Cc: linuxppc-dev

On Thu, 2015-12-03 at 19:27 +0800, Chenhui Zhao wrote:
> 
> On Wed, Dec 2, 2015 at 8:12 PM, Denis Kirjanov <kda@linux-powerpc.org> 
> wrote:
> > On 11/20/15, Chenhui Zhao <chenhui.zhao@freescale.com> wrote:
> > >  On e6500, in the case of cpu hotplug, either thread in one core
> > >  may be the first thread initilzing the TLB1. The subsequent threads
> > >  must not setup it again.
> > > 
> > >  The code is derived from the comment of Scott Wood.
> > > 
> > >  Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
> > >  ---
> > >   arch/powerpc/include/asm/cputhreads.h | 7 +++++++
> > >   arch/powerpc/mm/tlb_nohash.c          | 4 +---
> > >   2 files changed, 8 insertions(+), 3 deletions(-)
> > > 
> > >  diff --git a/arch/powerpc/include/asm/cputhreads.h
> > >  b/arch/powerpc/include/asm/cputhreads.h
> > >  index ba42e46..b56cece 100644
> > >  --- a/arch/powerpc/include/asm/cputhreads.h
> > >  +++ b/arch/powerpc/include/asm/cputhreads.h
> > >  @@ -94,6 +94,13 @@ static inline int cpu_last_thread_sibling(int 
> > > cpu)
> > >   	return cpu | (threads_per_core - 1);
> > >   }
> > > 
> > >  +static inline u32 get_tensr(void)
> > >  +{
> > >  +	if (cpu_has_feature(CPU_FTR_SMT))
> > >  +		return mfspr(SPRN_TENSR);
> > >  +	else
> > >  +		return 1;
> > >  +}
> > If i get it right, SPRN_TENSR used in the code only if CONFIG_PPC64
> > is defined. Then we can make it noop on ppc32.
> > 
> > Thanks!
> 
> Yeah, SPRN_TENSR is defined when CONFIG_BOOKE or CONFIG_40x is enabled. 
> I'd like to change the code like:
> 
> static inline u32 get_tensr(void)
> {
> #ifdef CONFIG_BOOKE
>         if (cpu_has_feature(CPU_FTR_SMT))
>                 return mfspr(SPRN_TENSR);
> #endif
>         return 1;
> }

Are you going to respin?

-Scott

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1
  2015-12-23 21:27     ` Scott Wood
@ 2015-12-24  0:47       ` Zhao C.H.
  0 siblings, 0 replies; 14+ messages in thread
From: Zhao C.H. @ 2015-12-24  0:47 UTC (permalink / raw)
  To: Scott Wood, Denis Kirjanov; +Cc: linuxppc-dev

Hi Scott,

I updated the patch a moment ago at http://patchwork.ozlabs.org/patch/560771/.

Thanks,
Chenhui
________________________________________
From: Wood Scott-B07421
Sent: Thursday, December 24, 2015 5:27
To: Zhao Chenhui-B35336; Denis Kirjanov
Cc: linuxppc-dev@lists.ozlabs.org
Subject: Re: [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1

On Thu, 2015-12-03 at 19:27 +0800, Chenhui Zhao wrote:
>
> On Wed, Dec 2, 2015 at 8:12 PM, Denis Kirjanov <kda@linux-powerpc.org>
> wrote:
> > On 11/20/15, Chenhui Zhao <chenhui.zhao@freescale.com> wrote:
> > >  On e6500, in the case of cpu hotplug, either thread in one core
> > >  may be the first thread initilzing the TLB1. The subsequent threads
> > >  must not setup it again.
> > >
> > >  The code is derived from the comment of Scott Wood.
> > >
> > >  Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
> > >  ---
> > >   arch/powerpc/include/asm/cputhreads.h | 7 +++++++
> > >   arch/powerpc/mm/tlb_nohash.c          | 4 +---
> > >   2 files changed, 8 insertions(+), 3 deletions(-)
> > >
> > >  diff --git a/arch/powerpc/include/asm/cputhreads.h
> > >  b/arch/powerpc/include/asm/cputhreads.h
> > >  index ba42e46..b56cece 100644
> > >  --- a/arch/powerpc/include/asm/cputhreads.h
> > >  +++ b/arch/powerpc/include/asm/cputhreads.h
> > >  @@ -94,6 +94,13 @@ static inline int cpu_last_thread_sibling(int
> > > cpu)
> > >           return cpu | (threads_per_core - 1);
> > >   }
> > >
> > >  +static inline u32 get_tensr(void)
> > >  +{
> > >  +        if (cpu_has_feature(CPU_FTR_SMT))
> > >  +                return mfspr(SPRN_TENSR);
> > >  +        else
> > >  +                return 1;
> > >  +}
> > If i get it right, SPRN_TENSR used in the code only if CONFIG_PPC64
> > is defined. Then we can make it noop on ppc32.
> >
> > Thanks!
>
> Yeah, SPRN_TENSR is defined when CONFIG_BOOKE or CONFIG_40x is enabled.
> I'd like to change the code like:
>
> static inline u32 get_tensr(void)
> {
> #ifdef CONFIG_BOOKE
>         if (cpu_has_feature(CPU_FTR_SMT))
>                 return mfspr(SPRN_TENSR);
> #endif
>         return 1;
> }

Are you going to respin?

-Scott

^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2015-12-24  0:47 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-11-20  9:13 [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1 Chenhui Zhao
2015-11-20  9:13 ` [PATCH v3 2/6] powerpc/cache: add cache flush operation for various e500 Chenhui Zhao
2015-11-20  9:13 ` [PATCH v3 3/6] powerpc/rcpm: add RCPM driver Chenhui Zhao
2015-11-20  9:14 ` [PATCH v3 4/6] powerpc/mpc85xx: refactor the PM operations Chenhui Zhao
2015-11-20  9:14 ` [PATCH v3 5/6] powerpc/mpc85xx: Add hotplug support on E5500 and E500MC cores Chenhui Zhao
2015-11-20  9:14 ` [PATCH v3 6/6] powerpc/mpc85xx: Add CPU hotplug support for E6500 Chenhui Zhao
2015-12-02 11:04 ` [PATCH v3 1/6] powerpc/mm: any thread in one core can be the first to setup TLB1 Chenhui Zhao
2015-12-02 12:12 ` Denis Kirjanov
2015-12-03 11:27   ` Chenhui Zhao
2015-12-23 21:27     ` Scott Wood
2015-12-24  0:47       ` Zhao C.H.
2015-12-03 19:26   ` Scott Wood
2015-12-04  8:04     ` Denis Kirjanov
2015-12-04 19:48       ` Scott Wood
