All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCHv2 0/5] arm64: initial CPU hotplug support
@ 2013-07-22 15:37 Mark Rutland
  2013-07-22 15:37 ` [PATCHv2 1/5] arm64: reorganise smp_enable_ops Mark Rutland
                   ` (4 more replies)
  0 siblings, 5 replies; 8+ messages in thread
From: Mark Rutland @ 2013-07-22 15:37 UTC (permalink / raw)
  To: linux-arm-kernel

The following patches add basic HOTPLUG_CPU support to arm64, which
combined with appropriate firmware (e.g. [1]) can be used to power CPUs
up and down dynamically.

Currently this clashes with Xen support on arm64, but there's already a patch
to fix that up [2], so hopefully that will get solved during the merge process.

I've tested this series with the bootwrapper PSCI implementation I've
placed on linux-arm.org [1] and a modified foundation model dts with a
psci node and each CPU's enable-method set to "psci", using a shell
while repeatedly cycling all cpus off and on:

for C in $(seq 0 3); do
	./cyclichotplug.sh $C >/dev/null 2>&1 &
done

---->8----
#!/bin/sh
# cyclichotplug.sh

CPU=$1;

if [ -z "$CPU" ]; then
	printf "Usage: $0 <cpu id>\n";
	exit 1;
fi

ONLINEFILE=/sys/devices/system/cpu/cpu$CPU/online;

while true; do
	echo 0 > $ONLINEFILE;
	echo 1 > $ONLINEFILE;
done
---->8----

Since v1 [3]:
* Rebased to v3.11-rc2 to solve cpuinit removal conflicts.
* Removed failure path for cpu_die, it causes more problems than it solves.
* Removed cpu_kill, we don't currently need it.
* Test for cpu_die in op_cpu_disable to fail early and survive when there's no
  mechanism for hot unplug.
* Change pr_err on failed cpu_die to a pr_crit.
* Removed dependency on HOTPLUG, which has been obliterated.

Thanks,
Mark.

[1] http://linux-arm.org/git?p=boot-wrapper-aarch64.git;a=shortlog;h=refs/tags/simple-psci
[2] http://lists.infradead.org/pipermail/linux-arm-kernel/2013-July/185237.html
[3] http://lists.infradead.org/pipermail/linux-arm-kernel/2013-July/182880.html

Mark Rutland (5):
  arm64: reorganise smp_enable_ops
  arm64: factor out spin-table boot method
  arm64: read enable-method for CPU0
  arm64: add CPU_HOTPLUG infrastructure
  arm64: add PSCI CPU_OFF-based hotplug support

 arch/arm64/Kconfig                 |   7 ++
 arch/arm64/include/asm/irq.h       |   1 +
 arch/arm64/include/asm/smp.h       |  36 +++++--
 arch/arm64/kernel/cputable.c       |   2 +-
 arch/arm64/kernel/head.S           |  12 ++-
 arch/arm64/kernel/irq.c            |  61 +++++++++++
 arch/arm64/kernel/process.c        |   7 ++
 arch/arm64/kernel/smp.c            | 201 ++++++++++++++++++++++---------------
 arch/arm64/kernel/smp_psci.c       |  54 ++++++++--
 arch/arm64/kernel/smp_spin_table.c |  85 +++++++++++++++-
 arch/arm64/kernel/vmlinux.lds.S    |   1 -
 11 files changed, 360 insertions(+), 107 deletions(-)

-- 
1.8.1.1

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCHv2 1/5] arm64: reorganise smp_enable_ops
  2013-07-22 15:37 [PATCHv2 0/5] arm64: initial CPU hotplug support Mark Rutland
@ 2013-07-22 15:37 ` Mark Rutland
  2013-07-22 15:37 ` [PATCHv2 2/5] arm64: factor out spin-table boot method Mark Rutland
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 8+ messages in thread
From: Mark Rutland @ 2013-07-22 15:37 UTC (permalink / raw)
  To: linux-arm-kernel

For hotplug support, we're going to want a place to store operations
that do more than bring CPUs online, and it makes sense to group these
with our current smp_enable_ops.

This patch renames smp_enable_ops to smp_ops to make the intended use of
the structure clearer. While we're at it, fix up instances of the cpu
parameter to be an unsigned int, drop the init markings and rename the
*_cpu functions to cpu_* to reduce future churn when smp_operations is
extended.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
---
 arch/arm64/include/asm/smp.h       | 10 +++++-----
 arch/arm64/kernel/smp.c            | 24 ++++++++++++------------
 arch/arm64/kernel/smp_psci.c       | 10 +++++-----
 arch/arm64/kernel/smp_spin_table.c | 10 +++++-----
 4 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 4b8023c..90626b6 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -68,13 +68,13 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 struct device_node;
 
-struct smp_enable_ops {
+struct smp_operations {
 	const char	*name;
-	int		(*init_cpu)(struct device_node *, int);
-	int		(*prepare_cpu)(int);
+	int		(*cpu_init)(struct device_node *, unsigned int);
+	int		(*cpu_prepare)(unsigned int);
 };
 
-extern const struct smp_enable_ops smp_spin_table_ops;
-extern const struct smp_enable_ops smp_psci_ops;
+extern const struct smp_operations smp_spin_table_ops;
+extern const struct smp_operations smp_psci_ops;
 
 #endif /* ifndef __ASM_SMP_H */
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index fee5cce..533f405 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -236,17 +236,17 @@ void __init smp_prepare_boot_cpu(void)
 
 static void (*smp_cross_call)(const struct cpumask *, unsigned int);
 
-static const struct smp_enable_ops *enable_ops[] __initconst = {
+static const struct smp_operations *supported_smp_ops[] __initconst = {
 	&smp_spin_table_ops,
 	&smp_psci_ops,
 	NULL,
 };
 
-static const struct smp_enable_ops *smp_enable_ops[NR_CPUS];
+static const struct smp_operations *smp_ops[NR_CPUS];
 
-static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
+static const struct smp_operations * __init smp_get_ops(const char *name)
 {
-	const struct smp_enable_ops **ops = enable_ops;
+	const struct smp_operations **ops = supported_smp_ops;
 
 	while (*ops) {
 		if (!strcmp(name, (*ops)->name))
@@ -267,7 +267,7 @@ void __init smp_init_cpus(void)
 {
 	const char *enable_method;
 	struct device_node *dn = NULL;
-	int i, cpu = 1;
+	unsigned int i, cpu = 1;
 	bool bootcpu_valid = false;
 
 	while ((dn = of_find_node_by_type(dn, "cpu"))) {
@@ -346,15 +346,15 @@ void __init smp_init_cpus(void)
 			goto next;
 		}
 
-		smp_enable_ops[cpu] = smp_get_enable_ops(enable_method);
+		smp_ops[cpu] = smp_get_ops(enable_method);
 
-		if (!smp_enable_ops[cpu]) {
+		if (!smp_ops[cpu]) {
 			pr_err("%s: invalid enable-method property: %s\n",
 			       dn->full_name, enable_method);
 			goto next;
 		}
 
-		if (smp_enable_ops[cpu]->init_cpu(dn, cpu))
+		if (smp_ops[cpu]->cpu_init(dn, cpu))
 			goto next;
 
 		pr_debug("cpu logical map 0x%llx\n", hwid);
@@ -384,8 +384,8 @@ next:
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-	int cpu, err;
-	unsigned int ncores = num_possible_cpus();
+	int err;
+	unsigned int cpu, ncores = num_possible_cpus();
 
 	/*
 	 * are we trying to boot more cores than exist?
@@ -412,10 +412,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		if (cpu == smp_processor_id())
 			continue;
 
-		if (!smp_enable_ops[cpu])
+		if (!smp_ops[cpu])
 			continue;
 
-		err = smp_enable_ops[cpu]->prepare_cpu(cpu);
+		err = smp_ops[cpu]->cpu_prepare(cpu);
 		if (err)
 			continue;
 
diff --git a/arch/arm64/kernel/smp_psci.c b/arch/arm64/kernel/smp_psci.c
index 0c53330..2f0d3dd 100644
--- a/arch/arm64/kernel/smp_psci.c
+++ b/arch/arm64/kernel/smp_psci.c
@@ -23,12 +23,12 @@
 #include <asm/psci.h>
 #include <asm/smp_plat.h>
 
-static int __init smp_psci_init_cpu(struct device_node *dn, int cpu)
+static int smp_psci_cpu_init(struct device_node *dn, unsigned int cpu)
 {
 	return 0;
 }
 
-static int __init smp_psci_prepare_cpu(int cpu)
+static int smp_psci_cpu_prepare(unsigned int cpu)
 {
 	int err;
 
@@ -46,8 +46,8 @@ static int __init smp_psci_prepare_cpu(int cpu)
 	return 0;
 }
 
-const struct smp_enable_ops smp_psci_ops __initconst = {
+const struct smp_operations smp_psci_ops = {
 	.name		= "psci",
-	.init_cpu	= smp_psci_init_cpu,
-	.prepare_cpu	= smp_psci_prepare_cpu,
+	.cpu_init	= smp_psci_cpu_init,
+	.cpu_prepare	= smp_psci_cpu_prepare,
 };
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 7c35fa6..5fecffc 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -24,7 +24,7 @@
 
 static phys_addr_t cpu_release_addr[NR_CPUS];
 
-static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu)
+static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu)
 {
 	/*
 	 * Determine the address from which the CPU is polling.
@@ -40,7 +40,7 @@ static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu)
 	return 0;
 }
 
-static int __init smp_spin_table_prepare_cpu(int cpu)
+static int smp_spin_table_cpu_prepare(unsigned int cpu)
 {
 	void **release_addr;
 
@@ -59,8 +59,8 @@ static int __init smp_spin_table_prepare_cpu(int cpu)
 	return 0;
 }
 
-const struct smp_enable_ops smp_spin_table_ops __initconst = {
+const struct smp_operations smp_spin_table_ops = {
 	.name		= "spin-table",
-	.init_cpu 	= smp_spin_table_init_cpu,
-	.prepare_cpu	= smp_spin_table_prepare_cpu,
+	.cpu_init	= smp_spin_table_cpu_init,
+	.cpu_prepare	= smp_spin_table_cpu_prepare,
 };
-- 
1.8.1.1

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCHv2 2/5] arm64: factor out spin-table boot method
  2013-07-22 15:37 [PATCHv2 0/5] arm64: initial CPU hotplug support Mark Rutland
  2013-07-22 15:37 ` [PATCHv2 1/5] arm64: reorganise smp_enable_ops Mark Rutland
@ 2013-07-22 15:37 ` Mark Rutland
  2013-07-23 15:17   ` Santosh Shilimkar
  2013-07-22 15:37 ` [PATCHv2 3/5] arm64: read enable-method for CPU0 Mark Rutland
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 8+ messages in thread
From: Mark Rutland @ 2013-07-22 15:37 UTC (permalink / raw)
  To: linux-arm-kernel

The arm64 kernel has an internal holding pen, which is necessary for
some systems where we can't bring CPUs online individually and must hold
multiple CPUs in a safe area until the kernel is able to handle them.
The current SMP infrastructure for arm64 is closely coupled to this
holding pen, and alternative boot methods must launch CPUs into the pen,
from whence they are launched into the kernel proper.

With PSCI (and possibly other future boot methods), we can bring CPUs
online individually, and need not perform the secondary_holding_pen
dance. Instead, this patch factors the holding pen management code out
to the spin-table boot method code, as it is the only boot method
requiring the pen.

A new entry point for secondaries, secondary_entry is added for other
boot methods to use, which bypasses the holding pen and its associated
overhead when bringing CPUs online. The smp.pen.text section is also
removed, as the pen can live in head.text without problem.

The smp_operations structure is extended with two new functions,
cpu_boot and cpu_postboot, for bringing a cpu into the kernel and
performing any post-boot cleanup required by a boot method (e.g.
resetting the secondary_holding_pen_release to INVALID_HWID).

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
---
 arch/arm64/include/asm/smp.h       | 17 ++++++++-
 arch/arm64/kernel/head.S           | 12 +++++-
 arch/arm64/kernel/smp.c            | 67 +++-------------------------------
 arch/arm64/kernel/smp_psci.c       | 16 ++++----
 arch/arm64/kernel/smp_spin_table.c | 75 ++++++++++++++++++++++++++++++++++++++
 arch/arm64/kernel/vmlinux.lds.S    |  1 -
 6 files changed, 115 insertions(+), 73 deletions(-)

diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 90626b6..af39644 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -60,8 +60,7 @@ struct secondary_data {
 	void *stack;
 };
 extern struct secondary_data secondary_data;
-extern void secondary_holding_pen(void);
-extern volatile unsigned long secondary_holding_pen_release;
+extern void secondary_entry(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -70,8 +69,22 @@ struct device_node;
 
 struct smp_operations {
 	const char	*name;
+	/*
+	 * Check devicetree data for cpu
+	 */
 	int		(*cpu_init)(struct device_node *, unsigned int);
+	/*
+	 * Test if cpu is present and bootable
+	 */
 	int		(*cpu_prepare)(unsigned int);
+	/*
+	 * Boot cpu into the kernel
+	 */
+	int		(*cpu_boot)(unsigned int);
+	/*
+	 * Performs post-boot cleanup
+	 */
+	void		(*cpu_postboot)(void);
 };
 
 extern const struct smp_operations smp_spin_table_ops;
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 53dcae4..3532ca6 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -217,7 +217,6 @@ ENTRY(__boot_cpu_mode)
 	.quad	PAGE_OFFSET
 
 #ifdef CONFIG_SMP
-	.pushsection    .smp.pen.text, "ax"
 	.align	3
 1:	.quad	.
 	.quad	secondary_holding_pen_release
@@ -242,7 +241,16 @@ pen:	ldr	x4, [x3]
 	wfe
 	b	pen
 ENDPROC(secondary_holding_pen)
-	.popsection
+
+	/*
+	 * Secondary entry point that jumps straight into the kernel. Only to
+	 * be used where CPUs are brought online dynamically by the kernel.
+	 */
+ENTRY(secondary_entry)
+	bl	__calc_phys_offset		// x2=phys offset
+	bl	el2_setup			// Drop to EL1
+	b	secondary_startup
+ENDPROC(secondary_entry)
 
 ENTRY(secondary_startup)
 	/*
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 533f405..72c2823 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -54,7 +54,6 @@
  * where to place its SVC stack
  */
 struct secondary_data secondary_data;
-volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
 
 enum ipi_msg_type {
 	IPI_RESCHEDULE,
@@ -63,22 +62,7 @@ enum ipi_msg_type {
 	IPI_CPU_STOP,
 };
 
-static DEFINE_RAW_SPINLOCK(boot_lock);
-
-/*
- * Write secondary_holding_pen_release in a way that is guaranteed to be
- * visible to all observers, irrespective of whether they're taking part
- * in coherency or not.  This is necessary for the hotplug code to work
- * reliably.
- */
-static void write_pen_release(u64 val)
-{
-	void *start = (void *)&secondary_holding_pen_release;
-	unsigned long size = sizeof(secondary_holding_pen_release);
-
-	secondary_holding_pen_release = val;
-	__flush_dcache_area(start, size);
-}
+static const struct smp_operations *smp_ops[NR_CPUS];
 
 /*
  * Boot a secondary CPU, and assign it the specified idle task.
@@ -86,38 +70,10 @@ static void write_pen_release(u64 val)
  */
 static int boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-	unsigned long timeout;
-
-	/*
-	 * Set synchronisation state between this boot processor
-	 * and the secondary one
-	 */
-	raw_spin_lock(&boot_lock);
-
-	/*
-	 * Update the pen release flag.
-	 */
-	write_pen_release(cpu_logical_map(cpu));
-
-	/*
-	 * Send an event, causing the secondaries to read pen_release.
-	 */
-	sev();
-
-	timeout = jiffies + (1 * HZ);
-	while (time_before(jiffies, timeout)) {
-		if (secondary_holding_pen_release == INVALID_HWID)
-			break;
-		udelay(10);
-	}
-
-	/*
-	 * Now the secondary core is starting up let it run its
-	 * calibrations, then wait for it to finish
-	 */
-	raw_spin_unlock(&boot_lock);
+	if (smp_ops[cpu]->cpu_boot)
+		return smp_ops[cpu]->cpu_boot(cpu);
 
-	return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
+	return -EOPNOTSUPP;
 }
 
 static DECLARE_COMPLETION(cpu_running);
@@ -187,17 +143,8 @@ asmlinkage void secondary_start_kernel(void)
 	preempt_disable();
 	trace_hardirqs_off();
 
-	/*
-	 * Let the primary processor know we're out of the
-	 * pen, then head off into the C entry point
-	 */
-	write_pen_release(INVALID_HWID);
-
-	/*
-	 * Synchronise with the boot thread.
-	 */
-	raw_spin_lock(&boot_lock);
-	raw_spin_unlock(&boot_lock);
+	if (smp_ops[cpu]->cpu_postboot)
+		smp_ops[cpu]->cpu_postboot();
 
 	/*
 	 * OK, now it's safe to let the boot CPU continue.  Wait for
@@ -242,8 +189,6 @@ static const struct smp_operations *supported_smp_ops[] __initconst = {
 	NULL,
 };
 
-static const struct smp_operations *smp_ops[NR_CPUS];
-
 static const struct smp_operations * __init smp_get_ops(const char *name)
 {
 	const struct smp_operations **ops = supported_smp_ops;
diff --git a/arch/arm64/kernel/smp_psci.c b/arch/arm64/kernel/smp_psci.c
index 2f0d3dd..20499bc 100644
--- a/arch/arm64/kernel/smp_psci.c
+++ b/arch/arm64/kernel/smp_psci.c
@@ -30,24 +30,26 @@ static int smp_psci_cpu_init(struct device_node *dn, unsigned int cpu)
 
 static int smp_psci_cpu_prepare(unsigned int cpu)
 {
-	int err;
-
 	if (!psci_ops.cpu_on) {
 		pr_err("psci: no cpu_on method, not booting CPU%d\n", cpu);
 		return -ENODEV;
 	}
 
-	err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_holding_pen));
-	if (err) {
+	return 0;
+}
+
+static int smp_psci_cpu_boot(unsigned int cpu)
+{
+	int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
+	if (err)
 		pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err);
-		return err;
-	}
 
-	return 0;
+	return err;
 }
 
 const struct smp_operations smp_psci_ops = {
 	.name		= "psci",
 	.cpu_init	= smp_psci_cpu_init,
 	.cpu_prepare	= smp_psci_cpu_prepare,
+	.cpu_boot	= smp_psci_cpu_boot,
 };
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 5fecffc..87af6bb 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -16,13 +16,36 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/smp.h>
 
 #include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/smp_plat.h>
+
+extern void secondary_holding_pen(void);
+volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
 
 static phys_addr_t cpu_release_addr[NR_CPUS];
+static DEFINE_RAW_SPINLOCK(boot_lock);
+
+/*
+ * Write secondary_holding_pen_release in a way that is guaranteed to be
+ * visible to all observers, irrespective of whether they're taking part
+ * in coherency or not.  This is necessary for the hotplug code to work
+ * reliably.
+ */
+static void write_pen_release(u64 val)
+{
+	void *start = (void *)&secondary_holding_pen_release;
+	unsigned long size = sizeof(secondary_holding_pen_release);
+
+	secondary_holding_pen_release = val;
+	__flush_dcache_area(start, size);
+}
+
 
 static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu)
 {
@@ -59,8 +82,60 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
 	return 0;
 }
 
+static int smp_spin_table_cpu_boot(unsigned int cpu)
+{
+	unsigned long timeout;
+
+	/*
+	 * Set synchronisation state between this boot processor
+	 * and the secondary one
+	 */
+	raw_spin_lock(&boot_lock);
+
+	/*
+	 * Update the pen release flag.
+	 */
+	write_pen_release(cpu_logical_map(cpu));
+
+	/*
+	 * Send an event, causing the secondaries to read pen_release.
+	 */
+	sev();
+
+	timeout = jiffies + (1 * HZ);
+	while (time_before(jiffies, timeout)) {
+		if (secondary_holding_pen_release == INVALID_HWID)
+			break;
+		udelay(10);
+	}
+
+	/*
+	 * Now the secondary core is starting up let it run its
+	 * calibrations, then wait for it to finish
+	 */
+	raw_spin_unlock(&boot_lock);
+
+	return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
+}
+
+void smp_spin_table_cpu_postboot(void)
+{
+	/*
+	 * Let the primary processor know we're out of the pen.
+	 */
+	write_pen_release(INVALID_HWID);
+
+	/*
+	 * Synchronise with the boot thread.
+	 */
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
+}
+
 const struct smp_operations smp_spin_table_ops = {
 	.name		= "spin-table",
 	.cpu_init	= smp_spin_table_cpu_init,
 	.cpu_prepare	= smp_spin_table_cpu_prepare,
+	.cpu_boot	= smp_spin_table_cpu_boot,
+	.cpu_postboot	= smp_spin_table_cpu_postboot,
 };
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index f5e5574..d8ca8d9 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -54,7 +54,6 @@ SECTIONS
 	}
 	.text : {			/* Real text segment		*/
 		_stext = .;		/* Text and read-only data	*/
-			*(.smp.pen.text)
 			__exception_text_start = .;
 			*(.exception.text)
 			__exception_text_end = .;
-- 
1.8.1.1

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCHv2 3/5] arm64: read enable-method for CPU0
  2013-07-22 15:37 [PATCHv2 0/5] arm64: initial CPU hotplug support Mark Rutland
  2013-07-22 15:37 ` [PATCHv2 1/5] arm64: reorganise smp_enable_ops Mark Rutland
  2013-07-22 15:37 ` [PATCHv2 2/5] arm64: factor out spin-table boot method Mark Rutland
@ 2013-07-22 15:37 ` Mark Rutland
  2013-07-22 15:37 ` [PATCHv2 4/5] arm64: add CPU_HOTPLUG infrastructure Mark Rutland
  2013-07-22 15:37 ` [PATCHv2 5/5] arm64: add PSCI CPU_OFF-based hotplug support Mark Rutland
  4 siblings, 0 replies; 8+ messages in thread
From: Mark Rutland @ 2013-07-22 15:37 UTC (permalink / raw)
  To: linux-arm-kernel

With the advent of CPU_HOTPLUG, the enable-method property for CPU0
may tell us something useful (i.e. how to hotplug it back on), so
we must read it along with the enable-method properties of all the other CPUs.

This patch ensures that CPU0's enable-method property is read.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
---
 arch/arm64/kernel/smp.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 72c2823..f6ce2ae 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -254,6 +254,8 @@ void __init smp_init_cpus(void)
 			}
 		}
 
+		enable_method = of_get_property(dn, "enable-method", NULL);
+
 		/*
 		 * The numbering scheme requires that the boot CPU
 		 * must be assigned logical id 0. Record it so that
@@ -269,11 +271,12 @@ void __init smp_init_cpus(void)
 
 			bootcpu_valid = true;
 
+			if (enable_method)
+				smp_ops[0] = smp_get_ops(enable_method);
+
 			/*
-			 * cpu_logical_map has already been
-			 * initialized and the boot cpu doesn't need
-			 * the enable-method so continue without
-			 * incrementing cpu.
+			 * cpu_logical_map has already been initialized so
+			 * continue without incrementing cpu.
 			 */
 			continue;
 		}
@@ -281,10 +284,6 @@ void __init smp_init_cpus(void)
 		if (cpu >= NR_CPUS)
 			goto next;
 
-		/*
-		 * We currently support only the "spin-table" enable-method.
-		 */
-		enable_method = of_get_property(dn, "enable-method", NULL);
 		if (!enable_method) {
 			pr_err("%s: missing enable-method property\n",
 				dn->full_name);
-- 
1.8.1.1

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCHv2 4/5] arm64: add CPU_HOTPLUG infrastructure
  2013-07-22 15:37 [PATCHv2 0/5] arm64: initial CPU hotplug support Mark Rutland
                   ` (2 preceding siblings ...)
  2013-07-22 15:37 ` [PATCHv2 3/5] arm64: read enable-method for CPU0 Mark Rutland
@ 2013-07-22 15:37 ` Mark Rutland
  2013-07-22 15:37 ` [PATCHv2 5/5] arm64: add PSCI CPU_OFF-based hotplug support Mark Rutland
  4 siblings, 0 replies; 8+ messages in thread
From: Mark Rutland @ 2013-07-22 15:37 UTC (permalink / raw)
  To: linux-arm-kernel

This patch adds the basic infrastructure necessary to support
CPU_HOTPLUG on arm64, based on the arm implementation. Actual hotplug
support will depend on an implementation's smp_operations (e.g. PSCI).

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
---
 arch/arm64/Kconfig           |  7 ++++
 arch/arm64/include/asm/irq.h |  1 +
 arch/arm64/include/asm/smp.h |  9 ++++
 arch/arm64/kernel/cputable.c |  2 +-
 arch/arm64/kernel/irq.c      | 61 ++++++++++++++++++++++++++++
 arch/arm64/kernel/process.c  |  7 ++++
 arch/arm64/kernel/smp.c      | 97 ++++++++++++++++++++++++++++++++++++++++++++
 7 files changed, 183 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9737e97..5ce4ccb 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -158,6 +158,13 @@ config NR_CPUS
 	default "8" if ARCH_XGENE
 	default "4"
 
+config HOTPLUG_CPU
+	bool "Support for hot-pluggable CPUs"
+	depends on SMP
+	help
+	  Say Y here to experiment with turning CPUs off and on.  CPUs
+	  can be controlled through /sys/devices/system/cpu.
+
 source kernel/Kconfig.preempt
 
 config HZ
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index 0332fc0..e1f7ecd 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -4,6 +4,7 @@
 #include <asm-generic/irq.h>
 
 extern void (*handle_arch_irq)(struct pt_regs *);
+extern void migrate_irqs(void);
 extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
 
 #endif
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index af39644..844925d 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -67,6 +67,11 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 struct device_node;
 
+extern int __cpu_disable(void);
+
+extern void __cpu_die(unsigned int cpu);
+extern void cpu_die(void);
+
 struct smp_operations {
 	const char	*name;
 	/*
@@ -85,6 +90,10 @@ struct smp_operations {
 	 * Performs post-boot cleanup
 	 */
 	void		(*cpu_postboot)(void);
+#ifdef CONFIG_HOTPLUG_CPU
+	int  (*cpu_disable)(unsigned int cpu);
+	void (*cpu_die)(unsigned int cpu);
+#endif
 };
 
 extern const struct smp_operations smp_spin_table_ops;
diff --git a/arch/arm64/kernel/cputable.c b/arch/arm64/kernel/cputable.c
index 63cfc4a..fd3993c 100644
--- a/arch/arm64/kernel/cputable.c
+++ b/arch/arm64/kernel/cputable.c
@@ -22,7 +22,7 @@
 
 extern unsigned long __cpu_setup(void);
 
-struct cpu_info __initdata cpu_table[] = {
+struct cpu_info cpu_table[] = {
 	{
 		.cpu_id_val	= 0x000f0000,
 		.cpu_id_mask	= 0x000f0000,
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index ecb3354..473e5db 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -81,3 +81,64 @@ void __init init_IRQ(void)
 	if (!handle_arch_irq)
 		panic("No interrupt controller found.");
 }
+
+#ifdef CONFIG_HOTPLUG_CPU
+static bool migrate_one_irq(struct irq_desc *desc)
+{
+	struct irq_data *d = irq_desc_get_irq_data(desc);
+	const struct cpumask *affinity = d->affinity;
+	struct irq_chip *c;
+	bool ret = false;
+
+	/*
+	 * If this is a per-CPU interrupt, or the affinity does not
+	 * include this CPU, then we have nothing to do.
+	 */
+	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
+		return false;
+
+	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+		affinity = cpu_online_mask;
+		ret = true;
+	}
+
+	c = irq_data_get_irq_chip(d);
+	if (!c->irq_set_affinity)
+		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
+	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+		cpumask_copy(d->affinity, affinity);
+
+	return ret;
+}
+
+/*
+ * The current CPU has been marked offline.  Migrate IRQs off this CPU.
+ * If the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ *
+ * Note: we must iterate over all IRQs, whether they have an attached
+ * action structure or not, as we need to get chained interrupts too.
+ */
+void migrate_irqs(void)
+{
+	unsigned int i;
+	struct irq_desc *desc;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	for_each_irq_desc(i, desc) {
+		bool affinity_broken;
+
+		raw_spin_lock(&desc->lock);
+		affinity_broken = migrate_one_irq(desc);
+		raw_spin_unlock(&desc->lock);
+
+		if (affinity_broken)
+			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
+					    i, smp_processor_id());
+	}
+
+	local_irq_restore(flags);
+}
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 1788bf6..4835a33 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -102,6 +102,13 @@ void arch_cpu_idle(void)
 	local_irq_enable();
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
+{
+       cpu_die();
+}
+#endif
+
 void machine_shutdown(void)
 {
 #ifdef CONFIG_SMP
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index f6ce2ae..bd9c980 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -168,6 +168,103 @@ asmlinkage void secondary_start_kernel(void)
 	cpu_startup_entry(CPUHP_ONLINE);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static int op_cpu_disable(unsigned int cpu)
+{
+	/*
+	 * If we don't have a cpu_die method, abort before we reach the point
+	 * of no return. CPU0 may not have an smp_ops, so test for it.
+	 */
+	if (!smp_ops[cpu] || !smp_ops[cpu]->cpu_die)
+		return -EOPNOTSUPP;
+
+	/*
+	 * We may need to abort a hot unplug for some other mechanism-specific
+	 * reason.
+	 */
+	if (smp_ops[cpu]->cpu_disable)
+		return smp_ops[cpu]->cpu_disable(cpu);
+
+	return 0;
+}
+
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+	int ret;
+
+	ret = op_cpu_disable(cpu);
+	if (ret)
+		return ret;
+
+	/*
+	 * Take this CPU offline.  Once we clear this, we can't return,
+	 * and we must not schedule until we're ready to give up the cpu.
+	 */
+	set_cpu_online(cpu, false);
+
+	/*
+	 * OK - migrate IRQs away from this CPU
+	 */
+	migrate_irqs();
+
+	/*
+	 * Remove this CPU from the vm mask set of all processes.
+	 */
+	clear_tasks_mm_cpumask(cpu);
+
+	return 0;
+}
+
+static DECLARE_COMPLETION(cpu_died);
+
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+		pr_crit("CPU%u: cpu didn't die\n", cpu);
+		return;
+	}
+	pr_notice("CPU%u: shutdown\n", cpu);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void __ref cpu_die(void)
+{
+	unsigned int cpu = smp_processor_id();
+
+	idle_task_exit();
+
+	local_irq_disable();
+	mb();
+
+	/* Tell __cpu_die() that this CPU is now safe to dispose of */
+	RCU_NONIDLE(complete(&cpu_died));
+
+	/*
+	 * Actually shutdown the CPU. This must never fail.
+	 */
+	smp_ops[cpu]->cpu_die(cpu);
+
+	BUG();
+}
+#endif
+
+
+
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	unsigned long bogosum = loops_per_jiffy * num_online_cpus();
-- 
1.8.1.1

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCHv2 5/5] arm64: add PSCI CPU_OFF-based hotplug support
  2013-07-22 15:37 [PATCHv2 0/5] arm64: initial CPU hotplug support Mark Rutland
                   ` (3 preceding siblings ...)
  2013-07-22 15:37 ` [PATCHv2 4/5] arm64: add CPU_HOTPLUG infrastructure Mark Rutland
@ 2013-07-22 15:37 ` Mark Rutland
  4 siblings, 0 replies; 8+ messages in thread
From: Mark Rutland @ 2013-07-22 15:37 UTC (permalink / raw)
  To: linux-arm-kernel

This patch adds support for using PSCI CPU_OFF calls for CPU hotplug.
With this code it is possible to hot unplug CPUs with "psci" as their
boot-method, as long as there's an appropriate cpu_off function id
specified in the psci node.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
---
 arch/arm64/kernel/smp_psci.c | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)

diff --git a/arch/arm64/kernel/smp_psci.c b/arch/arm64/kernel/smp_psci.c
index 20499bc..06123de 100644
--- a/arch/arm64/kernel/smp_psci.c
+++ b/arch/arm64/kernel/smp_psci.c
@@ -47,9 +47,39 @@ static int smp_psci_cpu_boot(unsigned int cpu)
 	return err;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static int smp_psci_cpu_disable(unsigned int cpu)
+{
+	/* Fail early if we don't have CPU_OFF support */
+	if (!psci_ops.cpu_off)
+		return -EOPNOTSUPP;
+	return 0;
+}
+
+static void smp_psci_cpu_die(unsigned int cpu)
+{
+	int ret;
+	/*
+	 * There are no known implementations of PSCI actually using the
+	 * power state field, pass a sensible default for now.
+	 */
+	struct psci_power_state state = {
+		.type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
+	};
+
+	ret = psci_ops.cpu_off(state);
+
+	pr_crit("psci: unable to power off CPU%u (%d)", cpu, ret);
+}
+#endif
+
 const struct smp_operations smp_psci_ops = {
 	.name		= "psci",
 	.cpu_init	= smp_psci_cpu_init,
 	.cpu_prepare	= smp_psci_cpu_prepare,
 	.cpu_boot	= smp_psci_cpu_boot,
+#ifdef CONFIG_HOTPLUG_CPU
+	.cpu_disable	= smp_psci_cpu_disable,
+	.cpu_die	= smp_psci_cpu_die,
+#endif
 };
-- 
1.8.1.1

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCHv2 2/5] arm64: factor out spin-table boot method
  2013-07-22 15:37 ` [PATCHv2 2/5] arm64: factor out spin-table boot method Mark Rutland
@ 2013-07-23 15:17   ` Santosh Shilimkar
  2013-08-02 14:14     ` Mark Rutland
  0 siblings, 1 reply; 8+ messages in thread
From: Santosh Shilimkar @ 2013-07-23 15:17 UTC (permalink / raw)
  To: linux-arm-kernel

On Monday 22 July 2013 11:37 AM, Mark Rutland wrote:
> The arm64 kernel has an internal holding pen, which is necessary for
> some systems where we can't bring CPUs online individually and must hold
> multiple CPUs in a safe area until the kernel is able to handle them.
> The current SMP infrastructure for arm64 is closely coupled to this
> holding pen, and alternative boot methods must launch CPUs into the pen,
> from whence they are launched into the kernel proper.
>
s/whence/where
 
> With PSCI (and possibly other future boot methods), we can bring CPUs
> online individually, and need not perform the secondary_holding_pen
> dance. Instead, this patch factors the holding pen management code out
> to the spin-table boot method code, as it is the only boot method
> requiring the pen.
> 
> A new entry point for secondaries, secondary_entry is added for other
> boot methods to use, which bypasses the holding pen and its associated
> overhead when bringing CPUs online. The smp.pen.text section is also
> removed, as the pen can live in head.text without problem.
> 
> The smp_operations structure is extended with two new functions,
> cpu_boot and cpu_postboot, for bringing a cpu into the kernel and
> performing any post-boot cleanup required by a bootmethod (e.g.
> resetting the secondary_holding_pen_release to INVALID_HWID).
> 
> Signed-off-by: Mark Rutland <mark.rutland@arm.com>
> ---
>  arch/arm64/include/asm/smp.h       | 17 ++++++++-
>  arch/arm64/kernel/head.S           | 12 +++++-
>  arch/arm64/kernel/smp.c            | 67 +++-------------------------------
>  arch/arm64/kernel/smp_psci.c       | 16 ++++----
>  arch/arm64/kernel/smp_spin_table.c | 75 ++++++++++++++++++++++++++++++++++++++
>  arch/arm64/kernel/vmlinux.lds.S    |  1 -
>  6 files changed, 115 insertions(+), 73 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
> index 90626b6..af39644 100644
> --- a/arch/arm64/include/asm/smp.h
> +++ b/arch/arm64/include/asm/smp.h
> @@ -60,8 +60,7 @@ struct secondary_data {
>  	void *stack;
>  };
>  extern struct secondary_data secondary_data;
> -extern void secondary_holding_pen(void);
> -extern volatile unsigned long secondary_holding_pen_release;
> +extern void secondary_entry(void);
>  
>  extern void arch_send_call_function_single_ipi(int cpu);
>  extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
> @@ -70,8 +69,22 @@ struct device_node;
>  
>  struct smp_operations {
>  	const char	*name;
> +	/*
> +	 * Check devicetree data for cpu
> +	 */
All these comments can go above the struct as mentioned
in kerneldoc.

>  	int		(*cpu_init)(struct device_node *, unsigned int);
> +	/*
> +	 * Test if cpu is present and bootable
> +	 */
>  	int		(*cpu_prepare)(unsigned int);
> +	/*
> +	 * Boot cpu into the kernel
> +	 */
> +	int		(*cpu_boot)(unsigned int);
> +	/*
> +	 * Performs post-boot cleanup
> +	 */
> +	void		(*cpu_postboot)(void);
>  };
>  
>  extern const struct smp_operations smp_spin_table_ops;
> diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
> index 53dcae4..3532ca6 100644
> --- a/arch/arm64/kernel/head.S
> +++ b/arch/arm64/kernel/head.S
> @@ -217,7 +217,6 @@ ENTRY(__boot_cpu_mode)
>  	.quad	PAGE_OFFSET
>  
>  #ifdef CONFIG_SMP
> -	.pushsection    .smp.pen.text, "ax"
>  	.align	3
>  1:	.quad	.
>  	.quad	secondary_holding_pen_release
> @@ -242,7 +241,16 @@ pen:	ldr	x4, [x3]
>  	wfe
>  	b	pen
>  ENDPROC(secondary_holding_pen)
> -	.popsection
> +
> +	/*
> +	 * Secondary entry point that jumps straight into the kernel. Only to
> +	 * be used where CPUs are brought online dynamically by the kernel.
> +	 */
> +ENTRY(secondary_entry)
> +	bl	__calc_phys_offset		// x2=phys offset
> +	bl	el2_setup			// Drop to EL1
> +	b	secondary_startup
> +ENDPROC(secondary_entry)
>  
>  ENTRY(secondary_startup)
>  	/*
Other than that, the rest of the patch looks fine to me.

regards,
Santosh

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCHv2 2/5] arm64: factor out spin-table boot method
  2013-07-23 15:17   ` Santosh Shilimkar
@ 2013-08-02 14:14     ` Mark Rutland
  0 siblings, 0 replies; 8+ messages in thread
From: Mark Rutland @ 2013-08-02 14:14 UTC (permalink / raw)
  To: linux-arm-kernel

On Tue, Jul 23, 2013 at 04:17:13PM +0100, Santosh Shilimkar wrote:
> On Monday 22 July 2013 11:37 AM, Mark Rutland wrote:
> > The arm64 kernel has an internal holding pen, which is necessary for
> > some systems where we can't bring CPUs online individually and must hold
> > multiple CPUs in a safe area until the kernel is able to handle them.
> > The current SMP infrastructure for arm64 is closely coupled to this
> > holding pen, and alternative boot methods must launch CPUs into the pen,
> > from whence they are launched into the kernel proper.
> >
> s/whence/where

Will fix up.

>  
> > With PSCI (and possibly other future boot methods), we can bring CPUs
> > online individually, and need not perform the secondary_holding_pen
> > dance. Instead, this patch factors the holding pen management code out
> > to the spin-table boot method code, as it is the only boot method
> > requiring the pen.
> > 
> > A new entry point for secondaries, secondary_entry is added for other
> > boot methods to use, which bypasses the holding pen and its associated
> > overhead when bringing CPUs online. The smp.pen.text section is also
> > removed, as the pen can live in head.text without problem.
> > 
> > The smp_operations structure is extended with two new functions,
> > cpu_boot and cpu_postboot, for bringing a cpu into the kernel and
> > performing any post-boot cleanup required by a bootmethod (e.g.
> > resetting the secondary_holding_pen_release to INVALID_HWID).
> > 
> > Signed-off-by: Mark Rutland <mark.rutland@arm.com>
> > ---
> >  arch/arm64/include/asm/smp.h       | 17 ++++++++-
> >  arch/arm64/kernel/head.S           | 12 +++++-
> >  arch/arm64/kernel/smp.c            | 67 +++-------------------------------
> >  arch/arm64/kernel/smp_psci.c       | 16 ++++----
> >  arch/arm64/kernel/smp_spin_table.c | 75 ++++++++++++++++++++++++++++++++++++++
> >  arch/arm64/kernel/vmlinux.lds.S    |  1 -
> >  6 files changed, 115 insertions(+), 73 deletions(-)
> > 
> > diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
> > index 90626b6..af39644 100644
> > --- a/arch/arm64/include/asm/smp.h
> > +++ b/arch/arm64/include/asm/smp.h
> > @@ -60,8 +60,7 @@ struct secondary_data {
> >  	void *stack;
> >  };
> >  extern struct secondary_data secondary_data;
> > -extern void secondary_holding_pen(void);
> > -extern volatile unsigned long secondary_holding_pen_release;
> > +extern void secondary_entry(void);
> >  
> >  extern void arch_send_call_function_single_ipi(int cpu);
> >  extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
> > @@ -70,8 +69,22 @@ struct device_node;
> >  
> >  struct smp_operations {
> >  	const char	*name;
> > +	/*
> > +	 * Check devicetree data for cpu
> > +	 */
> All these comments can go above the struct as mentioned
> in kerneldoc.

Ok, I'll take a look at the kerneldoc documentation.

> 
> >  	int		(*cpu_init)(struct device_node *, unsigned int);
> > +	/*
> > +	 * Test if cpu is present and bootable
> > +	 */
> >  	int		(*cpu_prepare)(unsigned int);
> > +	/*
> > +	 * Boot cpu into the kernel
> > +	 */
> > +	int		(*cpu_boot)(unsigned int);
> > +	/*
> > +	 * Performs post-boot cleanup
> > +	 */
> > +	void		(*cpu_postboot)(void);
> >  };
> >  
> >  extern const struct smp_operations smp_spin_table_ops;
> > diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
> > index 53dcae4..3532ca6 100644
> > --- a/arch/arm64/kernel/head.S
> > +++ b/arch/arm64/kernel/head.S
> > @@ -217,7 +217,6 @@ ENTRY(__boot_cpu_mode)
> >  	.quad	PAGE_OFFSET
> >  
> >  #ifdef CONFIG_SMP
> > -	.pushsection    .smp.pen.text, "ax"
> >  	.align	3
> >  1:	.quad	.
> >  	.quad	secondary_holding_pen_release
> > @@ -242,7 +241,16 @@ pen:	ldr	x4, [x3]
> >  	wfe
> >  	b	pen
> >  ENDPROC(secondary_holding_pen)
> > -	.popsection
> > +
> > +	/*
> > +	 * Secondary entry point that jumps straight into the kernel. Only to
> > +	 * be used where CPUs are brought online dynamically by the kernel.
> > +	 */
> > +ENTRY(secondary_entry)
> > +	bl	__calc_phys_offset		// x2=phys offset
> > +	bl	el2_setup			// Drop to EL1
> > +	b	secondary_startup
> > +ENDPROC(secondary_entry)
> >  
> >  ENTRY(secondary_startup)
> >  	/*
> Other than that, the rest of the patch looks fine to me.

Great!

Thanks,
Mark.

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2013-08-02 14:14 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2013-07-22 15:37 [PATCHv2 0/5] arm64: initial CPU hotplug support Mark Rutland
2013-07-22 15:37 ` [PATCHv2 1/5] arm64: reorganise smp_enable_ops Mark Rutland
2013-07-22 15:37 ` [PATCHv2 2/5] arm64: factor out spin-table boot method Mark Rutland
2013-07-23 15:17   ` Santosh Shilimkar
2013-08-02 14:14     ` Mark Rutland
2013-07-22 15:37 ` [PATCHv2 3/5] arm64: read enable-method for CPU0 Mark Rutland
2013-07-22 15:37 ` [PATCHv2 4/5] arm64: add CPU_HOTPLUG infrastructure Mark Rutland
2013-07-22 15:37 ` [PATCHv2 5/5] arm64: add PSCI CPU_OFF-based hotplug support Mark Rutland

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.