linuxppc-dev.lists.ozlabs.org archive mirror
 help / color / mirror / Atom feed
From: Oliver O'Halloran <oohall@gmail.com>
To: linuxppc-dev@lists.ozlabs.org
Cc: Oliver O'Halloran <oohall@gmail.com>
Subject: [PATCH 3/5] powerpc/smp: Add update_cpu_masks()
Date: Thu,  2 Mar 2017 11:49:18 +1100	[thread overview]
Message-ID: <20170302004920.21948-3-oohall@gmail.com> (raw)
In-Reply-To: <20170302004920.21948-1-oohall@gmail.com>

When adding and removing a CPU from the system the per-cpu masks that
are used by the scheduler to construct scheduler domains need to be updated
to account for the cpu entering or exiting the system. Currently this logic
is open-coded for the thread sibling mask and shared for the core mask.
This patch moves all the logic for rebuilding these masks into a single
function and simplifies the logic which determines which CPUs are within
a "core".

Signed-off-by: Oliver O'Halloran <oohall@gmail.com>
---
 arch/powerpc/kernel/smp.c | 90 ++++++++++++++++++++++++++++-------------------
 1 file changed, 54 insertions(+), 36 deletions(-)

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 1c531887ca51..3922cace927e 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -630,14 +630,20 @@ int cpu_first_thread_of_core(int core)
 }
 EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
 
-static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
+static bool update_core_mask_by_chip_id(int cpu, bool add)
 {
 	const struct cpumask *mask = add ? cpu_online_mask : cpu_present_mask;
+	int chipid = cpu_to_chip_id(cpu);
 	int i;
 
+	if (chipid == -1)
+		return false;
+
 	for_each_cpu(i, mask)
 		if (cpu_to_chip_id(i) == chipid)
 			set_cpus_related(cpu, i, add, cpu_core_mask);
+
+	return true;
 }
 
 /* Must be called when no change can occur to cpu_present_mask,
@@ -662,42 +668,72 @@ static struct device_node *cpu_to_l2cache(int cpu)
 	return cache;
 }
 
-static void traverse_core_siblings(int cpu, bool add)
+static bool update_core_mask_by_l2(int cpu, bool onlining)
 {
+	const struct cpumask *mask = onlining ? cpu_online_mask : cpu_present_mask;
 	struct device_node *l2_cache, *np;
-	const struct cpumask *mask;
-	int chip_id;
 	int i;
 
-	/* threads that share a chip-id are considered siblings (same die) */
-	chip_id = cpu_to_chip_id(cpu);
-
-	if (chip_id >= 0) {
-		traverse_siblings_chip_id(cpu, add, chip_id);
-		return;
-	}
-
-	/* if the chip-id fails then group siblings by the L2 cache */
 	l2_cache = cpu_to_l2cache(cpu);
-	mask = add ? cpu_online_mask : cpu_present_mask;
+	if (l2_cache == NULL)
+		return false;
+
 	for_each_cpu(i, mask) {
 		np = cpu_to_l2cache(i);
 		if (!np)
 			continue;
 
 		if (np == l2_cache)
-			set_cpus_related(cpu, i, add, cpu_core_mask);
+			set_cpus_related(cpu, i, onlining, cpu_core_mask);
 
 		of_node_put(np);
 	}
 	of_node_put(l2_cache);
+
+	return true;
+}
+
+static void update_thread_mask(int cpu, bool onlining)
+{
+	int base = cpu_first_thread_sibling(cpu);
+	int i;
+
+	pr_info("CPUDEBUG: onlining cpu %d, base %d, thread_per_core %d",
+		cpu, base, threads_per_core);
+
+	for (i = 0; i < threads_per_core; i++) {
+		/* Threads are onlined one by one. By the final time this
+		 * function is called for the core the sibling mask for each
+		 * thread will be complete, but we need to ensure that offline
+		 * threads aren't touched before they run start_secondary() */
+		if (onlining && cpu_is_offline(base + i) && (cpu != base + i))
+			continue;
+
+		set_cpus_related(cpu, base + i, onlining, cpu_sibling_mask);
+	}
+}
+
+static void update_cpu_masks(int cpu, bool onlining)
+{
+	int i;
+
+	update_thread_mask(cpu, onlining);
+
+	if (update_core_mask_by_chip_id(cpu, onlining))
+		return;
+
+	if (update_core_mask_by_l2(cpu, onlining))
+		return;
+
+	/* if all else fails duplicate the sibling mask */
+	for_each_cpu(i, cpu_sibling_mask(cpu))
+		set_cpus_related(cpu, i, onlining, cpu_core_mask);
 }
 
 /* Activate a secondary processor. */
 void start_secondary(void *unused)
 {
 	unsigned int cpu = smp_processor_id();
-	int i, base;
 
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
@@ -721,19 +757,7 @@ void start_secondary(void *unused)
 	vdso_getcpu_init();
 #endif
 	/* Update sibling maps */
-	base = cpu_first_thread_sibling(cpu);
-	for (i = 0; i < threads_per_core; i++) {
-		if (cpu_is_offline(base + i) && (cpu != base + i))
-			continue;
-		set_cpus_related(cpu, base + i, true, cpu_sibling_mask);
-
-		/* cpu_core_map should be a superset of
-		 * cpu_sibling_map even if we don't have cache
-		 * information, so update the former here, too.
-		 */
-		set_cpus_related(cpu, base + i, true, cpu_core_mask);
-	}
-	traverse_core_siblings(cpu, true);
+	update_cpu_masks(cpu, true);
 
 	set_numa_node(numa_cpu_lookup_table[cpu]);
 	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
@@ -808,7 +832,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
 int __cpu_disable(void)
 {
 	int cpu = smp_processor_id();
-	int base, i;
 	int err;
 
 	if (!smp_ops->cpu_disable)
@@ -819,12 +842,7 @@ int __cpu_disable(void)
 		return err;
 
 	/* Update sibling maps */
-	base = cpu_first_thread_sibling(cpu);
-	for (i = 0; i < threads_per_core && base + i < nr_cpu_ids; i++) {
-		set_cpus_related(cpu, base + i, false, cpu_sibling_mask);
-		set_cpus_related(cpu, base + i, false, cpu_core_mask);
-	}
-	traverse_core_siblings(cpu, false);
+	update_cpu_masks(cpu, false);
 
 	return 0;
 }
-- 
2.9.3

  parent reply	other threads:[~2017-03-02  0:49 UTC|newest]

Thread overview: 20+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-03-02  0:49 [PATCH 1/5] powerpc/smp: use cpu_to_chip_id() to find siblings Oliver O'Halloran
2017-03-02  0:49 ` [PATCH 2/5] powerpc/smp: add set_cpus_related() Oliver O'Halloran
2017-03-15 11:18   ` Michael Ellerman
2017-03-23  1:27     ` Oliver O'Halloran
2017-03-28  1:15       ` Michael Ellerman
2017-03-02  0:49 ` Oliver O'Halloran [this message]
2017-03-15 11:18   ` [PATCH 3/5] powerpc/smp: Add update_cpu_masks() Michael Ellerman
2017-03-02  0:49 ` [PATCH 4/5] powerpc/smp: add cpu_cache_mask Oliver O'Halloran
2017-03-15 11:26   ` Michael Ellerman
2017-03-23  3:33     ` Oliver O'Halloran
2017-03-28  1:05       ` Michael Ellerman
2017-03-02  0:49 ` [PATCH 5/5] powerpc/smp: Add Power9 scheduler topology Oliver O'Halloran
2017-03-02 10:25   ` Balbir Singh
2017-03-15 11:33     ` Michael Ellerman
2017-03-15 11:30   ` Michael Ellerman
2017-03-02  3:44 ` [PATCH 1/5] powerpc/smp: use cpu_to_chip_id() to find siblings Balbir Singh
2017-03-15 11:18 ` Michael Ellerman
2017-03-23  1:09   ` Oliver O'Halloran
2017-03-28  3:03     ` Michael Ellerman
2017-03-28  3:23       ` Oliver O'Halloran

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20170302004920.21948-3-oohall@gmail.com \
    --to=oohall@gmail.com \
    --cc=linuxppc-dev@lists.ozlabs.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).