All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly
@ 2015-04-09 19:14 chris.hyser
  2015-04-09 23:06 ` David Miller
                   ` (10 more replies)
  0 siblings, 11 replies; 12+ messages in thread
From: chris.hyser @ 2015-04-09 19:14 UTC (permalink / raw)
  To: sparclinux

sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly

The current sparc kernel has no representation for sockets though tools like
lscpu can pull this from sysfs. This patch walks the machine description cache
and socket hierarchy and marks sockets as well as cores and threads such that a
representative sysfs is created by drivers/base/topology.c.

Before this patch:
$ lscpu
Architecture:          sparc64
CPU op-mode(s):        32-bit, 64-bit
Byte Order:            Big Endian
CPU(s):                1024
On-line CPU(s) list:   0-1023
Thread(s) per core:    8
Core(s) per socket:    1     <--- wrong
Socket(s):             128   <--- wrong
NUMA node(s):          4
NUMA node0 CPU(s):     0-255
NUMA node1 CPU(s):     256-511
NUMA node2 CPU(s):     512-767
NUMA node3 CPU(s):     768-1023

After this patch:
$ lscpu
Architecture:          sparc64
CPU op-mode(s):        32-bit, 64-bit
Byte Order:            Big Endian
CPU(s):                1024
On-line CPU(s) list:   0-1023
Thread(s) per core:    8
Core(s) per socket:    32
Socket(s):             4
NUMA node(s):          4
NUMA node0 CPU(s):     0-255
NUMA node1 CPU(s):     256-511
NUMA node2 CPU(s):     512-767
NUMA node3 CPU(s):     768-1023

Most of this patch was done by Chris with updates by David.

Signed-off-by: Chris Hyser <chris.hyser@oracle.com>
Signed-off-by: David Ahern <david.ahern@oracle.com>

diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index a6e424d..a6cfdab 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -24,7 +24,8 @@ typedef struct {
 	unsigned int	icache_line_size;
 	unsigned int	ecache_size;
 	unsigned int	ecache_line_size;
-	int		core_id;
+	unsigned short	sock_id;
+	unsigned short	core_id;
 	int		proc_id;
 } cpuinfo_sparc;
 
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index ed8f071..d1761df 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -40,11 +40,12 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
 #ifdef CONFIG_SMP
 #define topology_physical_package_id(cpu)	(cpu_data(cpu).proc_id)
 #define topology_core_id(cpu)			(cpu_data(cpu).core_id)
-#define topology_core_cpumask(cpu)		(&cpu_core_map[cpu])
+#define topology_core_cpumask(cpu)		(&cpu_core_sib_map[cpu])
 #define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
 #endif /* CONFIG_SMP */
 
 extern cpumask_t cpu_core_map[NR_CPUS];
+extern cpumask_t cpu_core_sib_map[NR_CPUS];
 static inline const struct cpumask *cpu_coregroup_mask(int cpu)
 {
         return &cpu_core_map[cpu];
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 99632a8..78beb94 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -614,45 +614,68 @@ static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp)
 	}
 }
 
-static void mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id)
+static void find_back_node_value(struct mdesc_handle *hp, u64 node,
+				 char *srch_val,
+				 void(*func)(struct mdesc_handle *, u64, int),
+				 u64 val, int depth)
 {
-	u64 a;
-
-	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
-		u64 t = mdesc_arc_target(hp, a);
-		const char *name;
-		const u64 *id;
+	u64 arc;
 
-		name = mdesc_node_name(hp, t);
-		if (!strcmp(name, "cpu")) {
-			id = mdesc_get_property(hp, t, "id", NULL);
-			if (*id < NR_CPUS)
-				cpu_data(*id).core_id = core_id;
-		} else {
-			u64 j;
+	/* Since we have an estimate of recursion depth, do a sanity check. */
+	if (depth == 0)
+		return;
 
-			mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) {
-				u64 n = mdesc_arc_target(hp, j);
-				const char *n_name;
+	mdesc_for_each_arc(arc, hp, node, MDESC_ARC_TYPE_BACK) {
+		u64 n = mdesc_arc_target(hp, arc);
+		const char *name = mdesc_node_name(hp, n);
 
-				n_name = mdesc_node_name(hp, n);
-				if (strcmp(n_name, "cpu"))
-					continue;
+		if (!strcmp(srch_val, name))
+			(*func)(hp, n, val);
 
-				id = mdesc_get_property(hp, n, "id", NULL);
-				if (*id < NR_CPUS)
-					cpu_data(*id).core_id = core_id;
-			}
-		}
+		find_back_node_value(hp, n, srch_val, func, val, depth-1);
 	}
 }
 
+static void __mark_core_id(struct mdesc_handle *hp, u64 node,
+			   int core_id)
+{
+	const u64 *id = mdesc_get_property(hp, node, "id", NULL);
+
+	if (*id < num_possible_cpus())
+		cpu_data(*id).core_id = core_id;
+}
+
+static void __mark_sock_id(struct mdesc_handle *hp, u64 node,
+			   int sock_id)
+{
+	const u64 *id = mdesc_get_property(hp, node, "id", NULL);
+
+	if (*id < num_possible_cpus())
+		cpu_data(*id).sock_id = sock_id;
+}
+
+static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
+			  int core_id)
+{
+	find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10);
+}
+
+static void mark_sock_ids(struct mdesc_handle *hp, u64 mp,
+			  int sock_id)
+{
+	find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10);
+}
+
 static void set_core_ids(struct mdesc_handle *hp)
 {
 	int idx;
 	u64 mp;
 
 	idx = 1;
+
+	/* Identify unique cores by looking for cpus backpointed to by
+	 * level 1 instruction caches.
+	 */
 	mdesc_for_each_node_by_name(hp, mp, "cache") {
 		const u64 *level;
 		const char *type;
@@ -667,11 +690,67 @@ static void set_core_ids(struct mdesc_handle *hp)
 			continue;
 
 		mark_core_ids(hp, mp, idx);
+		idx++;
+	}
+}
+
+static void set_sock_ids_by_cache(struct mdesc_handle *hp, u64 mp)
+{
+	int idx = 1;
+
+	/* Identify unique sockets by looking for cpus backpointed to by
+	 * level 3 caches.
+	 */
+	mdesc_for_each_node_by_name(hp, mp, "cache") {
+		const u64 *level;
+
+		level = mdesc_get_property(hp, mp, "level", NULL);
+		if (*level != 3)
+			continue;
 
+		mark_sock_ids(hp, mp, idx);
 		idx++;
 	}
 }
 
+static void set_sock_ids_by_socket(struct mdesc_handle *hp, u64 mp)
+{
+	int idx = 1;
+
+	mdesc_for_each_node_by_name(hp, mp, "socket") {
+		u64 a;
+
+		mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
+			u64 t = mdesc_arc_target(hp, a);
+			const char *name;
+			const u64 *id;
+
+			name = mdesc_node_name(hp, t);
+			if (strcmp(name, "cpu"))
+				continue;
+
+			id = mdesc_get_property(hp, t, "id", NULL);
+			if (*id < num_possible_cpus())
+				cpu_data(*id).sock_id = idx;
+		}
+		idx++;
+	}
+}
+
+static void set_sock_ids(struct mdesc_handle *hp)
+{
+	u64 mp;
+
+	/* If machine description exposes sockets data use it.
+	 * Otherwise fallback to use L3 cache
+	 */
+	mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
+	if (mp == MDESC_NODE_NULL)
+		return set_sock_ids_by_cache(hp, mp);
+
+	return set_sock_ids_by_socket(hp, mp);
+}
+
 static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
 {
 	u64 a;
@@ -707,7 +786,6 @@ static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
 			continue;
 
 		mark_proc_ids(hp, mp, idx);
-
 		idx++;
 	}
 }
@@ -900,6 +978,7 @@ void mdesc_fill_in_cpu_data(cpumask_t *mask)
 
 	set_core_ids(hp);
 	set_proc_ids(hp);
+	set_sock_ids(hp);
 
 	mdesc_release(hp);
 
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 61139d9..19cd08d 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -60,8 +60,12 @@ DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 
+cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
+	[0 ... NR_CPUS-1] = CPU_MASK_NONE };
+
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_SYMBOL(cpu_core_map);
+EXPORT_SYMBOL(cpu_core_sib_map);
 
 static cpumask_t smp_commenced_mask;
 
@@ -1243,6 +1247,15 @@ void smp_fill_in_sib_core_maps(void)
 		}
 	}
 
+	for_each_present_cpu(i)  {
+		unsigned int j;
+
+		for_each_present_cpu(j)  {
+			if (cpu_data(i).sock_id == cpu_data(j).sock_id)
+				cpumask_set_cpu(j, &cpu_core_sib_map[i]);
+		}
+	}
+
 	for_each_present_cpu(i) {
 		unsigned int j;
 

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* Re: [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly
  2015-04-09 19:14 [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly chris.hyser
@ 2015-04-09 23:06 ` David Miller
  2015-04-10 15:27 ` chris hyser
                   ` (9 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: David Miller @ 2015-04-09 23:06 UTC (permalink / raw)
  To: sparclinux

From: chris.hyser@oracle.com
Date: Thu, 09 Apr 2015 15:14:35 -0400

> +static void set_sock_ids(struct mdesc_handle *hp)
> +{
> +	u64 mp;
> +
> +	/* If machine description exposes sockets data use it.
> +	 * Otherwise fallback to use L3 cache
> +	 */
> +	mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
> +	if (mp == MDESC_NODE_NULL)
> +		return set_sock_ids_by_cache(hp, mp);
> +
> +	return set_sock_ids_by_socket(hp, mp);
> +}
> +

How will this work on T3, T2, and T1 which all neither have the
"socket" mdesc nodes nor level 3 caches?

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly
  2015-04-09 19:14 [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly chris.hyser
  2015-04-09 23:06 ` David Miller
@ 2015-04-10 15:27 ` chris hyser
  2015-04-10 19:25 ` David Miller
                   ` (8 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: chris hyser @ 2015-04-10 15:27 UTC (permalink / raw)
  To: sparclinux

On 4/9/2015 7:06 PM, David Miller wrote:
> How will this work on T3, T2, and T1 which all neither have the
> "socket" mdesc nodes nor level 3 caches?

The patch only works for sun4v and is only called in that path. If this is broken for older architectures it will 
require a completely different solution. I do not believe there is sufficient info in the openboot device tree and even 
if there was that would be insufficient for sun4v as it is never updated in the presence of dynamic addition/removal of 
processors as the machine description table is. That said, this patch will likely need refinement when someone tries to 
get that working again. I've tested adding and removing CPUs on a few older kernels and it's broke though the code 
suggests it worked at some point in the past. It is on my list of things to look at, but no where near the top.

-chrish


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly
  2015-04-09 19:14 [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly chris.hyser
  2015-04-09 23:06 ` David Miller
  2015-04-10 15:27 ` chris hyser
@ 2015-04-10 19:25 ` David Miller
  2015-04-10 19:55 ` chris hyser
                   ` (7 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: David Miller @ 2015-04-10 19:25 UTC (permalink / raw)
  To: sparclinux

From: chris hyser <chris.hyser@oracle.com>
Date: Fri, 10 Apr 2015 11:27:23 -0400

> On 4/9/2015 7:06 PM, David Miller wrote:
>> How will this work on T3, T2, and T1 which all neither have the
>> "socket" mdesc nodes nor level 3 caches?
> 
> The patch only works for sun4v and is only called in that path.

T1, T2, and T3 are sun4v systems.

I can't believe I'm having this conversation with an Oracle engineer.


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly
  2015-04-09 19:14 [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly chris.hyser
                   ` (2 preceding siblings ...)
  2015-04-10 19:25 ` David Miller
@ 2015-04-10 19:55 ` chris hyser
  2015-04-11 21:57 ` David Miller
                   ` (6 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: chris hyser @ 2015-04-10 19:55 UTC (permalink / raw)
  To: sparclinux

On 4/10/2015 3:25 PM, David Miller wrote:
> From: chris hyser <chris.hyser@oracle.com>
> Date: Fri, 10 Apr 2015 11:27:23 -0400
>
>> On 4/9/2015 7:06 PM, David Miller wrote:
>>> How will this work on T3, T2, and T1 which all neither have the
>>> "socket" mdesc nodes nor level 3 caches?
>>
>> The patch only works for sun4v and is only called in that path.
>
> T1, T2, and T3 are sun4v systems.
>
> I can't believe I'm having this conversation with an Oracle engineer.

Your mileage will vary. So you want a generic solution that works on platforms that do not have the key element required 
for this patch? Absent useful machine provided info, I'm guessing it will require some sort of hardcoded table.

-chrish


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly
  2015-04-09 19:14 [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly chris.hyser
                   ` (3 preceding siblings ...)
  2015-04-10 19:55 ` chris hyser
@ 2015-04-11 21:57 ` David Miller
  2015-04-16 19:41 ` David Miller
                   ` (5 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: David Miller @ 2015-04-11 21:57 UTC (permalink / raw)
  To: sparclinux

From: chris hyser <chris.hyser@oracle.com>
Date: Fri, 10 Apr 2015 15:55:44 -0400

> On 4/10/2015 3:25 PM, David Miller wrote:
>> From: chris hyser <chris.hyser@oracle.com>
>> Date: Fri, 10 Apr 2015 11:27:23 -0400
>>
>>> On 4/9/2015 7:06 PM, David Miller wrote:
>>>> How will this work on T3, T2, and T1 which all neither have the
>>>> "socket" mdesc nodes nor level 3 caches?
>>>
>>> The patch only works for sun4v and is only called in that path.
>>
>> T1, T2, and T3 are sun4v systems.
>>
>> I can't believe I'm having this conversation with an Oracle engineer.
> 
> Your mileage will vary. So you want a generic solution that works on
> platforms that do not have the key element required for this patch?
> Absent useful machine provided info, I'm guessing it will require some
> sort of hardcoded table.

I'm pretty sure on the T2 you can use the level 2 cache instead of
the level 3 one.

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly
  2015-04-09 19:14 [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly chris.hyser
                   ` (4 preceding siblings ...)
  2015-04-11 21:57 ` David Miller
@ 2015-04-16 19:41 ` David Miller
  2015-04-16 20:31 ` chris hyser
                   ` (4 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: David Miller @ 2015-04-16 19:41 UTC (permalink / raw)
  To: sparclinux

From: David Miller <davem@davemloft.net>
Date: Thu, 09 Apr 2015 19:06:47 -0400 (EDT)

> From: chris.hyser@oracle.com
> Date: Thu, 09 Apr 2015 15:14:35 -0400
> 
>> +static void set_sock_ids(struct mdesc_handle *hp)
>> +{
>> +	u64 mp;
>> +
>> +	/* If machine description exposes sockets data use it.
>> +	 * Otherwise fallback to use L3 cache
>> +	 */
>> +	mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
>> +	if (mp == MDESC_NODE_NULL)
>> +		return set_sock_ids_by_cache(hp, mp);
>> +
>> +	return set_sock_ids_by_socket(hp, mp);
>> +}
>> +
> 
> How will this work on T3, T2, and T1 which all neither have the
> "socket" mdesc nodes nor level 3 caches?

I'm still waiting for you to resolve this Chris.

I don't think it's much effort to make this change back down
to using the level=2 cache if no level=3 cache is found.  Please
implement that and resubmit.

Thanks.

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly
  2015-04-09 19:14 [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly chris.hyser
                   ` (5 preceding siblings ...)
  2015-04-16 19:41 ` David Miller
@ 2015-04-16 20:31 ` chris hyser
  2015-04-21 18:25 ` chris hyser
                   ` (3 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: chris hyser @ 2015-04-16 20:31 UTC (permalink / raw)
  To: sparclinux

On 4/16/2015 3:41 PM, David Miller wrote:
> I'm still waiting for you to resolve this Chris.

understood.

> I don't think it's much effort to make this change back down
> to using the level=2 cache if no level=3 cache is found.  Please
> implement that and resubmit.

as soon as i reasonably can.

-chrish

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly
  2015-04-09 19:14 [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly chris.hyser
                   ` (6 preceding siblings ...)
  2015-04-16 20:31 ` chris hyser
@ 2015-04-21 18:25 ` chris hyser
  2015-04-21 18:33 ` David Miller
                   ` (2 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: chris hyser @ 2015-04-21 18:25 UTC (permalink / raw)
  To: sparclinux

On 4/16/2015 3:41 PM, David Miller wrote:
> From: David Miller <davem@davemloft.net>
> Date: Thu, 09 Apr 2015 19:06:47 -0400 (EDT)
>> How will this work on T3, T2, and T1 which all neither have the
>> "socket" mdesc nodes nor level 3 caches?
>
> I'm still waiting for you to resolve this Chris.
>
> I don't think it's much effort to make this change back down
> to using the level=2 cache if no level=3 cache is found.  Please
> implement that and resubmit.

So that does appear to work. This is not the patch. I will send that out shortly but I thought I'd give you a chance to 
provide feedback while I'm getting that ready.

Here is what I see on a T2.

before:
-----------------
tct2000-52> lscpu
Architecture:          sparc64
CPU op-mode(s):        32-bit, 64-bit
Byte Order:            Big Endian
CPU(s):                32
On-line CPU(s) list:   0-31
Thread(s) per core:    4
Core(s) per socket:    1
Socket(s):             8

after:
----------------------
tct2000-52> lscpu
Architecture:          sparc64
CPU op-mode(s):        32-bit, 64-bit
Byte Order:            Big Endian
CPU(s):                32
On-line CPU(s) list:   0-31
Thread(s) per core:    4
Core(s) per socket:    8
Socket(s):             1


Diff from the prior patch (I also see some cleanup here I will do):

diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 78beb94..75e2890 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -694,23 +694,26 @@ static void set_core_ids(struct mdesc_handle *hp)
         }
  }

-static void set_sock_ids_by_cache(struct mdesc_handle *hp, u64 mp)
+static int set_sock_ids_by_cache(struct mdesc_handle *hp, u64 mp, int level)
  {
         int idx = 1;
+       int fnd = 0;

         /* Identify unique sockets by looking for cpus backpointed to by
-        * level 3 caches.
+        * shared level n caches.
          */
         mdesc_for_each_node_by_name(hp, mp, "cache") {
-               const u64 *level;
+               const u64 *cur_lvl;

-               level = mdesc_get_property(hp, mp, "level", NULL);
-               if (*level != 3)
+               cur_lvl = mdesc_get_property(hp, mp, "level", NULL);
+               if (*cur_lvl != level)
                         continue;

                 mark_sock_ids(hp, mp, idx);
                 idx++;
+               fnd = 1;
         }
+       return fnd;
  }

  static void set_sock_ids_by_socket(struct mdesc_handle *hp, u64 mp)
@@ -742,11 +745,12 @@ static void set_sock_ids(struct mdesc_handle *hp)
         u64 mp;

         /* If machine description exposes sockets data use it.
-        * Otherwise fallback to use L3 cache
+        * Otherwise fallback to use shared L3 or L2 caches
          */
         mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
         if (mp == MDESC_NODE_NULL)
-               return set_sock_ids_by_cache(hp, mp);
+               if (!set_sock_ids_by_cache(hp, mp, 3))
+                       set_sock_ids_by_cache(hp, mp, 2);

         return set_sock_ids_by_socket(hp, mp);
  }


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* Re: [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly
  2015-04-09 19:14 [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly chris.hyser
                   ` (7 preceding siblings ...)
  2015-04-21 18:25 ` chris hyser
@ 2015-04-21 18:33 ` David Miller
  2015-04-21 18:39 ` chris hyser
  2015-04-21 18:41 ` chris hyser
  10 siblings, 0 replies; 12+ messages in thread
From: David Miller @ 2015-04-21 18:33 UTC (permalink / raw)
  To: sparclinux

From: chris hyser <chris.hyser@oracle.com>
Date: Tue, 21 Apr 2015 14:25:40 -0400

> On 4/16/2015 3:41 PM, David Miller wrote:
>> From: David Miller <davem@davemloft.net>
>> Date: Thu, 09 Apr 2015 19:06:47 -0400 (EDT)
>>> How will this work on T3, T2, and T1 which all neither have the
>>> "socket" mdesc nodes nor level 3 caches?
>>
>> I'm still waiting for you to resolve this Chris.
>>
>> I don't think it's much effort to make this change back down
>> to using the level=2 cache if no level=3 cache is found.  Please
>> implement that and resubmit.
> 
> So that does appear to work. This is not the patch. I will send that
> out shortly but I thought I'd give you a chance to provide feedback
> while I'm getting that ready.
> 
> Here is what I see on a T2.
 ...
> Diff from the prior patch (I also see some cleanup here I will do):

Yeah that should work just as well on T3 too.

BTW, there are mdesc dumps in my prtconfs repo that you can use
for reference when working on things that walk the mdesc graph.

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly
  2015-04-09 19:14 [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly chris.hyser
                   ` (8 preceding siblings ...)
  2015-04-21 18:33 ` David Miller
@ 2015-04-21 18:39 ` chris hyser
  2015-04-21 18:41 ` chris hyser
  10 siblings, 0 replies; 12+ messages in thread
From: chris hyser @ 2015-04-21 18:39 UTC (permalink / raw)
  To: sparclinux

On 4/21/2015 2:25 PM, chris hyser wrote:
>          mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
>          if (mp == MDESC_NODE_NULL)
> -               return set_sock_ids_by_cache(hp, mp);
> +               if (!set_sock_ids_by_cache(hp, mp, 3))
> +                       set_sock_ids_by_cache(hp, mp, 2);
>
>          return set_sock_ids_by_socket(hp, mp);

So I missed the return here. Will fix that. Surprised that even worked.

-chrish

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly
  2015-04-09 19:14 [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly chris.hyser
                   ` (9 preceding siblings ...)
  2015-04-21 18:39 ` chris hyser
@ 2015-04-21 18:41 ` chris hyser
  10 siblings, 0 replies; 12+ messages in thread
From: chris hyser @ 2015-04-21 18:41 UTC (permalink / raw)
  To: sparclinux

On 4/21/2015 2:33 PM, David Miller wrote:
> From: chris hyser <chris.hyser@oracle.com>
> Date: Tue, 21 Apr 2015 14:25:40 -0400
> BTW, there are mdesc dumps in my prtconfs repo that you can use
> for reference when working on things that walk the mdesc graph.

Cool. I did not know that. Thanks.



^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2015-04-21 18:41 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-04-09 19:14 [PATCH v3] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly chris.hyser
2015-04-09 23:06 ` David Miller
2015-04-10 15:27 ` chris hyser
2015-04-10 19:25 ` David Miller
2015-04-10 19:55 ` chris hyser
2015-04-11 21:57 ` David Miller
2015-04-16 19:41 ` David Miller
2015-04-16 20:31 ` chris hyser
2015-04-21 18:25 ` chris hyser
2015-04-21 18:33 ` David Miller
2015-04-21 18:39 ` chris hyser
2015-04-21 18:41 ` chris hyser

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.