All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH V2] sched: topology: make cache topology separate from cpu topology
@ 2022-03-11  3:25 ` Qing Wang
  0 siblings, 0 replies; 8+ messages in thread
From: Qing Wang @ 2022-03-11  3:25 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon, Sudeep Holla, Greg Kroah-Hartman,
	Rafael J. Wysocki, Ingo Molnar, Peter Zijlstra, Juri Lelli,
	Vincent Guittot, Dietmar Eggemann, Steven Rostedt, Ben Segall,
	Mel Gorman, Daniel Bristot de Oliveira, linux-arm-kernel,
	linux-kernel
  Cc: Wang Qing

From: Wang Qing <wangqing@vivo.com>

On some architectures (e.g. ARM64), caches are implemented as below:
SD(Level 1):          ************ DIE ************
SD(Level 0):          **** MC ****    **** MC *****
cluster:              **cluster 0**   **cluster 1**
cores:                0   1   2   3   4   5   6   7
cache(Level 1):       C   C   C   C   C   C   C   C
cache(Level 2):       **C**   **C**   **C**   **C**
cache(Level 3):       *******shared Level 3********
sd_llc_id(current):   0   0   0   0   4   4   4   4
sd_llc_id(should be): 0   0   2   2   4   4   6   6

Caches and cpus have different topologies; this causes cpus_share_cache()
to return the wrong value in sd, which will affect the CPU load balance.

The cost of migration in core[0-1] is different to core[2-3] within sd,
because core[0-1] shared L2 cache, but not shared with core[2-3].

Cache topology should be separated from CPU topology; it can preferentially
be obtained from "next-level-cache" in the DTS.

V2:
move fix_cpu_llc() to arch_topology.c

Signed-off-by: Wang Qing <wangqing@vivo.com>
---
 arch/arm64/kernel/smp.c       |  1 +
 drivers/base/arch_topology.c  | 56 +++++++++++++++++++++++++++++++++++++++++++
 include/linux/arch_topology.h |  2 ++
 kernel/sched/topology.c       |  3 +++
 4 files changed, 62 insertions(+)

diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 27df5c1..94cf649
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -723,6 +723,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	unsigned int this_cpu;
 
 	init_cpu_topology();
+	init_cpu_cache_topology();
 
 	this_cpu = smp_processor_id();
 	store_cpu_topology(this_cpu);
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 9761541..d6e59b8
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -609,6 +609,62 @@ static int __init parse_dt_topology(void)
 #endif
 
 /*
+ * cpu cache topology table
+ */
+#define MAX_CACHE_LEVEL 7
+struct device_node *cache_topology[NR_CPUS][MAX_CACHE_LEVEL];
+
+void init_cpu_cache_topology(void)
+{
+	struct device_node *node_cpu, *node_cache;
+	int cpu, level;
+
+	for_each_possible_cpu(cpu) {
+		node_cpu = of_get_cpu_node(cpu, NULL);
+		if (!node_cpu)
+			continue;
+
+		level = 0;
+		node_cache = node_cpu;
+		while (level < MAX_CACHE_LEVEL) {
+			node_cache = of_parse_phandle(node_cache, "next-level-cache", 0);
+			if (!node_cache)
+				break;
+
+			cache_topology[cpu][level++] = node_cache;
+		}
+		of_node_put(node_cpu);
+	}
+}
+
+void fix_cpu_llc(int cpu, int *first_cpu, int *cpu_num)
+{
+	int cache_level, cpu_id;
+	int first, last;
+	int id = *first_cpu;
+	int size = *cpu_num;
+
+	for (cache_level = 0; cache_level < MAX_CACHE_LEVEL; cache_level++) {
+		if (!cache_topology[cpu][cache_level])
+			break;
+
+		first = -1;
+		last = id;
+		for (cpu_id = 0; cpu_id < NR_CPUS; cpu_id++) {
+			if (cache_topology[cpu][cache_level] == cache_topology[cpu_id][cache_level]) {
+				if (cpu_id < id || cpu_id >= id + size)
+					return;
+
+				first = (first == -1)?cpu_id:first;
+				last = cpu_id;
+			}
+		}
+		*first_cpu = first;
+		*cpu_num = last - first + 1;
+	}
+}
+
+/*
  * cpu topology table
  */
 struct cpu_topology cpu_topology[NR_CPUS];
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index cce6136b..3048fa6
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -82,6 +82,8 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
 #define topology_cluster_cpumask(cpu)	(&cpu_topology[cpu].cluster_sibling)
 #define topology_llc_cpumask(cpu)	(&cpu_topology[cpu].llc_sibling)
 void init_cpu_topology(void);
+void init_cpu_cache_topology(void);
+void fix_cpu_llc(int cpu, int *first_cpu, int *cpu_num);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
 const struct cpumask *cpu_clustergroup_mask(int cpu);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index d201a70..d894ced
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -661,6 +661,9 @@ static void update_top_cache_domain(int cpu)
 	if (sd) {
 		id = cpumask_first(sched_domain_span(sd));
 		size = cpumask_weight(sched_domain_span(sd));
+#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
+		fix_cpu_llc(cpu, &id, &size);
+#endif
 		sds = sd->shared;
 	}
 
-- 
2.7.4


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH V2] sched: topology: make cache topology separate from cpu topology
@ 2022-03-11  3:25 ` Qing Wang
  0 siblings, 0 replies; 8+ messages in thread
From: Qing Wang @ 2022-03-11  3:25 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon, Sudeep Holla, Greg Kroah-Hartman,
	Rafael J. Wysocki, Ingo Molnar, Peter Zijlstra, Juri Lelli,
	Vincent Guittot, Dietmar Eggemann, Steven Rostedt, Ben Segall,
	Mel Gorman, Daniel Bristot de Oliveira, linux-arm-kernel,
	linux-kernel
  Cc: Wang Qing

From: Wang Qing <wangqing@vivo.com>

On some architectures (e.g. ARM64), caches are implemented as below:
SD(Level 1):          ************ DIE ************
SD(Level 0):          **** MC ****    **** MC *****
cluster:              **cluster 0**   **cluster 1**
cores:                0   1   2   3   4   5   6   7
cache(Level 1):       C   C   C   C   C   C   C   C
cache(Level 2):       **C**   **C**   **C**   **C**
cache(Level 3):       *******shared Level 3********
sd_llc_id(current):   0   0   0   0   4   4   4   4
sd_llc_id(should be): 0   0   2   2   4   4   6   6

Caches and cpus have different topologies; this causes cpus_share_cache()
to return the wrong value in sd, which will affect the CPU load balance.

The cost of migration in core[0-1] is different to core[2-3] within sd,
because core[0-1] shared L2 cache, but not shared with core[2-3].

Cache topology should be separated from CPU topology; it can preferentially
be obtained from "next-level-cache" in the DTS.

V2:
move fix_cpu_llc() to arch_topology.c

Signed-off-by: Wang Qing <wangqing@vivo.com>
---
 arch/arm64/kernel/smp.c       |  1 +
 drivers/base/arch_topology.c  | 56 +++++++++++++++++++++++++++++++++++++++++++
 include/linux/arch_topology.h |  2 ++
 kernel/sched/topology.c       |  3 +++
 4 files changed, 62 insertions(+)

diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 27df5c1..94cf649
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -723,6 +723,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	unsigned int this_cpu;
 
 	init_cpu_topology();
+	init_cpu_cache_topology();
 
 	this_cpu = smp_processor_id();
 	store_cpu_topology(this_cpu);
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 9761541..d6e59b8
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -609,6 +609,62 @@ static int __init parse_dt_topology(void)
 #endif
 
 /*
+ * cpu cache topology table
+ */
+#define MAX_CACHE_LEVEL 7
+struct device_node *cache_topology[NR_CPUS][MAX_CACHE_LEVEL];
+
+void init_cpu_cache_topology(void)
+{
+	struct device_node *node_cpu, *node_cache;
+	int cpu, level;
+
+	for_each_possible_cpu(cpu) {
+		node_cpu = of_get_cpu_node(cpu, NULL);
+		if (!node_cpu)
+			continue;
+
+		level = 0;
+		node_cache = node_cpu;
+		while (level < MAX_CACHE_LEVEL) {
+			node_cache = of_parse_phandle(node_cache, "next-level-cache", 0);
+			if (!node_cache)
+				break;
+
+			cache_topology[cpu][level++] = node_cache;
+		}
+		of_node_put(node_cpu);
+	}
+}
+
+void fix_cpu_llc(int cpu, int *first_cpu, int *cpu_num)
+{
+	int cache_level, cpu_id;
+	int first, last;
+	int id = *first_cpu;
+	int size = *cpu_num;
+
+	for (cache_level = 0; cache_level < MAX_CACHE_LEVEL; cache_level++) {
+		if (!cache_topology[cpu][cache_level])
+			break;
+
+		first = -1;
+		last = id;
+		for (cpu_id = 0; cpu_id < NR_CPUS; cpu_id++) {
+			if (cache_topology[cpu][cache_level] == cache_topology[cpu_id][cache_level]) {
+				if (cpu_id < id || cpu_id >= id + size)
+					return;
+
+				first = (first == -1)?cpu_id:first;
+				last = cpu_id;
+			}
+		}
+		*first_cpu = first;
+		*cpu_num = last - first + 1;
+	}
+}
+
+/*
  * cpu topology table
  */
 struct cpu_topology cpu_topology[NR_CPUS];
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index cce6136b..3048fa6
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -82,6 +82,8 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
 #define topology_cluster_cpumask(cpu)	(&cpu_topology[cpu].cluster_sibling)
 #define topology_llc_cpumask(cpu)	(&cpu_topology[cpu].llc_sibling)
 void init_cpu_topology(void);
+void init_cpu_cache_topology(void);
+void fix_cpu_llc(int cpu, int *first_cpu, int *cpu_num);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
 const struct cpumask *cpu_clustergroup_mask(int cpu);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index d201a70..d894ced
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -661,6 +661,9 @@ static void update_top_cache_domain(int cpu)
 	if (sd) {
 		id = cpumask_first(sched_domain_span(sd));
 		size = cpumask_weight(sched_domain_span(sd));
+#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
+		fix_cpu_llc(cpu, &id, &size);
+#endif
 		sds = sd->shared;
 	}
 
-- 
2.7.4


_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH V2] sched: topology: make cache topology separate from cpu topology
  2022-03-11  3:25 ` Qing Wang
@ 2022-03-11 18:25   ` Darren Hart
  -1 siblings, 0 replies; 8+ messages in thread
From: Darren Hart @ 2022-03-11 18:25 UTC (permalink / raw)
  To: Qing Wang
  Cc: Catalin Marinas, Will Deacon, Sudeep Holla, Greg Kroah-Hartman,
	Rafael J. Wysocki, Ingo Molnar, Peter Zijlstra, Juri Lelli,
	Vincent Guittot, Dietmar Eggemann, Steven Rostedt, Ben Segall,
	Mel Gorman, Daniel Bristot de Oliveira, linux-arm-kernel,
	linux-kernel

On Thu, Mar 10, 2022 at 07:25:33PM -0800, Qing Wang wrote:
> From: Wang Qing <wangqing@vivo.com>
> 
> Some architectures(e.g. ARM64), caches are implemented like below:
> SD(Level 1):          ************ DIE ************
> SD(Level 0):          **** MC ****    **** MC *****
> cluster:              **cluster 0**   **cluster 1**
> cores:                0   1   2   3   4   5   6   7
> cache(Level 1):       C   C   C   C   C   C   C   C
> cache(Level 2):       **C**   **C**   **C**   **C**
> cache(Level 3):       *******shared Level 3********
> sd_llc_id(current):   0   0   0   0   4   4   4   4
> sd_llc_id(should be): 0   0   2   2   4   4   6   6

Should cluster 0 and 1 span the same cpu mask as the MCs? Based on how
you describe the cache above, it seems like what you are looking for
would be:

(SD DIE level removed in favor of the same span MC)
SD(Level 1):          ************ MC  ************
SD(Level 0):          *CLS0*  *CLS1*  *CLS2*  *CLS3* (CONFIG_SCHED_CLUSTER)
cores:                0   1   2   3   4   5   6   7
cache(Level 1):       C   C   C   C   C   C   C   C
cache(Level 2):       **C**   **C**   **C**   **C**
cache(Level 3):       *******shared Level 3********

Provided cpu_coregroup_mask and cpu_clustergroup_mask return the
corresponding cpumasks, this should work with the default sched domain
topology.

It looks to me like the lack of nested cluster support in
parse_cluster() in drivers/base/arch_topology.c is what needs to be
updated to accomplish the above. With cpu_topology[cpu].cluster_sibling and
core_sibling updated to reflect the topology you describe, the rest of
the sched domains construction would work with the default sched domain
topology.

I'm not very familiar with DT, especially the cpu-map. Does your DT
reflect the topology you want to build?


-- 
Darren Hart
Ampere Computing / OS and Kernel

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH V2] sched: topology: make cache topology separate from cpu topology
@ 2022-03-11 18:25   ` Darren Hart
  0 siblings, 0 replies; 8+ messages in thread
From: Darren Hart @ 2022-03-11 18:25 UTC (permalink / raw)
  To: Qing Wang
  Cc: Catalin Marinas, Will Deacon, Sudeep Holla, Greg Kroah-Hartman,
	Rafael J. Wysocki, Ingo Molnar, Peter Zijlstra, Juri Lelli,
	Vincent Guittot, Dietmar Eggemann, Steven Rostedt, Ben Segall,
	Mel Gorman, Daniel Bristot de Oliveira, linux-arm-kernel,
	linux-kernel

On Thu, Mar 10, 2022 at 07:25:33PM -0800, Qing Wang wrote:
> From: Wang Qing <wangqing@vivo.com>
> 
> Some architectures(e.g. ARM64), caches are implemented like below:
> SD(Level 1):          ************ DIE ************
> SD(Level 0):          **** MC ****    **** MC *****
> cluster:              **cluster 0**   **cluster 1**
> cores:                0   1   2   3   4   5   6   7
> cache(Level 1):       C   C   C   C   C   C   C   C
> cache(Level 2):       **C**   **C**   **C**   **C**
> cache(Level 3):       *******shared Level 3********
> sd_llc_id(current):   0   0   0   0   4   4   4   4
> sd_llc_id(should be): 0   0   2   2   4   4   6   6

Should cluster 0 and 1 span the same cpu mask as the MCs? Based on how
you describe the cache above, it seems like what you are looking for
would be:

(SD DIE level removed in favor of the same span MC)
SD(Level 1):          ************ MC  ************
SD(Level 0):          *CLS0*  *CLS1*  *CLS2*  *CLS3* (CONFIG_SCHED_CLUSTER)
cores:                0   1   2   3   4   5   6   7
cache(Level 1):       C   C   C   C   C   C   C   C
cache(Level 2):       **C**   **C**   **C**   **C**
cache(Level 3):       *******shared Level 3********

Provided cpu_coregroup_mask and cpu_clustergroup_mask return the
corresponding cpumasks, this should work with the default sched domain
topology.

It looks to me like the lack of nested cluster support in
parse_cluster() in drivers/base/arch_topology.c is what needs to be
updated to accomplish the above. With cpu_topology[cpu].cluster_sibling and
core_sibling updated to reflect the topology you describe, the rest of
the sched domains construction would work with the default sched domain
topology.

I'm not very familiar with DT, especially the cpu-map. Does your DT
reflect the topology you want to build?


-- 
Darren Hart
Ampere Computing / OS and Kernel

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH V2] sched: topology: make cache topology separate from cpu topology
  2022-03-11  3:25 ` Qing Wang
@ 2022-03-12 12:05   ` Peter Zijlstra
  -1 siblings, 0 replies; 8+ messages in thread
From: Peter Zijlstra @ 2022-03-12 12:05 UTC (permalink / raw)
  To: Qing Wang
  Cc: Catalin Marinas, Will Deacon, Sudeep Holla, Greg Kroah-Hartman,
	Rafael J. Wysocki, Ingo Molnar, Juri Lelli, Vincent Guittot,
	Dietmar Eggemann, Steven Rostedt, Ben Segall, Mel Gorman,
	Daniel Bristot de Oliveira, linux-arm-kernel, linux-kernel

On Thu, Mar 10, 2022 at 07:25:33PM -0800, Qing Wang wrote:
> From: Wang Qing <wangqing@vivo.com>
> 
> Some architectures(e.g. ARM64), caches are implemented like below:
> SD(Level 1):          ************ DIE ************
> SD(Level 0):          **** MC ****    **** MC *****
> cluster:              **cluster 0**   **cluster 1**
> cores:                0   1   2   3   4   5   6   7
> cache(Level 1):       C   C   C   C   C   C   C   C
> cache(Level 2):       **C**   **C**   **C**   **C**
> cache(Level 3):       *******shared Level 3********
> sd_llc_id(current):   0   0   0   0   4   4   4   4
> sd_llc_id(should be): 0   0   2   2   4   4   6   6
> 
> Caches and cpus have different topology, this causes cpus_share_cache()
> return the wrong value in sd, which will affect the CPU load balance.

Then fix your SD_flags already.

> diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
> index cce6136b..3048fa6
> --- a/include/linux/arch_topology.h
> +++ b/include/linux/arch_topology.h
> @@ -82,6 +82,8 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
>  #define topology_cluster_cpumask(cpu)	(&cpu_topology[cpu].cluster_sibling)
>  #define topology_llc_cpumask(cpu)	(&cpu_topology[cpu].llc_sibling)
>  void init_cpu_topology(void);
> +void init_cpu_cache_topology(void);
> +void fix_cpu_llc(int cpu, int *first_cpu, int *cpu_num);
>  void store_cpu_topology(unsigned int cpuid);
>  const struct cpumask *cpu_coregroup_mask(int cpu);
>  const struct cpumask *cpu_clustergroup_mask(int cpu);
> diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
> index d201a70..d894ced
> --- a/kernel/sched/topology.c
> +++ b/kernel/sched/topology.c
> @@ -661,6 +661,9 @@ static void update_top_cache_domain(int cpu)
>  	if (sd) {
>  		id = cpumask_first(sched_domain_span(sd));
>  		size = cpumask_weight(sched_domain_span(sd));
> +#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
> +		fix_cpu_llc(cpu, &id, &size);
> +#endif
>  		sds = sd->shared;
>  	}

NAK on that.

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH V2] sched: topology: make cache topology separate from cpu topology
@ 2022-03-12 12:05   ` Peter Zijlstra
  0 siblings, 0 replies; 8+ messages in thread
From: Peter Zijlstra @ 2022-03-12 12:05 UTC (permalink / raw)
  To: Qing Wang
  Cc: Catalin Marinas, Will Deacon, Sudeep Holla, Greg Kroah-Hartman,
	Rafael J. Wysocki, Ingo Molnar, Juri Lelli, Vincent Guittot,
	Dietmar Eggemann, Steven Rostedt, Ben Segall, Mel Gorman,
	Daniel Bristot de Oliveira, linux-arm-kernel, linux-kernel

On Thu, Mar 10, 2022 at 07:25:33PM -0800, Qing Wang wrote:
> From: Wang Qing <wangqing@vivo.com>
> 
> Some architectures(e.g. ARM64), caches are implemented like below:
> SD(Level 1):          ************ DIE ************
> SD(Level 0):          **** MC ****    **** MC *****
> cluster:              **cluster 0**   **cluster 1**
> cores:                0   1   2   3   4   5   6   7
> cache(Level 1):       C   C   C   C   C   C   C   C
> cache(Level 2):       **C**   **C**   **C**   **C**
> cache(Level 3):       *******shared Level 3********
> sd_llc_id(current):   0   0   0   0   4   4   4   4
> sd_llc_id(should be): 0   0   2   2   4   4   6   6
> 
> Caches and cpus have different topology, this causes cpus_share_cache()
> return the wrong value in sd, which will affect the CPU load balance.

Then fix your SD_flags already.

> diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
> index cce6136b..3048fa6
> --- a/include/linux/arch_topology.h
> +++ b/include/linux/arch_topology.h
> @@ -82,6 +82,8 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
>  #define topology_cluster_cpumask(cpu)	(&cpu_topology[cpu].cluster_sibling)
>  #define topology_llc_cpumask(cpu)	(&cpu_topology[cpu].llc_sibling)
>  void init_cpu_topology(void);
> +void init_cpu_cache_topology(void);
> +void fix_cpu_llc(int cpu, int *first_cpu, int *cpu_num);
>  void store_cpu_topology(unsigned int cpuid);
>  const struct cpumask *cpu_coregroup_mask(int cpu);
>  const struct cpumask *cpu_clustergroup_mask(int cpu);
> diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
> index d201a70..d894ced
> --- a/kernel/sched/topology.c
> +++ b/kernel/sched/topology.c
> @@ -661,6 +661,9 @@ static void update_top_cache_domain(int cpu)
>  	if (sd) {
>  		id = cpumask_first(sched_domain_span(sd));
>  		size = cpumask_weight(sched_domain_span(sd));
> +#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
> +		fix_cpu_llc(cpu, &id, &size);
> +#endif
>  		sds = sd->shared;
>  	}

NAK on that.

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 8+ messages in thread

* RE: [PATCH V2] sched: topology: make cache topology separate from cpu topology
  2022-03-11 18:25   ` Darren Hart
@ 2022-03-14  2:13     ` 王擎
  -1 siblings, 0 replies; 8+ messages in thread
From: 王擎 @ 2022-03-14  2:13 UTC (permalink / raw)
  To: Darren Hart
  Cc: Catalin Marinas, Will Deacon, Sudeep Holla, Greg Kroah-Hartman,
	Rafael J. Wysocki, Ingo Molnar, Peter Zijlstra, Juri Lelli,
	Vincent Guittot, Dietmar Eggemann, Steven Rostedt, Ben Segall,
	Mel Gorman, Daniel Bristot de Oliveira, linux-arm-kernel,
	linux-kernel


>> From: Wang Qing <wangqing@vivo.com>
>> 
>> Some architectures(e.g. ARM64), caches are implemented like below:
>> SD(Level 1):          ************ DIE ************
>> SD(Level 0):          **** MC ****    **** MC *****
>> cluster:              **cluster 0**   **cluster 1**
>> cores:                0   1   2   3   4   5   6   7
>> cache(Level 1):       C   C   C   C   C   C   C   C
>> cache(Level 2):       **C**   **C**   **C**   **C**
>> cache(Level 3):       *******shared Level 3********
>> sd_llc_id(current):   0   0   0   0   4   4   4   4
>> sd_llc_id(should be): 0   0   2   2   4   4   6   6
>
>Should cluster 0 and 1 span the same cpu mask as the MCs? Based on how
>you describe the cache above, it seems like what you are looking for
>would be:
>
>(SD DIE level removed in favor of the same span MC)
>SD(Level 1):          ************ MC  ************
>SD(Level 0):          *CLS0*  *CLS1*  *CLS2*  *CLS3* (CONFIG_SCHED_CLUSTER)
>cores:                0   1   2   3   4   5   6   7
>cache(Level 1):       C   C   C   C   C   C   C   C
>cache(Level 2):       **C**   **C**   **C**   **C**
>cache(Level 3):       *******shared Level 3********
>
>Provided cpu_coregroup_mask and cpu_clustergroup_mask return the
>corresponding cpumasks, this should work with the default sched domain
>topology.
>
>It looks to me like the lack of nested cluster support in
>parse_cluster() in drivers/base/arch_topology.c is what needs to be
>updated to accomplish the above. With cpu_topology[cpu].cluster_sibling and
>core_sibling updated to reflect the topology you describe, the rest of
>the sched domains construction would work with the default sched domain
>topology.

Complex (core[0-1]) looks like a nested cluster, but it is not exactly one.
They only share L2 cache. 
parse_cluster() only parses the CPU topology, and does not parse the cache
topology even if described.

>I'm not very familiar with DT, especially the cpu-map. Does your DT
>reflect the topology you want to build?

The DT looks like:
cpu-map {
	cluster0 {
		core0 {
			cpu = <&cpu0>;
		};
		core1 {
			cpu = <&cpu1>;
		};
		core2 {
			cpu = <&cpu2>;
		};
		core3 {
			cpu = <&cpu3>;
		};
		doe_dvfs_cl0: doe {
		};
	};

	cluster1 {
		core0 {
			cpu = <&cpu4>;
		};
		core1 {
			cpu = <&cpu5>;
		};
		core2 {
			cpu = <&cpu6>;
		};
		doe_dvfs_cl1: doe {
		};
	};
};

cpus {
		cpu0: cpu@100 {
			next-level-cache = <&L2_1>;
			L2_1: l2-cache {
 				compatible = "cache";
				next-level-cache = <&L3_1>;
 			};
			L3_1: l3-cache {
 				compatible = "cache";
 			};
		};

		cpu1: cpu@101 {
			next-level-cache = <&L2_1>;
		};

		cpu2: cpu@102 {
			next-level-cache = <&L2_2>;
			L2_2: l2-cache {
 				compatible = "cache";
				next-level-cache = <&L3_1>;
			};
		};

		cpu3: cpu@103 {
			next-level-cache = <&L2_2>;
		};

		cpu4: cpu@100 {
			next-level-cache = <&L2_3>;
			L2_3: l2-cache {
 				compatible = "cache";
				next-level-cache = <&L3_1>;
 			};
		};

		cpu5: cpu@101 {
			next-level-cache = <&L2_3>;
		};

		cpu6: cpu@102 {
			next-level-cache = <&L2_4>;
			L2_4: l2-cache {
 				compatible = "cache";
				next-level-cache = <&L3_1>;
 			};
		};

		cpu7: cpu@200 {
			next-level-cache = <&L2_4>;
		};
	};

Thanks,
Wang

>
>
>-- 
>Darren Hart
>Ampere Computing / OS and Kernel

^ permalink raw reply	[flat|nested] 8+ messages in thread

* RE: [PATCH V2] sched: topology: make cache topology separate from cpu topology
@ 2022-03-14  2:13     ` 王擎
  0 siblings, 0 replies; 8+ messages in thread
From: 王擎 @ 2022-03-14  2:13 UTC (permalink / raw)
  To: Darren Hart
  Cc: Catalin Marinas, Will Deacon, Sudeep Holla, Greg Kroah-Hartman,
	Rafael J. Wysocki, Ingo Molnar, Peter Zijlstra, Juri Lelli,
	Vincent Guittot, Dietmar Eggemann, Steven Rostedt, Ben Segall,
	Mel Gorman, Daniel Bristot de Oliveira, linux-arm-kernel,
	linux-kernel


>> From: Wang Qing <wangqing@vivo.com>
>> 
>> Some architectures(e.g. ARM64), caches are implemented like below:
>> SD(Level 1):          ************ DIE ************
>> SD(Level 0):          **** MC ****    **** MC *****
>> cluster:              **cluster 0**   **cluster 1**
>> cores:                0   1   2   3   4   5   6   7
>> cache(Level 1):       C   C   C   C   C   C   C   C
>> cache(Level 2):       **C**   **C**   **C**   **C**
>> cache(Level 3):       *******shared Level 3********
>> sd_llc_id(current):   0   0   0   0   4   4   4   4
>> sd_llc_id(should be): 0   0   2   2   4   4   6   6
>
>Should cluster 0 and 1 span the same cpu mask as the MCs? Based on how
>you describe the cache above, it seems like what you are looking for
>would be:
>
>(SD DIE level removed in favor of the same span MC)
>SD(Level 1):          ************ MC  ************
>SD(Level 0):          *CLS0*  *CLS1*  *CLS2*  *CLS3* (CONFIG_SCHED_CLUSTER)
>cores:                0   1   2   3   4   5   6   7
>cache(Level 1):       C   C   C   C   C   C   C   C
>cache(Level 2):       **C**   **C**   **C**   **C**
>cache(Level 3):       *******shared Level 3********
>
>Provided cpu_coregroup_mask and cpu_clustergroup_mask return the
>corresponding cpumasks, this should work with the default sched domain
>topology.
>
>It looks to me like the lack of nested cluster support in
>parse_cluster() in drivers/base/arch_topology.c is what needs to be
>updated to accomplish the above. With cpu_topology[cpu].cluster_sibling and
>core_sibling updated to reflect the topology you describe, the rest of
>the sched domains construction would work with the default sched domain
>topology.

Complex (core[0-1]) looks like a nested cluster, but it is not exactly one.
They only share L2 cache. 
parse_cluster() only parses the CPU topology, and does not parse the cache
topology even if described.

>I'm not very familiar with DT, especially the cpu-map. Does your DT
>reflect the topology you want to build?

The DT looks like:
cpu-map {
	cluster0 {
		core0 {
			cpu = <&cpu0>;
		};
		core1 {
			cpu = <&cpu1>;
		};
		core2 {
			cpu = <&cpu2>;
		};
		core3 {
			cpu = <&cpu3>;
		};
		doe_dvfs_cl0: doe {
		};
	};

	cluster1 {
		core0 {
			cpu = <&cpu4>;
		};
		core1 {
			cpu = <&cpu5>;
		};
		core2 {
			cpu = <&cpu6>;
		};
		doe_dvfs_cl1: doe {
		};
	};
};

cpus {
		cpu0: cpu@100 {
			next-level-cache = <&L2_1>;
			L2_1: l2-cache {
 				compatible = "cache";
				next-level-cache = <&L3_1>;
 			};
			L3_1: l3-cache {
 				compatible = "cache";
 			};
		};

		cpu1: cpu@101 {
			next-level-cache = <&L2_1>;
		};

		cpu2: cpu@102 {
			next-level-cache = <&L2_2>;
			L2_2: l2-cache {
 				compatible = "cache";
				next-level-cache = <&L3_1>;
			};
		};

		cpu3: cpu@103 {
			next-level-cache = <&L2_2>;
		};

		cpu4: cpu@100 {
			next-level-cache = <&L2_3>;
			L2_3: l2-cache {
 				compatible = "cache";
				next-level-cache = <&L3_1>;
 			};
		};

		cpu5: cpu@101 {
			next-level-cache = <&L2_3>;
		};

		cpu6: cpu@102 {
			next-level-cache = <&L2_4>;
			L2_4: l2-cache {
 				compatible = "cache";
				next-level-cache = <&L3_1>;
 			};
		};

		cpu7: cpu@200 {
			next-level-cache = <&L2_4>;
		};
	};

Thanks,
Wang

>
>
>-- 
>Darren Hart
>Ampere Computing / OS and Kernel
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2022-03-14  2:15 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-03-11  3:25 [PATCH V2] sched: topology: make cache topology separate from cpu topology Qing Wang
2022-03-11  3:25 ` Qing Wang
2022-03-11 18:25 ` Darren Hart
2022-03-11 18:25   ` Darren Hart
2022-03-14  2:13   ` 王擎
2022-03-14  2:13     ` 王擎
2022-03-12 12:05 ` Peter Zijlstra
2022-03-12 12:05   ` Peter Zijlstra

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.