All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2 1/2] perf arm-spe: Use SPE data source for neoverse cores
@ 2022-02-21 22:47 ` Ali Saidi
  0 siblings, 0 replies; 32+ messages in thread
From: Ali Saidi @ 2022-02-21 22:47 UTC (permalink / raw)
  To: linux-kernel, linux-perf-users, linux-arm-kernel, german.gomez, leo.yan
  Cc: alisaidi, benh, Peter Zijlstra, Ingo Molnar,
	Arnaldo Carvalho de Melo, Mark Rutland, Alexander Shishkin,
	Jiri Olsa, Namhyung Kim, John Garry, Will Deacon,
	Mathieu Poirier, James Clark, Andrew Kilroy, Jin Yao, Kajol Jain,
	Li Huafei

When synthesizing data from SPE, augment the type with source information
for Arm Neoverse cores. The field is IMPLDEF but the Neoverse cores all use
the same encoding. I can't find encoding information for any other SPE
implementations to unify their choices with Arm's thus that is left for
future work.

This change populates the mem_lvl_num for Neoverse cores instead of the
deprecated mem_lvl namespace.

Signed-off-by: Ali Saidi <alisaidi@amazon.com>
---
 .../util/arm-spe-decoder/arm-spe-decoder.c    |   1 +
 .../util/arm-spe-decoder/arm-spe-decoder.h    |  12 ++
 tools/perf/util/arm-spe.c                     | 106 +++++++++++++++---
 3 files changed, 104 insertions(+), 15 deletions(-)

diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
index 5e390a1a79ab..091987dd3966 100644
--- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
@@ -220,6 +220,7 @@ static int arm_spe_read_record(struct arm_spe_decoder *decoder)
 
 			break;
 		case ARM_SPE_DATA_SOURCE:
+			decoder->record.source = payload;
 			break;
 		case ARM_SPE_BAD:
 			break;
diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
index 69b31084d6be..c81bf90c0996 100644
--- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
@@ -29,6 +29,17 @@ enum arm_spe_op_type {
 	ARM_SPE_ST		= 1 << 1,
 };
 
+enum arm_spe_neoverse_data_source {
+	ARM_SPE_NV_L1D        = 0x0,
+	ARM_SPE_NV_L2         = 0x8,
+	ARM_SPE_NV_PEER_CORE  = 0x9,
+	ARM_SPE_NV_LCL_CLSTR  = 0xa,
+	ARM_SPE_NV_SYS_CACHE  = 0xb,
+	ARM_SPE_NV_PEER_CLSTR = 0xc,
+	ARM_SPE_NV_REMOTE     = 0xd,
+	ARM_SPE_NV_DRAM       = 0xe,
+};
+
 struct arm_spe_record {
 	enum arm_spe_sample_type type;
 	int err;
@@ -40,6 +51,7 @@ struct arm_spe_record {
 	u64 virt_addr;
 	u64 phys_addr;
 	u64 context_id;
+	u16 source;
 };
 
 struct arm_spe_insn;
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index d2b64e3f588b..e0243c2fed5f 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -34,6 +34,7 @@
 #include "arm-spe-decoder/arm-spe-decoder.h"
 #include "arm-spe-decoder/arm-spe-pkt-decoder.h"
 
+#include <../../../arch/arm64/include/asm/cputype.h>
 #define MAX_TIMESTAMP (~0ULL)
 
 struct arm_spe {
@@ -45,6 +46,7 @@ struct arm_spe {
 	struct perf_session		*session;
 	struct machine			*machine;
 	u32				pmu_type;
+	u64				midr;
 
 	struct perf_tsc_conversion	tc;
 
@@ -399,33 +401,103 @@ static bool arm_spe__is_memory_event(enum arm_spe_sample_type type)
 	return false;
 }
 
-static u64 arm_spe__synth_data_source(const struct arm_spe_record *record)
-{
-	union perf_mem_data_src	data_src = { 0 };
+static const struct midr_range neoverse_spe[] = {
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+	{},
+};
 
-	if (record->op == ARM_SPE_LD)
-		data_src.mem_op = PERF_MEM_OP_LOAD;
-	else
-		data_src.mem_op = PERF_MEM_OP_STORE;
 
+static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *record,
+						union perf_mem_data_src *data_src)
+{
+	switch (record->source) {
+	case ARM_SPE_NV_L1D:
+		data_src->mem_lvl = PERF_MEM_LVL_HIT;
+		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
+		break;
+	case ARM_SPE_NV_L2:
+		data_src->mem_lvl = PERF_MEM_LVL_HIT;
+		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+		break;
+	case ARM_SPE_NV_PEER_CORE:
+		data_src->mem_lvl = PERF_MEM_LVL_HIT;
+		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
+		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
+		break;
+	/*
+	 * We don't know if this is L1, L2, or even L3 (for the cases the system
+	 * has an L3, but we do know it was a cache-2-cache transfer, so set
+	 * SNOOP_HITM
+	 */
+	case ARM_SPE_NV_LCL_CLSTR:
+	case ARM_SPE_NV_PEER_CLSTR:
+		data_src->mem_lvl = PERF_MEM_LVL_HIT;
+		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
+		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
+		break;
+	/*
+	 * System cache is assumed to be L4, as cluster cache (if it exists)
+	 * would be L3 cache on Neoverse platforms
+	 */
+	case ARM_SPE_NV_SYS_CACHE:
+		data_src->mem_lvl = PERF_MEM_LVL_HIT;
+		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L4;
+		break;
+	/*
+	 * We don't know what level it hit in, except it came from the other
+	 * socket
+	 */
+	case ARM_SPE_NV_REMOTE:
+		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
+		data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
+		break;
+	case ARM_SPE_NV_DRAM:
+		data_src->mem_lvl = PERF_MEM_LVL_HIT;
+		data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
+		break;
+	default:
+		break;
+	}
+}
+
+static void arm_spe__synth_data_source_generic(const struct arm_spe_record *record,
+						union perf_mem_data_src *data_src)
+{
 	if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) {
-		data_src.mem_lvl = PERF_MEM_LVL_L3;
+		data_src->mem_lvl = PERF_MEM_LVL_L3;
 
 		if (record->type & ARM_SPE_LLC_MISS)
-			data_src.mem_lvl |= PERF_MEM_LVL_MISS;
+			data_src->mem_lvl |= PERF_MEM_LVL_MISS;
 		else
-			data_src.mem_lvl |= PERF_MEM_LVL_HIT;
+			data_src->mem_lvl |= PERF_MEM_LVL_HIT;
 	} else if (record->type & (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS)) {
-		data_src.mem_lvl = PERF_MEM_LVL_L1;
+		data_src->mem_lvl = PERF_MEM_LVL_L1;
 
 		if (record->type & ARM_SPE_L1D_MISS)
-			data_src.mem_lvl |= PERF_MEM_LVL_MISS;
+			data_src->mem_lvl |= PERF_MEM_LVL_MISS;
 		else
-			data_src.mem_lvl |= PERF_MEM_LVL_HIT;
+			data_src->mem_lvl |= PERF_MEM_LVL_HIT;
 	}
 
 	if (record->type & ARM_SPE_REMOTE_ACCESS)
-		data_src.mem_lvl |= PERF_MEM_LVL_REM_CCE1;
+		data_src->mem_lvl |= PERF_MEM_LVL_REM_CCE1;
+}
+
+static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 midr)
+{
+	union perf_mem_data_src	data_src = { 0 };
+	bool is_neoverse = is_midr_in_range(midr, neoverse_spe);
+
+	if (record->op & ARM_SPE_LD)
+		data_src.mem_op = PERF_MEM_OP_LOAD;
+	else
+		data_src.mem_op = PERF_MEM_OP_STORE;
+
+	if (is_neoverse)
+		arm_spe__synth_data_source_neoverse(record, &data_src);
+	else
+		arm_spe__synth_data_source_generic(record, &data_src);
 
 	if (record->type & (ARM_SPE_TLB_ACCESS | ARM_SPE_TLB_MISS)) {
 		data_src.mem_dtlb = PERF_MEM_TLB_WK;
@@ -446,7 +518,7 @@ static int arm_spe_sample(struct arm_spe_queue *speq)
 	u64 data_src;
 	int err;
 
-	data_src = arm_spe__synth_data_source(record);
+	data_src = arm_spe__synth_data_source(record, spe->midr);
 
 	if (spe->sample_flc) {
 		if (record->type & ARM_SPE_L1D_MISS) {
@@ -796,6 +868,10 @@ static int arm_spe_process_event(struct perf_session *session,
 	u64 timestamp;
 	struct arm_spe *spe = container_of(session->auxtrace,
 			struct arm_spe, auxtrace);
+	const char *cpuid = perf_env__cpuid(session->evlist->env);
+	u64 midr = strtol(cpuid, NULL, 16);
+
+	spe->midr = midr;
 
 	if (dump_trace)
 		return 0;
-- 
2.32.0


^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH v2 1/2] perf arm-spe: Use SPE data source for neoverse cores
@ 2022-02-21 22:47 ` Ali Saidi
  0 siblings, 0 replies; 32+ messages in thread
From: Ali Saidi @ 2022-02-21 22:47 UTC (permalink / raw)
  To: linux-kernel, linux-perf-users, linux-arm-kernel, german.gomez, leo.yan
  Cc: alisaidi, benh, Peter Zijlstra, Ingo Molnar,
	Arnaldo Carvalho de Melo, Mark Rutland, Alexander Shishkin,
	Jiri Olsa, Namhyung Kim, John Garry, Will Deacon,
	Mathieu Poirier, James Clark, Andrew Kilroy, Jin Yao, Kajol Jain,
	Li Huafei

When synthesizing data from SPE, augment the type with source information
for Arm Neoverse cores. The field is IMPLDEF but the Neoverse cores all use
the same encoding. I can't find encoding information for any other SPE
implementations to unify their choices with Arm's thus that is left for
future work.

This change populates the mem_lvl_num for Neoverse cores instead of the
deprecated mem_lvl namespace.

Signed-off-by: Ali Saidi <alisaidi@amazon.com>
---
 .../util/arm-spe-decoder/arm-spe-decoder.c    |   1 +
 .../util/arm-spe-decoder/arm-spe-decoder.h    |  12 ++
 tools/perf/util/arm-spe.c                     | 106 +++++++++++++++---
 3 files changed, 104 insertions(+), 15 deletions(-)

diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
index 5e390a1a79ab..091987dd3966 100644
--- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
@@ -220,6 +220,7 @@ static int arm_spe_read_record(struct arm_spe_decoder *decoder)
 
 			break;
 		case ARM_SPE_DATA_SOURCE:
+			decoder->record.source = payload;
 			break;
 		case ARM_SPE_BAD:
 			break;
diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
index 69b31084d6be..c81bf90c0996 100644
--- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
@@ -29,6 +29,17 @@ enum arm_spe_op_type {
 	ARM_SPE_ST		= 1 << 1,
 };
 
+enum arm_spe_neoverse_data_source {
+	ARM_SPE_NV_L1D        = 0x0,
+	ARM_SPE_NV_L2         = 0x8,
+	ARM_SPE_NV_PEER_CORE  = 0x9,
+	ARM_SPE_NV_LCL_CLSTR  = 0xa,
+	ARM_SPE_NV_SYS_CACHE  = 0xb,
+	ARM_SPE_NV_PEER_CLSTR = 0xc,
+	ARM_SPE_NV_REMOTE     = 0xd,
+	ARM_SPE_NV_DRAM       = 0xe,
+};
+
 struct arm_spe_record {
 	enum arm_spe_sample_type type;
 	int err;
@@ -40,6 +51,7 @@ struct arm_spe_record {
 	u64 virt_addr;
 	u64 phys_addr;
 	u64 context_id;
+	u16 source;
 };
 
 struct arm_spe_insn;
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index d2b64e3f588b..e0243c2fed5f 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -34,6 +34,7 @@
 #include "arm-spe-decoder/arm-spe-decoder.h"
 #include "arm-spe-decoder/arm-spe-pkt-decoder.h"
 
+#include <../../../arch/arm64/include/asm/cputype.h>
 #define MAX_TIMESTAMP (~0ULL)
 
 struct arm_spe {
@@ -45,6 +46,7 @@ struct arm_spe {
 	struct perf_session		*session;
 	struct machine			*machine;
 	u32				pmu_type;
+	u64				midr;
 
 	struct perf_tsc_conversion	tc;
 
@@ -399,33 +401,103 @@ static bool arm_spe__is_memory_event(enum arm_spe_sample_type type)
 	return false;
 }
 
-static u64 arm_spe__synth_data_source(const struct arm_spe_record *record)
-{
-	union perf_mem_data_src	data_src = { 0 };
+static const struct midr_range neoverse_spe[] = {
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+	{},
+};
 
-	if (record->op == ARM_SPE_LD)
-		data_src.mem_op = PERF_MEM_OP_LOAD;
-	else
-		data_src.mem_op = PERF_MEM_OP_STORE;
 
+static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *record,
+						union perf_mem_data_src *data_src)
+{
+	switch (record->source) {
+	case ARM_SPE_NV_L1D:
+		data_src->mem_lvl = PERF_MEM_LVL_HIT;
+		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
+		break;
+	case ARM_SPE_NV_L2:
+		data_src->mem_lvl = PERF_MEM_LVL_HIT;
+		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+		break;
+	case ARM_SPE_NV_PEER_CORE:
+		data_src->mem_lvl = PERF_MEM_LVL_HIT;
+		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
+		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
+		break;
+	/*
+	 * We don't know if this is L1, L2, or even L3 (for the cases the system
+	 * has an L3, but we do know it was a cache-2-cache transfer, so set
+	 * SNOOP_HITM
+	 */
+	case ARM_SPE_NV_LCL_CLSTR:
+	case ARM_SPE_NV_PEER_CLSTR:
+		data_src->mem_lvl = PERF_MEM_LVL_HIT;
+		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
+		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
+		break;
+	/*
+	 * System cache is assumed to be L4, as cluster cache (if it exists)
+	 * would be L3 cache on Neoverse platforms
+	 */
+	case ARM_SPE_NV_SYS_CACHE:
+		data_src->mem_lvl = PERF_MEM_LVL_HIT;
+		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L4;
+		break;
+	/*
+	 * We don't know what level it hit in, except it came from the other
+	 * socket
+	 */
+	case ARM_SPE_NV_REMOTE:
+		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
+		data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
+		break;
+	case ARM_SPE_NV_DRAM:
+		data_src->mem_lvl = PERF_MEM_LVL_HIT;
+		data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
+		break;
+	default:
+		break;
+	}
+}
+
+static void arm_spe__synth_data_source_generic(const struct arm_spe_record *record,
+						union perf_mem_data_src *data_src)
+{
 	if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) {
-		data_src.mem_lvl = PERF_MEM_LVL_L3;
+		data_src->mem_lvl = PERF_MEM_LVL_L3;
 
 		if (record->type & ARM_SPE_LLC_MISS)
-			data_src.mem_lvl |= PERF_MEM_LVL_MISS;
+			data_src->mem_lvl |= PERF_MEM_LVL_MISS;
 		else
-			data_src.mem_lvl |= PERF_MEM_LVL_HIT;
+			data_src->mem_lvl |= PERF_MEM_LVL_HIT;
 	} else if (record->type & (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS)) {
-		data_src.mem_lvl = PERF_MEM_LVL_L1;
+		data_src->mem_lvl = PERF_MEM_LVL_L1;
 
 		if (record->type & ARM_SPE_L1D_MISS)
-			data_src.mem_lvl |= PERF_MEM_LVL_MISS;
+			data_src->mem_lvl |= PERF_MEM_LVL_MISS;
 		else
-			data_src.mem_lvl |= PERF_MEM_LVL_HIT;
+			data_src->mem_lvl |= PERF_MEM_LVL_HIT;
 	}
 
 	if (record->type & ARM_SPE_REMOTE_ACCESS)
-		data_src.mem_lvl |= PERF_MEM_LVL_REM_CCE1;
+		data_src->mem_lvl |= PERF_MEM_LVL_REM_CCE1;
+}
+
+static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 midr)
+{
+	union perf_mem_data_src	data_src = { 0 };
+	bool is_neoverse = is_midr_in_range(midr, neoverse_spe);
+
+	if (record->op & ARM_SPE_LD)
+		data_src.mem_op = PERF_MEM_OP_LOAD;
+	else
+		data_src.mem_op = PERF_MEM_OP_STORE;
+
+	if (is_neoverse)
+		arm_spe__synth_data_source_neoverse(record, &data_src);
+	else
+		arm_spe__synth_data_source_generic(record, &data_src);
 
 	if (record->type & (ARM_SPE_TLB_ACCESS | ARM_SPE_TLB_MISS)) {
 		data_src.mem_dtlb = PERF_MEM_TLB_WK;
@@ -446,7 +518,7 @@ static int arm_spe_sample(struct arm_spe_queue *speq)
 	u64 data_src;
 	int err;
 
-	data_src = arm_spe__synth_data_source(record);
+	data_src = arm_spe__synth_data_source(record, spe->midr);
 
 	if (spe->sample_flc) {
 		if (record->type & ARM_SPE_L1D_MISS) {
@@ -796,6 +868,10 @@ static int arm_spe_process_event(struct perf_session *session,
 	u64 timestamp;
 	struct arm_spe *spe = container_of(session->auxtrace,
 			struct arm_spe, auxtrace);
+	const char *cpuid = perf_env__cpuid(session->evlist->env);
+	u64 midr = strtol(cpuid, NULL, 16);
+
+	spe->midr = midr;
 
 	if (dump_trace)
 		return 0;
-- 
2.32.0


_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
  2022-02-21 22:47 ` Ali Saidi
@ 2022-02-21 22:48   ` Ali Saidi
  -1 siblings, 0 replies; 32+ messages in thread
From: Ali Saidi @ 2022-02-21 22:48 UTC (permalink / raw)
  To: linux-kernel, linux-perf-users, linux-arm-kernel, german.gomez, leo.yan
  Cc: alisaidi, benh, Peter Zijlstra, Ingo Molnar,
	Arnaldo Carvalho de Melo, Mark Rutland, Alexander Shishkin,
	Jiri Olsa, Namhyung Kim, John Garry, Will Deacon,
	Mathieu Poirier, James Clark, Andrew Kilroy, Jin Yao, Kajol Jain,
	Li Huafei

Current code only support HITM statistics for last level cache (LLC)
when mem_lvl encodes the level. On existing Arm64 machines there are as
many as four levels cache and this change supports decoding l1, l2, and
llc hits from the mem_lvl_num data. Given that the mem_lvl namespace is
being deprecated take this opportunity to encode the neoverse data into
mem_lvl_num.

For loads that hit in the LLC snoop filter and are fulfilled from a
higher level cache, it's not usually clear what the true level of the
cache the data came from (i.e. a transfer from a core could come from
it's L1 or L2). Instead of making an assumption of where the line came
from, add support for incrementing HITM if the source is CACHE_ANY.

Since other architectures don't seem to populate the mem_lvl_num field
here there shouldn't be a change in functionality.

Signed-off-by: Ali Saidi <alisaidi@amazon.com>
---
 tools/perf/util/mem-events.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index ed0ab838bcc5..6c3fd4aac7ae 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -485,6 +485,7 @@ int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
 	u64 daddr  = mi->daddr.addr;
 	u64 op     = data_src->mem_op;
 	u64 lvl    = data_src->mem_lvl;
+	u64 lnum   = data_src->mem_lvl_num;
 	u64 snoop  = data_src->mem_snoop;
 	u64 lock   = data_src->mem_lock;
 	u64 blk    = data_src->mem_blk;
@@ -527,16 +528,18 @@ do {				\
 			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
 			if (lvl & P(LVL, IO))  stats->ld_io++;
 			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
-			if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
-			if (lvl & P(LVL, L2 )) stats->ld_l2hit++;
-			if (lvl & P(LVL, L3 )) {
+			if (lvl & P(LVL, L1) || lnum == P(LVLNUM, L1))
+				stats->ld_l1hit++;
+			if (lvl & P(LVL, L2) || lnum == P(LVLNUM, L2))
+				stats->ld_l2hit++;
+			if (lvl & P(LVL, L3) || lnum == P(LVLNUM, L4)) {
 				if (snoop & P(SNOOP, HITM))
 					HITM_INC(lcl_hitm);
 				else
 					stats->ld_llchit++;
 			}
 
-			if (lvl & P(LVL, LOC_RAM)) {
+			if (lvl & P(LVL, LOC_RAM) || lnum == P(LVLNUM, RAM)) {
 				stats->lcl_dram++;
 				if (snoop & P(SNOOP, HIT))
 					stats->ld_shared++;
@@ -564,6 +567,9 @@ do {				\
 				HITM_INC(rmt_hitm);
 		}
 
+		if (lnum == P(LVLNUM, ANY_CACHE) && snoop & P(SNOOP, HITM))
+			HITM_INC(lcl_hitm);
+
 		if ((lvl & P(LVL, MISS)))
 			stats->ld_miss++;
 
-- 
2.32.0


^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
@ 2022-02-21 22:48   ` Ali Saidi
  0 siblings, 0 replies; 32+ messages in thread
From: Ali Saidi @ 2022-02-21 22:48 UTC (permalink / raw)
  To: linux-kernel, linux-perf-users, linux-arm-kernel, german.gomez, leo.yan
  Cc: alisaidi, benh, Peter Zijlstra, Ingo Molnar,
	Arnaldo Carvalho de Melo, Mark Rutland, Alexander Shishkin,
	Jiri Olsa, Namhyung Kim, John Garry, Will Deacon,
	Mathieu Poirier, James Clark, Andrew Kilroy, Jin Yao, Kajol Jain,
	Li Huafei

Current code only support HITM statistics for last level cache (LLC)
when mem_lvl encodes the level. On existing Arm64 machines there are as
many as four levels of cache and this change supports decoding l1, l2, and
llc hits from the mem_lvl_num data. Given that the mem_lvl namespace is
being deprecated take this opportunity to encode the neoverse data into
mem_lvl_num.

For loads that hit in the LLC snoop filter and are fulfilled from a
higher level cache, it's not usually clear what the true level of the
cache the data came from (i.e. a transfer from a core could come from
it's L1 or L2). Instead of making an assumption of where the line came
from, add support for incrementing HITM if the source is CACHE_ANY.

Since other architectures don't seem to populate the mem_lvl_num field
here there shouldn't be a change in functionality.

Signed-off-by: Ali Saidi <alisaidi@amazon.com>
---
 tools/perf/util/mem-events.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index ed0ab838bcc5..6c3fd4aac7ae 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -485,6 +485,7 @@ int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
 	u64 daddr  = mi->daddr.addr;
 	u64 op     = data_src->mem_op;
 	u64 lvl    = data_src->mem_lvl;
+	u64 lnum   = data_src->mem_lvl_num;
 	u64 snoop  = data_src->mem_snoop;
 	u64 lock   = data_src->mem_lock;
 	u64 blk    = data_src->mem_blk;
@@ -527,16 +528,18 @@ do {				\
 			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
 			if (lvl & P(LVL, IO))  stats->ld_io++;
 			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
-			if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
-			if (lvl & P(LVL, L2 )) stats->ld_l2hit++;
-			if (lvl & P(LVL, L3 )) {
+			if (lvl & P(LVL, L1) || lnum == P(LVLNUM, L1))
+				stats->ld_l1hit++;
+			if (lvl & P(LVL, L2) || lnum == P(LVLNUM, L2))
+				stats->ld_l2hit++;
+			if (lvl & P(LVL, L3) || lnum == P(LVLNUM, L4)) {
 				if (snoop & P(SNOOP, HITM))
 					HITM_INC(lcl_hitm);
 				else
 					stats->ld_llchit++;
 			}
 
-			if (lvl & P(LVL, LOC_RAM)) {
+			if (lvl & P(LVL, LOC_RAM) || lnum == P(LVLNUM, RAM)) {
 				stats->lcl_dram++;
 				if (snoop & P(SNOOP, HIT))
 					stats->ld_shared++;
@@ -564,6 +567,9 @@ do {				\
 				HITM_INC(rmt_hitm);
 		}
 
+		if (lnum == P(LVLNUM, ANY_CACHE) && snoop & P(SNOOP, HITM))
+			HITM_INC(lcl_hitm);
+
 		if ((lvl & P(LVL, MISS)))
 			stats->ld_miss++;
 
-- 
2.32.0


_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 1/2] perf arm-spe: Use SPE data source for neoverse cores
  2022-02-21 22:47 ` Ali Saidi
@ 2022-03-02 11:59   ` German Gomez
  -1 siblings, 0 replies; 32+ messages in thread
From: German Gomez @ 2022-03-02 11:59 UTC (permalink / raw)
  To: Ali Saidi, linux-kernel, linux-perf-users, linux-arm-kernel, leo.yan
  Cc: benh, Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
	Mark Rutland, Alexander Shishkin, Jiri Olsa, Namhyung Kim,
	John Garry, Will Deacon, Mathieu Poirier, James Clark,
	Andrew Kilroy, Jin Yao, Kajol Jain, Li Huafei

Hi Ali,

On 21/02/2022 22:47, Ali Saidi wrote:
> When synthesizing data from SPE, augment the type with source information
> for Arm Neoverse cores. The field is IMPLDEF but the Neoverse cores all use
> the same encoding. I can't find encoding information for any other SPE
> implementations to unify their choices with Arm's thus that is left for
> future work.
>
> This change populates the mem_lvl_num for Neoverse cores instead of the
> deprecated mem_lvl namespace.
>
> Signed-off-by: Ali Saidi <alisaidi@amazon.com>
> ---
>  .../util/arm-spe-decoder/arm-spe-decoder.c    |   1 +
>  .../util/arm-spe-decoder/arm-spe-decoder.h    |  12 ++
>  tools/perf/util/arm-spe.c                     | 106 +++++++++++++++---
>  3 files changed, 104 insertions(+), 15 deletions(-)
>
> diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
> index 5e390a1a79ab..091987dd3966 100644
> --- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
> +++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
> @@ -220,6 +220,7 @@ static int arm_spe_read_record(struct arm_spe_decoder *decoder)
>  
>  			break;
>  		case ARM_SPE_DATA_SOURCE:
> +			decoder->record.source = payload;
>  			break;
>  		case ARM_SPE_BAD:
>  			break;
> diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
> index 69b31084d6be..c81bf90c0996 100644
> --- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
> +++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
> @@ -29,6 +29,17 @@ enum arm_spe_op_type {
>  	ARM_SPE_ST		= 1 << 1,
>  };
>  
> +enum arm_spe_neoverse_data_source {
> +	ARM_SPE_NV_L1D        = 0x0,
> +	ARM_SPE_NV_L2         = 0x8,
> +	ARM_SPE_NV_PEER_CORE  = 0x9,
> +	ARM_SPE_NV_LCL_CLSTR  = 0xa,
> +	ARM_SPE_NV_SYS_CACHE  = 0xb,
> +	ARM_SPE_NV_PEER_CLSTR = 0xc,
> +	ARM_SPE_NV_REMOTE     = 0xd,
> +	ARM_SPE_NV_DRAM       = 0xe,
> +};
> +
>  struct arm_spe_record {
>  	enum arm_spe_sample_type type;
>  	int err;
> @@ -40,6 +51,7 @@ struct arm_spe_record {
>  	u64 virt_addr;
>  	u64 phys_addr;
>  	u64 context_id;
> +	u16 source;
>  };
>  
>  struct arm_spe_insn;
> diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
> index d2b64e3f588b..e0243c2fed5f 100644
> --- a/tools/perf/util/arm-spe.c
> +++ b/tools/perf/util/arm-spe.c
> @@ -34,6 +34,7 @@
>  #include "arm-spe-decoder/arm-spe-decoder.h"
>  #include "arm-spe-decoder/arm-spe-pkt-decoder.h"
>  
> +#include <../../../arch/arm64/include/asm/cputype.h>
>  #define MAX_TIMESTAMP (~0ULL)
>  
>  struct arm_spe {
> @@ -45,6 +46,7 @@ struct arm_spe {
>  	struct perf_session		*session;
>  	struct machine			*machine;
>  	u32				pmu_type;
> +	u64				midr;
>  
>  	struct perf_tsc_conversion	tc;
>  
> @@ -399,33 +401,103 @@ static bool arm_spe__is_memory_event(enum arm_spe_sample_type type)
>  	return false;
>  }
>  
> -static u64 arm_spe__synth_data_source(const struct arm_spe_record *record)
> -{
> -	union perf_mem_data_src	data_src = { 0 };
> +static const struct midr_range neoverse_spe[] = {
> +	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
> +	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
> +	{},
> +};
>  
> -	if (record->op == ARM_SPE_LD)
> -		data_src.mem_op = PERF_MEM_OP_LOAD;
> -	else
> -		data_src.mem_op = PERF_MEM_OP_STORE;
>  
> +static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *record,
> +						union perf_mem_data_src *data_src)
> +{
> +	switch (record->source) {
> +	case ARM_SPE_NV_L1D:
> +		data_src->mem_lvl = PERF_MEM_LVL_HIT;

I understand mem_lvl is deprecated but shouldn't we add the level bits here as well for backwards compat?

> +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
> +		break;
> +	case ARM_SPE_NV_L2:
> +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
> +		break;
> +	case ARM_SPE_NV_PEER_CORE:
> +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> +		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
> +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
> +		break;
> +	/*
> +	 * We don't know if this is L1, L2, or even L3 (for the cases the system
> +	 * has an L3, but we do know it was a cache-2-cache transfer, so set
> +	 * SNOOP_HITM
> +	 */
> +	case ARM_SPE_NV_LCL_CLSTR:
> +	case ARM_SPE_NV_PEER_CLSTR:
> +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> +		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
> +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
> +		break;
> +	/*
> +	 * System cache is assumed to be L4, as cluster cache (if it exists)
> +	 * would be L3 cache on Neoverse platforms
> +	 */
> +	case ARM_SPE_NV_SYS_CACHE:
> +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L4;
> +		break;
> +	/*
> +	 * We don't know what level it hit in, except it came from the other
> +	 * socket
> +	 */
> +	case ARM_SPE_NV_REMOTE:
> +		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
> +		data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
> +		break;
> +	case ARM_SPE_NV_DRAM:
> +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
> +		break;
> +	default:
> +		break;
> +	}
> +}
> +
> +static void arm_spe__synth_data_source_generic(const struct arm_spe_record *record,
> +						union perf_mem_data_src *data_src)
> +{
>  	if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) {
> -		data_src.mem_lvl = PERF_MEM_LVL_L3;
> +		data_src->mem_lvl = PERF_MEM_LVL_L3;
>  
>  		if (record->type & ARM_SPE_LLC_MISS)
> -			data_src.mem_lvl |= PERF_MEM_LVL_MISS;
> +			data_src->mem_lvl |= PERF_MEM_LVL_MISS;
>  		else
> -			data_src.mem_lvl |= PERF_MEM_LVL_HIT;
> +			data_src->mem_lvl |= PERF_MEM_LVL_HIT;
>  	} else if (record->type & (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS)) {
> -		data_src.mem_lvl = PERF_MEM_LVL_L1;
> +		data_src->mem_lvl = PERF_MEM_LVL_L1;
>  
>  		if (record->type & ARM_SPE_L1D_MISS)
> -			data_src.mem_lvl |= PERF_MEM_LVL_MISS;
> +			data_src->mem_lvl |= PERF_MEM_LVL_MISS;
>  		else
> -			data_src.mem_lvl |= PERF_MEM_LVL_HIT;
> +			data_src->mem_lvl |= PERF_MEM_LVL_HIT;
>  	}
>  
>  	if (record->type & ARM_SPE_REMOTE_ACCESS)
> -		data_src.mem_lvl |= PERF_MEM_LVL_REM_CCE1;
> +		data_src->mem_lvl |= PERF_MEM_LVL_REM_CCE1;
> +}
> +
> +static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 midr)
> +{
> +	union perf_mem_data_src	data_src = { 0 };
> +	bool is_neoverse = is_midr_in_range(midr, neoverse_spe);
> +
> +	if (record->op & ARM_SPE_LD)
> +		data_src.mem_op = PERF_MEM_OP_LOAD;
> +	else
> +		data_src.mem_op = PERF_MEM_OP_STORE;
> +
> +	if (is_neoverse)
> +		arm_spe__synth_data_source_neoverse(record, &data_src);
> +	else
> +		arm_spe__synth_data_source_generic(record, &data_src);
>  
>  	if (record->type & (ARM_SPE_TLB_ACCESS | ARM_SPE_TLB_MISS)) {
>  		data_src.mem_dtlb = PERF_MEM_TLB_WK;
> @@ -446,7 +518,7 @@ static int arm_spe_sample(struct arm_spe_queue *speq)
>  	u64 data_src;
>  	int err;
>  
> -	data_src = arm_spe__synth_data_source(record);
> +	data_src = arm_spe__synth_data_source(record, spe->midr);
>  
>  	if (spe->sample_flc) {
>  		if (record->type & ARM_SPE_L1D_MISS) {
> @@ -796,6 +868,10 @@ static int arm_spe_process_event(struct perf_session *session,
>  	u64 timestamp;
>  	struct arm_spe *spe = container_of(session->auxtrace,
>  			struct arm_spe, auxtrace);
> +	const char *cpuid = perf_env__cpuid(session->evlist->env);
> +	u64 midr = strtol(cpuid, NULL, 16);
> +
> +	spe->midr = midr;

I think this midr setup belongs in the arm_spe_process_auxtrace_info callback instead.

Thanks,
German

>  
>  	if (dump_trace)
>  		return 0;

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 1/2] perf arm-spe: Use SPE data source for neoverse cores
@ 2022-03-02 11:59   ` German Gomez
  0 siblings, 0 replies; 32+ messages in thread
From: German Gomez @ 2022-03-02 11:59 UTC (permalink / raw)
  To: Ali Saidi, linux-kernel, linux-perf-users, linux-arm-kernel, leo.yan
  Cc: benh, Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
	Mark Rutland, Alexander Shishkin, Jiri Olsa, Namhyung Kim,
	John Garry, Will Deacon, Mathieu Poirier, James Clark,
	Andrew Kilroy, Jin Yao, Kajol Jain, Li Huafei

Hi Ali,

On 21/02/2022 22:47, Ali Saidi wrote:
> When synthesizing data from SPE, augment the type with source information
> for Arm Neoverse cores. The field is IMPLDEF but the Neoverse cores all use
> the same encoding. I can't find encoding information for any other SPE
> implementations to unify their choices with Arm's thus that is left for
> future work.
>
> This change populates the mem_lvl_num for Neoverse cores instead of the
> deprecated mem_lvl namespace.
>
> Signed-off-by: Ali Saidi <alisaidi@amazon.com>
> ---
>  .../util/arm-spe-decoder/arm-spe-decoder.c    |   1 +
>  .../util/arm-spe-decoder/arm-spe-decoder.h    |  12 ++
>  tools/perf/util/arm-spe.c                     | 106 +++++++++++++++---
>  3 files changed, 104 insertions(+), 15 deletions(-)
>
> diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
> index 5e390a1a79ab..091987dd3966 100644
> --- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
> +++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
> @@ -220,6 +220,7 @@ static int arm_spe_read_record(struct arm_spe_decoder *decoder)
>  
>  			break;
>  		case ARM_SPE_DATA_SOURCE:
> +			decoder->record.source = payload;
>  			break;
>  		case ARM_SPE_BAD:
>  			break;
> diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
> index 69b31084d6be..c81bf90c0996 100644
> --- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
> +++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
> @@ -29,6 +29,17 @@ enum arm_spe_op_type {
>  	ARM_SPE_ST		= 1 << 1,
>  };
>  
> +enum arm_spe_neoverse_data_source {
> +	ARM_SPE_NV_L1D        = 0x0,
> +	ARM_SPE_NV_L2         = 0x8,
> +	ARM_SPE_NV_PEER_CORE  = 0x9,
> +	ARM_SPE_NV_LCL_CLSTR  = 0xa,
> +	ARM_SPE_NV_SYS_CACHE  = 0xb,
> +	ARM_SPE_NV_PEER_CLSTR = 0xc,
> +	ARM_SPE_NV_REMOTE     = 0xd,
> +	ARM_SPE_NV_DRAM       = 0xe,
> +};
> +
>  struct arm_spe_record {
>  	enum arm_spe_sample_type type;
>  	int err;
> @@ -40,6 +51,7 @@ struct arm_spe_record {
>  	u64 virt_addr;
>  	u64 phys_addr;
>  	u64 context_id;
> +	u16 source;
>  };
>  
>  struct arm_spe_insn;
> diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
> index d2b64e3f588b..e0243c2fed5f 100644
> --- a/tools/perf/util/arm-spe.c
> +++ b/tools/perf/util/arm-spe.c
> @@ -34,6 +34,7 @@
>  #include "arm-spe-decoder/arm-spe-decoder.h"
>  #include "arm-spe-decoder/arm-spe-pkt-decoder.h"
>  
> +#include <../../../arch/arm64/include/asm/cputype.h>
>  #define MAX_TIMESTAMP (~0ULL)
>  
>  struct arm_spe {
> @@ -45,6 +46,7 @@ struct arm_spe {
>  	struct perf_session		*session;
>  	struct machine			*machine;
>  	u32				pmu_type;
> +	u64				midr;
>  
>  	struct perf_tsc_conversion	tc;
>  
> @@ -399,33 +401,103 @@ static bool arm_spe__is_memory_event(enum arm_spe_sample_type type)
>  	return false;
>  }
>  
> -static u64 arm_spe__synth_data_source(const struct arm_spe_record *record)
> -{
> -	union perf_mem_data_src	data_src = { 0 };
> +static const struct midr_range neoverse_spe[] = {
> +	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
> +	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
> +	{},
> +};
>  
> -	if (record->op == ARM_SPE_LD)
> -		data_src.mem_op = PERF_MEM_OP_LOAD;
> -	else
> -		data_src.mem_op = PERF_MEM_OP_STORE;
>  
> +static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *record,
> +						union perf_mem_data_src *data_src)
> +{
> +	switch (record->source) {
> +	case ARM_SPE_NV_L1D:
> +		data_src->mem_lvl = PERF_MEM_LVL_HIT;

I understand mem_lvl is deprecated but shouldn't we add the level bits here as well for backwards compat?

> +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
> +		break;
> +	case ARM_SPE_NV_L2:
> +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
> +		break;
> +	case ARM_SPE_NV_PEER_CORE:
> +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> +		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
> +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
> +		break;
> +	/*
> +	 * We don't know if this is L1, L2, or even L3 (for the cases the system
> +	 * has an L3, but we do know it was a cache-2-cache transfer, so set
> +	 * SNOOP_HITM
> +	 */
> +	case ARM_SPE_NV_LCL_CLSTR:
> +	case ARM_SPE_NV_PEER_CLSTR:
> +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> +		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
> +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
> +		break;
> +	/*
> +	 * System cache is assumed to be L4, as cluster cache (if it exists)
> +	 * would be L3 cache on Neoverse platforms
> +	 */
> +	case ARM_SPE_NV_SYS_CACHE:
> +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L4;
> +		break;
> +	/*
> +	 * We don't know what level it hit in, except it came from the other
> +	 * socket
> +	 */
> +	case ARM_SPE_NV_REMOTE:
> +		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
> +		data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
> +		break;
> +	case ARM_SPE_NV_DRAM:
> +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
> +		break;
> +	default:
> +		break;
> +	}
> +}
> +
> +static void arm_spe__synth_data_source_generic(const struct arm_spe_record *record,
> +						union perf_mem_data_src *data_src)
> +{
>  	if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) {
> -		data_src.mem_lvl = PERF_MEM_LVL_L3;
> +		data_src->mem_lvl = PERF_MEM_LVL_L3;
>  
>  		if (record->type & ARM_SPE_LLC_MISS)
> -			data_src.mem_lvl |= PERF_MEM_LVL_MISS;
> +			data_src->mem_lvl |= PERF_MEM_LVL_MISS;
>  		else
> -			data_src.mem_lvl |= PERF_MEM_LVL_HIT;
> +			data_src->mem_lvl |= PERF_MEM_LVL_HIT;
>  	} else if (record->type & (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS)) {
> -		data_src.mem_lvl = PERF_MEM_LVL_L1;
> +		data_src->mem_lvl = PERF_MEM_LVL_L1;
>  
>  		if (record->type & ARM_SPE_L1D_MISS)
> -			data_src.mem_lvl |= PERF_MEM_LVL_MISS;
> +			data_src->mem_lvl |= PERF_MEM_LVL_MISS;
>  		else
> -			data_src.mem_lvl |= PERF_MEM_LVL_HIT;
> +			data_src->mem_lvl |= PERF_MEM_LVL_HIT;
>  	}
>  
>  	if (record->type & ARM_SPE_REMOTE_ACCESS)
> -		data_src.mem_lvl |= PERF_MEM_LVL_REM_CCE1;
> +		data_src->mem_lvl |= PERF_MEM_LVL_REM_CCE1;
> +}
> +
> +static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 midr)
> +{
> +	union perf_mem_data_src	data_src = { 0 };
> +	bool is_neoverse = is_midr_in_range(midr, neoverse_spe);
> +
> +	if (record->op & ARM_SPE_LD)
> +		data_src.mem_op = PERF_MEM_OP_LOAD;
> +	else
> +		data_src.mem_op = PERF_MEM_OP_STORE;
> +
> +	if (is_neoverse)
> +		arm_spe__synth_data_source_neoverse(record, &data_src);
> +	else
> +		arm_spe__synth_data_source_generic(record, &data_src);
>  
>  	if (record->type & (ARM_SPE_TLB_ACCESS | ARM_SPE_TLB_MISS)) {
>  		data_src.mem_dtlb = PERF_MEM_TLB_WK;
> @@ -446,7 +518,7 @@ static int arm_spe_sample(struct arm_spe_queue *speq)
>  	u64 data_src;
>  	int err;
>  
> -	data_src = arm_spe__synth_data_source(record);
> +	data_src = arm_spe__synth_data_source(record, spe->midr);
>  
>  	if (spe->sample_flc) {
>  		if (record->type & ARM_SPE_L1D_MISS) {
> @@ -796,6 +868,10 @@ static int arm_spe_process_event(struct perf_session *session,
>  	u64 timestamp;
>  	struct arm_spe *spe = container_of(session->auxtrace,
>  			struct arm_spe, auxtrace);
> +	const char *cpuid = perf_env__cpuid(session->evlist->env);
> +	u64 midr = strtol(cpuid, NULL, 16);
> +
> +	spe->midr = midr;

I think this midr setup belongs in the arm_spe_process_auxtrace_info callback instead.

Thanks,
German

>  
>  	if (dump_trace)
>  		return 0;

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
  2022-02-21 22:48   ` Ali Saidi
@ 2022-03-02 15:39     ` German Gomez
  -1 siblings, 0 replies; 32+ messages in thread
From: German Gomez @ 2022-03-02 15:39 UTC (permalink / raw)
  To: Ali Saidi, linux-kernel, linux-perf-users, linux-arm-kernel, leo.yan
  Cc: benh, Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
	Mark Rutland, Alexander Shishkin, Jiri Olsa, Namhyung Kim,
	John Garry, Will Deacon, Mathieu Poirier, James Clark,
	Andrew Kilroy, Jin Yao, Kajol Jain, Li Huafei


On 21/02/2022 22:48, Ali Saidi wrote:
> Current code only support HITM statistics for last level cache (LLC)
> when mem_lvl encodes the level. On existing Arm64 machines there are as
> many as four levels cache and this change supports decoding l1, l2, and
> llc hits from the mem_lvl_num data. Given that the mem_lvl namespace is
> being deprecated take this opportunity to encode the neoverse data into
> mem_lvl_num.

Since Neoverse is mentioned in the commit message, I think there should be a comment somewhere in the code as well.

>
> For loads that hit in a the LLC snoop filter and are fullfilled from a
> higher level cache, it's not usually clear what the true level of the
> cache the data came from (i.e. a transfer from a core could come from
> it's L1 or L2). Instead of making an assumption of where the line came
> from, add support for incrementing HITM if the source is CACHE_ANY.
>
> Since other architectures don't seem to populate the mem_lvl_num field
> here there shouldn't be a change in functionality.
>
> Signed-off-by: Ali Saidi <alisaidi@amazon.com>
> ---
>  tools/perf/util/mem-events.c | 14 ++++++++++----
>  1 file changed, 10 insertions(+), 4 deletions(-)
>
> diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
> index ed0ab838bcc5..6c3fd4aac7ae 100644
> --- a/tools/perf/util/mem-events.c
> +++ b/tools/perf/util/mem-events.c
> @@ -485,6 +485,7 @@ int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
>  	u64 daddr  = mi->daddr.addr;
>  	u64 op     = data_src->mem_op;
>  	u64 lvl    = data_src->mem_lvl;
> +	u64 lnum   = data_src->mem_lvl_num;
>  	u64 snoop  = data_src->mem_snoop;
>  	u64 lock   = data_src->mem_lock;
>  	u64 blk    = data_src->mem_blk;
> @@ -527,16 +528,18 @@ do {				\
>  			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
>  			if (lvl & P(LVL, IO))  stats->ld_io++;
>  			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
> -			if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
> -			if (lvl & P(LVL, L2 )) stats->ld_l2hit++;
> -			if (lvl & P(LVL, L3 )) {
> +			if (lvl & P(LVL, L1) || lnum == P(LVLNUM, L1))
> +				stats->ld_l1hit++;
> +			if (lvl & P(LVL, L2) || lnum == P(LVLNUM, L2))
> +				stats->ld_l2hit++;
> +			if (lvl & P(LVL, L3) || lnum == P(LVLNUM, L4)) {

According to a comment in the previous patch, using L4 is specific to Neoverse, right?

Maybe we need to distinguish the Neoverse case from the generic one here as well

if (is_neoverse)
// treat L4 as llc
else
// treat L3 as llc

>  				if (snoop & P(SNOOP, HITM))
>  					HITM_INC(lcl_hitm);
>  				else
>  					stats->ld_llchit++;
>  			}
>  
> -			if (lvl & P(LVL, LOC_RAM)) {
> +			if (lvl & P(LVL, LOC_RAM) || lnum == P(LVLNUM, RAM)) {
>  				stats->lcl_dram++;
>  				if (snoop & P(SNOOP, HIT))
>  					stats->ld_shared++;
> @@ -564,6 +567,9 @@ do {				\
>  				HITM_INC(rmt_hitm);
>  		}
>  
> +		if (lnum == P(LVLNUM, ANY_CACHE) && snoop & P(SNOOP, HITM))
> +			HITM_INC(lcl_hitm);
> +
>  		if ((lvl & P(LVL, MISS)))
>  			stats->ld_miss++;
>  

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
@ 2022-03-02 15:39     ` German Gomez
  0 siblings, 0 replies; 32+ messages in thread
From: German Gomez @ 2022-03-02 15:39 UTC (permalink / raw)
  To: Ali Saidi, linux-kernel, linux-perf-users, linux-arm-kernel, leo.yan
  Cc: benh, Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
	Mark Rutland, Alexander Shishkin, Jiri Olsa, Namhyung Kim,
	John Garry, Will Deacon, Mathieu Poirier, James Clark,
	Andrew Kilroy, Jin Yao, Kajol Jain, Li Huafei


On 21/02/2022 22:48, Ali Saidi wrote:
> Current code only support HITM statistics for last level cache (LLC)
> when mem_lvl encodes the level. On existing Arm64 machines there are as
> many as four levels cache and this change supports decoding l1, l2, and
> llc hits from the mem_lvl_num data. Given that the mem_lvl namespace is
> being deprecated take this opportunity to encode the neoverse data into
> mem_lvl_num.

Since Neoverse is mentioned in the commit message, I think there should be a comment somewhere in the code as well.

>
> For loads that hit in a the LLC snoop filter and are fullfilled from a
> higher level cache, it's not usually clear what the true level of the
> cache the data came from (i.e. a transfer from a core could come from
> it's L1 or L2). Instead of making an assumption of where the line came
> from, add support for incrementing HITM if the source is CACHE_ANY.
>
> Since other architectures don't seem to populate the mem_lvl_num field
> here there shouldn't be a change in functionality.
>
> Signed-off-by: Ali Saidi <alisaidi@amazon.com>
> ---
>  tools/perf/util/mem-events.c | 14 ++++++++++----
>  1 file changed, 10 insertions(+), 4 deletions(-)
>
> diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
> index ed0ab838bcc5..6c3fd4aac7ae 100644
> --- a/tools/perf/util/mem-events.c
> +++ b/tools/perf/util/mem-events.c
> @@ -485,6 +485,7 @@ int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
>  	u64 daddr  = mi->daddr.addr;
>  	u64 op     = data_src->mem_op;
>  	u64 lvl    = data_src->mem_lvl;
> +	u64 lnum   = data_src->mem_lvl_num;
>  	u64 snoop  = data_src->mem_snoop;
>  	u64 lock   = data_src->mem_lock;
>  	u64 blk    = data_src->mem_blk;
> @@ -527,16 +528,18 @@ do {				\
>  			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
>  			if (lvl & P(LVL, IO))  stats->ld_io++;
>  			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
> -			if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
> -			if (lvl & P(LVL, L2 )) stats->ld_l2hit++;
> -			if (lvl & P(LVL, L3 )) {
> +			if (lvl & P(LVL, L1) || lnum == P(LVLNUM, L1))
> +				stats->ld_l1hit++;
> +			if (lvl & P(LVL, L2) || lnum == P(LVLNUM, L2))
> +				stats->ld_l2hit++;
> +			if (lvl & P(LVL, L3) || lnum == P(LVLNUM, L4)) {

According to a comment in the previous patch, using L4 is specific to Neoverse, right?

Maybe we need to distinguish the Neoverse case from the generic one here as well

if (is_neoverse)
// treat L4 as llc
else
// treat L3 as llc

>  				if (snoop & P(SNOOP, HITM))
>  					HITM_INC(lcl_hitm);
>  				else
>  					stats->ld_llchit++;
>  			}
>  
> -			if (lvl & P(LVL, LOC_RAM)) {
> +			if (lvl & P(LVL, LOC_RAM) || lnum == P(LVLNUM, RAM)) {
>  				stats->lcl_dram++;
>  				if (snoop & P(SNOOP, HIT))
>  					stats->ld_shared++;
> @@ -564,6 +567,9 @@ do {				\
>  				HITM_INC(rmt_hitm);
>  		}
>  
> +		if (lnum == P(LVLNUM, ANY_CACHE) && snoop & P(SNOOP, HITM))
> +			HITM_INC(lcl_hitm);
> +
>  		if ((lvl & P(LVL, MISS)))
>  			stats->ld_miss++;
>  

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 1/2] perf arm-spe: Use SPE data source for neoverse cores
  2022-03-02 11:59   ` German Gomez
@ 2022-03-13 11:46     ` Leo Yan
  -1 siblings, 0 replies; 32+ messages in thread
From: Leo Yan @ 2022-03-13 11:46 UTC (permalink / raw)
  To: German Gomez
  Cc: Ali Saidi, linux-kernel, linux-perf-users, linux-arm-kernel,
	benh, Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
	Mark Rutland, Alexander Shishkin, Jiri Olsa, Namhyung Kim,
	John Garry, Will Deacon, Mathieu Poirier, James Clark,
	Andrew Kilroy, Jin Yao, Kajol Jain, Li Huafei

Hi Ali, German,

On Wed, Mar 02, 2022 at 11:59:05AM +0000, German Gomez wrote:
> Hi Ali,
> 
> On 21/02/2022 22:47, Ali Saidi wrote:
> > When synthesizing data from SPE, augment the type with source information
> > for Arm Neoverse cores. The field is IMPLDEF but the Neoverse cores all use
> > the same encoding. I can't find encoding information for any other SPE
> > implementations to unify their choices with Arm's thus that is left for
> > future work.
> >
> > This change populates the mem_lvl_num for Neoverse cores instead of the
> > deprecated mem_lvl namespace.
> >
> > Signed-off-by: Ali Saidi <alisaidi@amazon.com>
> > ---
> >  .../util/arm-spe-decoder/arm-spe-decoder.c    |   1 +
> >  .../util/arm-spe-decoder/arm-spe-decoder.h    |  12 ++
> >  tools/perf/util/arm-spe.c                     | 106 +++++++++++++++---
> >  3 files changed, 104 insertions(+), 15 deletions(-)
> >
> > diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
> > index 5e390a1a79ab..091987dd3966 100644
> > --- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
> > +++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
> > @@ -220,6 +220,7 @@ static int arm_spe_read_record(struct arm_spe_decoder *decoder)
> >  
> >  			break;
> >  		case ARM_SPE_DATA_SOURCE:
> > +			decoder->record.source = payload;
> >  			break;
> >  		case ARM_SPE_BAD:
> >  			break;
> > diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
> > index 69b31084d6be..c81bf90c0996 100644
> > --- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
> > +++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
> > @@ -29,6 +29,17 @@ enum arm_spe_op_type {
> >  	ARM_SPE_ST		= 1 << 1,
> >  };
> >  
> > +enum arm_spe_neoverse_data_source {
> > +	ARM_SPE_NV_L1D        = 0x0,
> > +	ARM_SPE_NV_L2         = 0x8,
> > +	ARM_SPE_NV_PEER_CORE  = 0x9,
> > +	ARM_SPE_NV_LCL_CLSTR  = 0xa,
> > +	ARM_SPE_NV_SYS_CACHE  = 0xb,
> > +	ARM_SPE_NV_PEER_CLSTR = 0xc,
> > +	ARM_SPE_NV_REMOTE     = 0xd,
> > +	ARM_SPE_NV_DRAM       = 0xe,
> > +};
> > +
> >  struct arm_spe_record {
> >  	enum arm_spe_sample_type type;
> >  	int err;
> > @@ -40,6 +51,7 @@ struct arm_spe_record {
> >  	u64 virt_addr;
> >  	u64 phys_addr;
> >  	u64 context_id;
> > +	u16 source;
> >  };
> >  
> >  struct arm_spe_insn;
> > diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
> > index d2b64e3f588b..e0243c2fed5f 100644
> > --- a/tools/perf/util/arm-spe.c
> > +++ b/tools/perf/util/arm-spe.c
> > @@ -34,6 +34,7 @@
> >  #include "arm-spe-decoder/arm-spe-decoder.h"
> >  #include "arm-spe-decoder/arm-spe-pkt-decoder.h"
> >  
> > +#include <../../../arch/arm64/include/asm/cputype.h>
> >  #define MAX_TIMESTAMP (~0ULL)
> >  
> >  struct arm_spe {
> > @@ -45,6 +46,7 @@ struct arm_spe {
> >  	struct perf_session		*session;
> >  	struct machine			*machine;
> >  	u32				pmu_type;
> > +	u64				midr;
> >  
> >  	struct perf_tsc_conversion	tc;
> >  
> > @@ -399,33 +401,103 @@ static bool arm_spe__is_memory_event(enum arm_spe_sample_type type)
> >  	return false;
> >  }
> >  
> > -static u64 arm_spe__synth_data_source(const struct arm_spe_record *record)
> > -{
> > -	union perf_mem_data_src	data_src = { 0 };
> > +static const struct midr_range neoverse_spe[] = {
> > +	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
> > +	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
> > +	{},
> > +};
> >  
> > -	if (record->op == ARM_SPE_LD)
> > -		data_src.mem_op = PERF_MEM_OP_LOAD;
> > -	else
> > -		data_src.mem_op = PERF_MEM_OP_STORE;
> >  
> > +static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *record,
> > +						union perf_mem_data_src *data_src)
> > +{
> > +	switch (record->source) {
> > +	case ARM_SPE_NV_L1D:
> > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> 
> I understand mem_lvl is deprecated but shouldn't we add the level bits here as well for backwards compat?

Thanks for pointing this out.  Yeah, I think German's suggestion is
valid: the commit 6ae5fa61d27d ("perf/x86: Fix data source decoding
for Skylake") introduces the new field 'mem_lvl_num', but it also keeps
backward compatibility for the field 'mem_lvl'.

> > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
> > +		break;
> > +	case ARM_SPE_NV_L2:
> > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
> > +		break;
> > +	case ARM_SPE_NV_PEER_CORE:
> > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > +		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
> > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;

For the PEER_CORE data source, we don't know if the data is coming from
the peer core's L1 cache or its L2 cache, right?

If so, do you think it's possible to retrieve more accurate info
from the field "record->type"?

> > +		break;
> > +	/*
> > +	 * We don't know if this is L1, L2, or even L3 (for the cases the system
> > +	 * has an L3, but we do know it was a cache-2-cache transfer, so set
> > +	 * SNOOP_HITM
> > +	 */
> > +	case ARM_SPE_NV_LCL_CLSTR:
> > +	case ARM_SPE_NV_PEER_CLSTR:
> > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > +		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
> > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;

It seems to me we need to add an attribute to indicate the difference
between ARM_SPE_NV_PEER_CORE and ARM_SPE_NV_LCL_CLSTR.

For the ARM_SPE_NV_PEER_CLSTR data source, should we set any "remote"
attribute as well?

> > +		break;
> > +	/*
> > +	 * System cache is assumed to be L4, as cluster cache (if it exists)
> > +	 * would be L3 cache on Neoverse platforms
> > +	 */
> > +	case ARM_SPE_NV_SYS_CACHE:
> > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L4;
> > +		break;
> > +	/*
> > +	 * We don't know what level it hit in, except it came from the other
> > +	 * socket
> > +	 */
> > +	case ARM_SPE_NV_REMOTE:
> > +		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
> > +		data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
> > +		break;

Just curious, is it possible that 'record->source' combines multiple
bits?  For example, could we get a data source value such as:

  ARM_SPE_NV_REMOTE | ARM_SPE_NV_DRAM

> > +	case ARM_SPE_NV_DRAM:
> > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
> > +		break;
> > +	default:
> > +		break;
> > +	}
> > +}
> > +
> > +static void arm_spe__synth_data_source_generic(const struct arm_spe_record *record,
> > +						union perf_mem_data_src *data_src)
> > +{
> >  	if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) {
> > -		data_src.mem_lvl = PERF_MEM_LVL_L3;
> > +		data_src->mem_lvl = PERF_MEM_LVL_L3;
> >  
> >  		if (record->type & ARM_SPE_LLC_MISS)
> > -			data_src.mem_lvl |= PERF_MEM_LVL_MISS;
> > +			data_src->mem_lvl |= PERF_MEM_LVL_MISS;
> >  		else
> > -			data_src.mem_lvl |= PERF_MEM_LVL_HIT;
> > +			data_src->mem_lvl |= PERF_MEM_LVL_HIT;
> >  	} else if (record->type & (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS)) {
> > -		data_src.mem_lvl = PERF_MEM_LVL_L1;
> > +		data_src->mem_lvl = PERF_MEM_LVL_L1;
> >  
> >  		if (record->type & ARM_SPE_L1D_MISS)
> > -			data_src.mem_lvl |= PERF_MEM_LVL_MISS;
> > +			data_src->mem_lvl |= PERF_MEM_LVL_MISS;
> >  		else
> > -			data_src.mem_lvl |= PERF_MEM_LVL_HIT;
> > +			data_src->mem_lvl |= PERF_MEM_LVL_HIT;
> >  	}
> >  
> >  	if (record->type & ARM_SPE_REMOTE_ACCESS)
> > -		data_src.mem_lvl |= PERF_MEM_LVL_REM_CCE1;
> > +		data_src->mem_lvl |= PERF_MEM_LVL_REM_CCE1;
> > +}
> > +
> > +static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 midr)
> > +{
> > +	union perf_mem_data_src	data_src = { 0 };
> > +	bool is_neoverse = is_midr_in_range(midr, neoverse_spe);
> > +
> > +	if (record->op & ARM_SPE_LD)
> > +		data_src.mem_op = PERF_MEM_OP_LOAD;
> > +	else
> > +		data_src.mem_op = PERF_MEM_OP_STORE;
> > +
> > +	if (is_neoverse)
> > +		arm_spe__synth_data_source_neoverse(record, &data_src);
> > +	else
> > +		arm_spe__synth_data_source_generic(record, &data_src);
> >  
> >  	if (record->type & (ARM_SPE_TLB_ACCESS | ARM_SPE_TLB_MISS)) {
> >  		data_src.mem_dtlb = PERF_MEM_TLB_WK;
> > @@ -446,7 +518,7 @@ static int arm_spe_sample(struct arm_spe_queue *speq)
> >  	u64 data_src;
> >  	int err;
> >  
> > -	data_src = arm_spe__synth_data_source(record);
> > +	data_src = arm_spe__synth_data_source(record, spe->midr);
> >  
> >  	if (spe->sample_flc) {
> >  		if (record->type & ARM_SPE_L1D_MISS) {
> > @@ -796,6 +868,10 @@ static int arm_spe_process_event(struct perf_session *session,
> >  	u64 timestamp;
> >  	struct arm_spe *spe = container_of(session->auxtrace,
> >  			struct arm_spe, auxtrace);
> > +	const char *cpuid = perf_env__cpuid(session->evlist->env);
> > +	u64 midr = strtol(cpuid, NULL, 16);
> > +
> > +	spe->midr = midr;
> 
> I think this midr setup belongs in the arm_spe_process_auxtrace_info callback instead.

Yeah, arm_spe_process_event() would be invoked multiple times when
processing perf events.  arm_spe_process_auxtrace_info() would be a
good place to initialize midr.

Thanks,
Leo

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 1/2] perf arm-spe: Use SPE data source for neoverse cores
@ 2022-03-13 11:46     ` Leo Yan
  0 siblings, 0 replies; 32+ messages in thread
From: Leo Yan @ 2022-03-13 11:46 UTC (permalink / raw)
  To: German Gomez
  Cc: Ali Saidi, linux-kernel, linux-perf-users, linux-arm-kernel,
	benh, Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
	Mark Rutland, Alexander Shishkin, Jiri Olsa, Namhyung Kim,
	John Garry, Will Deacon, Mathieu Poirier, James Clark,
	Andrew Kilroy, Jin Yao, Kajol Jain, Li Huafei

Hi Ali, German,

On Wed, Mar 02, 2022 at 11:59:05AM +0000, German Gomez wrote:
> Hi Ali,
> 
> On 21/02/2022 22:47, Ali Saidi wrote:
> > When synthesizing data from SPE, augment the type with source information
> > for Arm Neoverse cores. The field is IMPLDEF but the Neoverse cores all use
> > the same encoding. I can't find encoding information for any other SPE
> > implementations to unify their choices with Arm's thus that is left for
> > future work.
> >
> > This change populates the mem_lvl_num for Neoverse cores instead of the
> > deprecated mem_lvl namespace.
> >
> > Signed-off-by: Ali Saidi <alisaidi@amazon.com>
> > ---
> >  .../util/arm-spe-decoder/arm-spe-decoder.c    |   1 +
> >  .../util/arm-spe-decoder/arm-spe-decoder.h    |  12 ++
> >  tools/perf/util/arm-spe.c                     | 106 +++++++++++++++---
> >  3 files changed, 104 insertions(+), 15 deletions(-)
> >
> > diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
> > index 5e390a1a79ab..091987dd3966 100644
> > --- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
> > +++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
> > @@ -220,6 +220,7 @@ static int arm_spe_read_record(struct arm_spe_decoder *decoder)
> >  
> >  			break;
> >  		case ARM_SPE_DATA_SOURCE:
> > +			decoder->record.source = payload;
> >  			break;
> >  		case ARM_SPE_BAD:
> >  			break;
> > diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
> > index 69b31084d6be..c81bf90c0996 100644
> > --- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
> > +++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
> > @@ -29,6 +29,17 @@ enum arm_spe_op_type {
> >  	ARM_SPE_ST		= 1 << 1,
> >  };
> >  
> > +enum arm_spe_neoverse_data_source {
> > +	ARM_SPE_NV_L1D        = 0x0,
> > +	ARM_SPE_NV_L2         = 0x8,
> > +	ARM_SPE_NV_PEER_CORE  = 0x9,
> > +	ARM_SPE_NV_LCL_CLSTR  = 0xa,
> > +	ARM_SPE_NV_SYS_CACHE  = 0xb,
> > +	ARM_SPE_NV_PEER_CLSTR = 0xc,
> > +	ARM_SPE_NV_REMOTE     = 0xd,
> > +	ARM_SPE_NV_DRAM       = 0xe,
> > +};
> > +
> >  struct arm_spe_record {
> >  	enum arm_spe_sample_type type;
> >  	int err;
> > @@ -40,6 +51,7 @@ struct arm_spe_record {
> >  	u64 virt_addr;
> >  	u64 phys_addr;
> >  	u64 context_id;
> > +	u16 source;
> >  };
> >  
> >  struct arm_spe_insn;
> > diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
> > index d2b64e3f588b..e0243c2fed5f 100644
> > --- a/tools/perf/util/arm-spe.c
> > +++ b/tools/perf/util/arm-spe.c
> > @@ -34,6 +34,7 @@
> >  #include "arm-spe-decoder/arm-spe-decoder.h"
> >  #include "arm-spe-decoder/arm-spe-pkt-decoder.h"
> >  
> > +#include <../../../arch/arm64/include/asm/cputype.h>
> >  #define MAX_TIMESTAMP (~0ULL)
> >  
> >  struct arm_spe {
> > @@ -45,6 +46,7 @@ struct arm_spe {
> >  	struct perf_session		*session;
> >  	struct machine			*machine;
> >  	u32				pmu_type;
> > +	u64				midr;
> >  
> >  	struct perf_tsc_conversion	tc;
> >  
> > @@ -399,33 +401,103 @@ static bool arm_spe__is_memory_event(enum arm_spe_sample_type type)
> >  	return false;
> >  }
> >  
> > -static u64 arm_spe__synth_data_source(const struct arm_spe_record *record)
> > -{
> > -	union perf_mem_data_src	data_src = { 0 };
> > +static const struct midr_range neoverse_spe[] = {
> > +	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
> > +	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
> > +	{},
> > +};
> >  
> > -	if (record->op == ARM_SPE_LD)
> > -		data_src.mem_op = PERF_MEM_OP_LOAD;
> > -	else
> > -		data_src.mem_op = PERF_MEM_OP_STORE;
> >  
> > +static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *record,
> > +						union perf_mem_data_src *data_src)
> > +{
> > +	switch (record->source) {
> > +	case ARM_SPE_NV_L1D:
> > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> 
> I understand mem_lvl is deprecated but shouldn't we add the level bits here as well for backwards compat?

Thanks for pointing this out.  Yeah, I think German's suggestion is
valid: the commit 6ae5fa61d27d ("perf/x86: Fix data source decoding
for Skylake") introduces the new field 'mem_lvl_num', but it also keeps
backward compatibility for the field 'mem_lvl'.

> > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
> > +		break;
> > +	case ARM_SPE_NV_L2:
> > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
> > +		break;
> > +	case ARM_SPE_NV_PEER_CORE:
> > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > +		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
> > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;

For the PEER_CORE data source, we don't know if the data is coming from
the peer core's L1 cache or its L2 cache, right?

If so, do you think it's possible to retrieve more accurate info
from the field "record->type"?

> > +		break;
> > +	/*
> > +	 * We don't know if this is L1, L2, or even L3 (for the cases the system
> > +	 * has an L3), but we do know it was a cache-2-cache transfer, so set
> > +	 * SNOOP_HITM
> > +	 */
> > +	case ARM_SPE_NV_LCL_CLSTR:
> > +	case ARM_SPE_NV_PEER_CLSTR:
> > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > +		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
> > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;

Seems to me, we need to add attribution to indicate the difference
between ARM_SPE_NV_PEER_CORE and ARM_SPE_NV_LCL_CLSTR.

For ARM_SPE_NV_PEER_CLSTR data source, should we set any "remote"
attribution as well?

> > +		break;
> > +	/*
> > +	 * System cache is assumed to be L4, as cluster cache (if it exists)
> > +	 * would be L3 cache on Neoverse platforms
> > +	 */
> > +	case ARM_SPE_NV_SYS_CACHE:
> > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L4;
> > +		break;
> > +	/*
> > +	 * We don't know what level it hit in, except it came from the other
> > +	 * socket
> > +	 */
> > +	case ARM_SPE_NV_REMOTE:
> > +		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
> > +		data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
> > +		break;

Just curious, is it possible that 'record->source' combines multiple
bits?  Like we can get a data source value with:

  ARM_SPE_NV_REMOTE | ARM_SPE_NV_DRAM

> > +	case ARM_SPE_NV_DRAM:
> > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
> > +		break;
> > +	default:
> > +		break;
> > +	}
> > +}
> > +
> > +static void arm_spe__synth_data_source_generic(const struct arm_spe_record *record,
> > +						union perf_mem_data_src *data_src)
> > +{
> >  	if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) {
> > -		data_src.mem_lvl = PERF_MEM_LVL_L3;
> > +		data_src->mem_lvl = PERF_MEM_LVL_L3;
> >  
> >  		if (record->type & ARM_SPE_LLC_MISS)
> > -			data_src.mem_lvl |= PERF_MEM_LVL_MISS;
> > +			data_src->mem_lvl |= PERF_MEM_LVL_MISS;
> >  		else
> > -			data_src.mem_lvl |= PERF_MEM_LVL_HIT;
> > +			data_src->mem_lvl |= PERF_MEM_LVL_HIT;
> >  	} else if (record->type & (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS)) {
> > -		data_src.mem_lvl = PERF_MEM_LVL_L1;
> > +		data_src->mem_lvl = PERF_MEM_LVL_L1;
> >  
> >  		if (record->type & ARM_SPE_L1D_MISS)
> > -			data_src.mem_lvl |= PERF_MEM_LVL_MISS;
> > +			data_src->mem_lvl |= PERF_MEM_LVL_MISS;
> >  		else
> > -			data_src.mem_lvl |= PERF_MEM_LVL_HIT;
> > +			data_src->mem_lvl |= PERF_MEM_LVL_HIT;
> >  	}
> >  
> >  	if (record->type & ARM_SPE_REMOTE_ACCESS)
> > -		data_src.mem_lvl |= PERF_MEM_LVL_REM_CCE1;
> > +		data_src->mem_lvl |= PERF_MEM_LVL_REM_CCE1;
> > +}
> > +
> > +static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 midr)
> > +{
> > +	union perf_mem_data_src	data_src = { 0 };
> > +	bool is_neoverse = is_midr_in_range(midr, neoverse_spe);
> > +
> > +	if (record->op & ARM_SPE_LD)
> > +		data_src.mem_op = PERF_MEM_OP_LOAD;
> > +	else
> > +		data_src.mem_op = PERF_MEM_OP_STORE;
> > +
> > +	if (is_neoverse)
> > +		arm_spe__synth_data_source_neoverse(record, &data_src);
> > +	else
> > +		arm_spe__synth_data_source_generic(record, &data_src);
> >  
> >  	if (record->type & (ARM_SPE_TLB_ACCESS | ARM_SPE_TLB_MISS)) {
> >  		data_src.mem_dtlb = PERF_MEM_TLB_WK;
> > @@ -446,7 +518,7 @@ static int arm_spe_sample(struct arm_spe_queue *speq)
> >  	u64 data_src;
> >  	int err;
> >  
> > -	data_src = arm_spe__synth_data_source(record);
> > +	data_src = arm_spe__synth_data_source(record, spe->midr);
> >  
> >  	if (spe->sample_flc) {
> >  		if (record->type & ARM_SPE_L1D_MISS) {
> > @@ -796,6 +868,10 @@ static int arm_spe_process_event(struct perf_session *session,
> >  	u64 timestamp;
> >  	struct arm_spe *spe = container_of(session->auxtrace,
> >  			struct arm_spe, auxtrace);
> > +	const char *cpuid = perf_env__cpuid(session->evlist->env);
> > +	u64 midr = strtol(cpuid, NULL, 16);
> > +
> > +	spe->midr = midr;
> 
> I think this midr setup belongs in the arm_spe_process_auxtrace_info callback instead.

Yeah, arm_spe_process_event() would be invoked multiple times for
processing perf events.  arm_spe_process_auxtrace_info() would be a
good place to initialize midr.

Thanks,
Leo

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
  2022-03-02 15:39     ` German Gomez
@ 2022-03-13 12:44       ` Leo Yan
  -1 siblings, 0 replies; 32+ messages in thread
From: Leo Yan @ 2022-03-13 12:44 UTC (permalink / raw)
  To: German Gomez
  Cc: Ali Saidi, linux-kernel, linux-perf-users, linux-arm-kernel,
	benh, Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
	Mark Rutland, Alexander Shishkin, Jiri Olsa, Namhyung Kim,
	John Garry, Will Deacon, Mathieu Poirier, James Clark,
	Andrew Kilroy, Jin Yao, Kajol Jain, Li Huafei

On Wed, Mar 02, 2022 at 03:39:04PM +0000, German Gomez wrote:
> 
> On 21/02/2022 22:48, Ali Saidi wrote:
> > Current code only supports HITM statistics for last level cache (LLC)
> > when mem_lvl encodes the level. On existing Arm64 machines there are as
> > many as four levels of cache and this change supports decoding l1, l2, and
> > llc hits from the mem_lvl_num data. Given that the mem_lvl namespace is
> > being deprecated take this opportunity to encode the neoverse data into
> > mem_lvl_num.
> 
> Since Neoverse is mentioned in the commit message, I think there should be a comment somewhere in the code as well.
>

> > For loads that hit in the LLC snoop filter and are fulfilled from a
> > higher level cache, it's not usually clear what the true level of the
> > cache the data came from (i.e. a transfer from a core could come from
> > its L1 or L2). Instead of making an assumption of where the line came
> > from, add support for incrementing HITM if the source is CACHE_ANY.
> >
> > Since other architectures don't seem to populate the mem_lvl_num field
> > here there shouldn't be a change in functionality.
> >
> > Signed-off-by: Ali Saidi <alisaidi@amazon.com>
> > ---
> >  tools/perf/util/mem-events.c | 14 ++++++++++----
> >  1 file changed, 10 insertions(+), 4 deletions(-)
> >
> > diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
> > index ed0ab838bcc5..6c3fd4aac7ae 100644
> > --- a/tools/perf/util/mem-events.c
> > +++ b/tools/perf/util/mem-events.c
> > @@ -485,6 +485,7 @@ int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
> >  	u64 daddr  = mi->daddr.addr;
> >  	u64 op     = data_src->mem_op;
> >  	u64 lvl    = data_src->mem_lvl;
> > +	u64 lnum   = data_src->mem_lvl_num;
> >  	u64 snoop  = data_src->mem_snoop;
> >  	u64 lock   = data_src->mem_lock;
> >  	u64 blk    = data_src->mem_blk;
> > @@ -527,16 +528,18 @@ do {				\
> >  			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
> >  			if (lvl & P(LVL, IO))  stats->ld_io++;
> >  			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
> > -			if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
> > -			if (lvl & P(LVL, L2 )) stats->ld_l2hit++;
> > -			if (lvl & P(LVL, L3 )) {
> > +			if (lvl & P(LVL, L1) || lnum == P(LVLNUM, L1))
> > +				stats->ld_l1hit++;
> > +			if (lvl & P(LVL, L2) || lnum == P(LVLNUM, L2))
> > +				stats->ld_l2hit++;

It's good to split into two patches: one patch is to add statistics for
field 'mem_lvl_num', the second patch is to handle HITM tags.

> > +			if (lvl & P(LVL, L3) || lnum == P(LVLNUM, L4)) {

It's a bit weird that we take either PERF_MEM_LVL_L3 or
PERF_MEM_LVLNUM_L4 as the last level local cache in the same condition
checking.

> According to a comment in the previous patch, using L4 is specific to Neoverse, right?
> 
> Maybe we need to distinguish the Neoverse case from the generic one here as well
> 
> if (is_neoverse)
> // treat L4 as llc
> else
> // treat L3 as llc

I personally think it's not a good idea to distinguish platforms in the decoding code.

To make the statistics clearer, we can first increment hit values
for every cache level respectively;  so we can consider adding two
extra statistics items 'stats->ld_l3hit' and 'stats->ld_l4hit'.

        if (lvl & P(LVL, L3) || lnum == P(LVLNUM, L3))
                stats->ld_l3hit++;
        if (lnum == P(LVLNUM, L4))
                stats->ld_l4hit++;

> >  				if (snoop & P(SNOOP, HITM))
> >  					HITM_INC(lcl_hitm);
> >  				else
> >  					stats->ld_llchit++;

For the statistics of 'ld_llchit' and 'lcl_hitm', please see below comment.

> >  			}
> >  
> > -			if (lvl & P(LVL, LOC_RAM)) {
> > +			if (lvl & P(LVL, LOC_RAM) || lnum == P(LVLNUM, RAM)) {
> >  				stats->lcl_dram++;
> >  				if (snoop & P(SNOOP, HIT))
> >  					stats->ld_shared++;
> > @@ -564,6 +567,9 @@ do {				\
> >  				HITM_INC(rmt_hitm);
> >  		}
> >  
> > +		if (lnum == P(LVLNUM, ANY_CACHE) && snoop & P(SNOOP, HITM))
> > +			HITM_INC(lcl_hitm);
> > +

The condition checking of "lnum == P(LVLNUM, ANY_CACHE)" is a bit
suspicious and it might be fragile for supporting multiple archs.

So I am just wondering if it's possible that we add a new field
'llc_level' in the structure 'mem_info', we can initialize this field
based on different memory hardware events (e.g. Intel mem event,
Arm SPE, etc).  During the decoding phase, the local last level cache
is dynamically set to 'mem_info:: llc_level', we can base on it to
increment 'ld_llchit' and 'lcl_hitm', the code is like below:

                 if ((lvl & P(LVL, REM_CCE1)) ||
                     (lvl & P(LVL, REM_CCE2)) ||
                      mrem) {
                         if (snoop & P(SNOOP, HIT))
                                 stats->rmt_hit++;
                         else if (snoop & P(SNOOP, HITM))
                                 HITM_INC(rmt_hitm);
+               } else {
+                       if ((snoop & P(SNOOP, HIT)) && (lnum == mi->llc_level))
+                               stats->ld_llchit++;
+                       else if (snoop & P(SNOOP, HITM))
+                               HITM_INC(lcl_hitm);
                 }

Thanks,
Leo

> >  		if ((lvl & P(LVL, MISS)))
> >  			stats->ld_miss++;
> >  

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
@ 2022-03-13 12:44       ` Leo Yan
  0 siblings, 0 replies; 32+ messages in thread
From: Leo Yan @ 2022-03-13 12:44 UTC (permalink / raw)
  To: German Gomez
  Cc: Ali Saidi, linux-kernel, linux-perf-users, linux-arm-kernel,
	benh, Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
	Mark Rutland, Alexander Shishkin, Jiri Olsa, Namhyung Kim,
	John Garry, Will Deacon, Mathieu Poirier, James Clark,
	Andrew Kilroy, Jin Yao, Kajol Jain, Li Huafei

On Wed, Mar 02, 2022 at 03:39:04PM +0000, German Gomez wrote:
> 
> On 21/02/2022 22:48, Ali Saidi wrote:
> > Current code only support HITM statistics for last level cache (LLC)
> > when mem_lvl encodes the level. On existing Arm64 machines there are as
> > many as four levels cache and this change supports decoding l1, l2, and
> > llc hits from the mem_lvl_num data. Given that the mem_lvl namespace is
> > being deprecated take this opportunity to encode the neoverse data into
> > mem_lvl_num.
> 
> Since Neoverse is mentioned in the commit message, I think there should be a comment somewhere in the code as well.
>

> > For loads that hit in a the LLC snoop filter and are fullfilled from a
> > higher level cache, it's not usually clear what the true level of the
> > cache the data came from (i.e. a transfer from a core could come from
> > it's L1 or L2). Instead of making an assumption of where the line came
> > from, add support for incrementing HITM if the source is CACHE_ANY.
> >
> > Since other architectures don't seem to populate the mem_lvl_num field
> > here there shouldn't be a change in functionality.
> >
> > Signed-off-by: Ali Saidi <alisaidi@amazon.com>
> > ---
> >  tools/perf/util/mem-events.c | 14 ++++++++++----
> >  1 file changed, 10 insertions(+), 4 deletions(-)
> >
> > diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
> > index ed0ab838bcc5..6c3fd4aac7ae 100644
> > --- a/tools/perf/util/mem-events.c
> > +++ b/tools/perf/util/mem-events.c
> > @@ -485,6 +485,7 @@ int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
> >  	u64 daddr  = mi->daddr.addr;
> >  	u64 op     = data_src->mem_op;
> >  	u64 lvl    = data_src->mem_lvl;
> > +	u64 lnum   = data_src->mem_lvl_num;
> >  	u64 snoop  = data_src->mem_snoop;
> >  	u64 lock   = data_src->mem_lock;
> >  	u64 blk    = data_src->mem_blk;
> > @@ -527,16 +528,18 @@ do {				\
> >  			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
> >  			if (lvl & P(LVL, IO))  stats->ld_io++;
> >  			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
> > -			if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
> > -			if (lvl & P(LVL, L2 )) stats->ld_l2hit++;
> > -			if (lvl & P(LVL, L3 )) {
> > +			if (lvl & P(LVL, L1) || lnum == P(LVLNUM, L1))
> > +				stats->ld_l1hit++;
> > +			if (lvl & P(LVL, L2) || lnum == P(LVLNUM, L2))
> > +				stats->ld_l2hit++;

It's good to split into two patches: one patch is to add statistics for
field 'mem_lvl_num', the second patch is to handle HITM tags.

> > +			if (lvl & P(LVL, L3) || lnum == P(LVLNUM, L4)) {

It's a bit weird that we take either PERF_MEM_LVL_L3 or
PERF_MEM_LVLNUM_L4 as the last level local cache in the same condition
checking.

> According to a comment in the previous patch, using L4 is specific to Neoverse, right?
> 
> Maybe we need to distinguish the Neoverse case from the generic one here as well
> 
> if (is_neoverse)
> // treat L4 as llc
> else
> // treat L3 as llc

I personally think it's not a good idea to distinguish platforms in the decoding code.

To make the statistics clearer, we can first increment hit values
for every cache level respectively;  so we can consider adding two
extra statistics items 'stats->ld_l3hit' and 'stats->ld_l4hit'.

        if (lvl & P(LVL, L3) || lnum == P(LVLNUM, L3))
                stats->ld_l3hit++;
        if (lnum == P(LVLNUM, L4))
                stats->ld_l4hit++;

> >  				if (snoop & P(SNOOP, HITM))
> >  					HITM_INC(lcl_hitm);
> >  				else
> >  					stats->ld_llchit++;

For the statistics of 'ld_llchit' and 'lcl_hitm', please see below comment.

> >  			}
> >  
> > -			if (lvl & P(LVL, LOC_RAM)) {
> > +			if (lvl & P(LVL, LOC_RAM) || lnum == P(LVLNUM, RAM)) {
> >  				stats->lcl_dram++;
> >  				if (snoop & P(SNOOP, HIT))
> >  					stats->ld_shared++;
> > @@ -564,6 +567,9 @@ do {				\
> >  				HITM_INC(rmt_hitm);
> >  		}
> >  
> > +		if (lnum == P(LVLNUM, ANY_CACHE) && snoop & P(SNOOP, HITM))
> > +			HITM_INC(lcl_hitm);
> > +

The condition checking of "lnum == P(LVLNUM, ANY_CACHE)" is a bit
suspicious and it might be fragile for supporting multiple archs.

So I am just wondering if it's possible that we add a new field
'llc_level' in the structure 'mem_info', we can initialize this field
based on different memory hardware events (e.g. Intel mem event,
Arm SPE, etc).  During the decoding phase, the local last level cache
is dynamically set to 'mem_info:: llc_level', we can base on it to
increment 'ld_llchit' and 'lcl_hitm', the code is like below:

                 if ((lvl & P(LVL, REM_CCE1)) ||
                     (lvl & P(LVL, REM_CCE2)) ||
                      mrem) {
                         if (snoop & P(SNOOP, HIT))
                                 stats->rmt_hit++;
                         else if (snoop & P(SNOOP, HITM))
                                 HITM_INC(rmt_hitm);
+               } else {
+                       if ((snoop & P(SNOOP, HIT)) && (lnum == mi->llc_level))
+                               stats->ld_llchit++;
+                       else if (snoop & P(SNOOP, HITM))
+                               HITM_INC(lcl_hitm);
                 }

Thanks,
Leo

> >  		if ((lvl & P(LVL, MISS)))
> >  			stats->ld_miss++;
> >  

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 1/2] perf arm-spe: Use SPE data source for neoverse cores
  2022-03-13 11:46     ` Leo Yan
@ 2022-03-13 19:06       ` Ali Saidi
  -1 siblings, 0 replies; 32+ messages in thread
From: Ali Saidi @ 2022-03-13 19:06 UTC (permalink / raw)
  To: leo.yan
  Cc: acme, alexander.shishkin, alisaidi, andrew.kilroy, benh,
	german.gomez, james.clark, john.garry, jolsa, kjain, lihuafei1,
	linux-arm-kernel, linux-kernel, linux-perf-users, mark.rutland,
	mathieu.poirier, mingo, namhyung, peterz, will, yao.jin

On Sun, 13 Mar 2022 11:47:58 +0000 Leo Yan wrote:
> Hi Ali, German,
> 
> On Wed, Mar 02, 2022 at 11:59:05AM +0000, German Gomez wrote:
> > Hi Ali,
> > 
> > On 21/02/2022 22:47, Ali Saidi wrote:
> > > When synthesizing data from SPE, augment the type with source information
> > > for Arm Neoverse cores. The field is IMPLDEF but the Neoverse cores all use
> > > the same encoding. I can't find encoding information for any other SPE
> > > implementations to unify their choices with Arm's thus that is left for
> > > future work.
[snip]
> > >  
> > > +static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *record,
> > > +						union perf_mem_data_src *data_src)
> > > +{
> > > +	switch (record->source) {
> > > +	case ARM_SPE_NV_L1D:
> > > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > 
> > I understand mem_lvl is deprecated but shouldn't we add the level bits here as well for backwards compat?
> 
> Thanks for pointing out this.  Yeah, I think German's suggestion is
> valid, the commit 6ae5fa61d27d ("perf/x86: Fix data source decoding
> for Skylake") introduces new field 'mem_lvl_num', but it also keeps
> backwards compatible for the field 'mem_lvl'.
I thought about that, but then I'm making some assumption about how to fit
this into the old LVL framework, which is perhaps OK (afaik there are no
Neoverse systems with more than 3 cache levels). What stopped me was that
perf_mem__lvl_scnprintf() does the wrong thing when both are set so I
assumed that setting both was not the right course of action.

> 
> > > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
> > > +		break;
> > > +	case ARM_SPE_NV_L2:
> > > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
> > > +		break;
> > > +	case ARM_SPE_NV_PEER_CORE:
> > > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > > +		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
> > > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
> 
> For PEER_CORE data source, we don't know if it's coming from peer
> core's L1 cache or L2 cache, right?
We don't.

> 
> If so, do you think if it's possible to retrieve more accurate info
> from the field "record->type"?
No, we just don't know and it really doesn't matter. The main reason to
understand the source is to understand the penalty of data coming from
the source and that it's coming from a core should be sufficient.

> 
> > > +		break;
> > > +	/*
> > > +	 * We don't know if this is L1, L2, or even L3 (for the cases the system
> > > +	 * has an L3), but we do know it was a cache-2-cache transfer, so set
> > > +	 * SNOOP_HITM
> > > +	 */
> > > +	case ARM_SPE_NV_LCL_CLSTR:
> > > +	case ARM_SPE_NV_PEER_CLSTR:
> > > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > > +		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
> > > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
> 
> Seems to me, we need to add attribution to indicate the difference
> between ARM_SPE_NV_PEER_CORE and ARM_SPE_NV_LCL_CLSTR.
I don't think we really do, see my reasoning above. 

> 
> For ARM_SPE_NV_PEER_CLSTR data source, should we set any "remote"
> attribution as well?

No, we should leave remote for data coming from another chip/socket
which is really impactful. 

> 
> > > +		break;
> > > +	/*
> > > +	 * System cache is assumed to be L4, as cluster cache (if it exists)
> > > +	 * would be L3 cache on Neoverse platforms
> > > +	 */
> > > +	case ARM_SPE_NV_SYS_CACHE:
> > > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L4;
> > > +		break;
> > > +	/*
> > > +	 * We don't know what level it hit in, except it came from the other
> > > +	 * socket
> > > +	 */
> > > +	case ARM_SPE_NV_REMOTE:
> > > +		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
> > > +		data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
> > > +		break;
> 
> Just curious, is it possible that 'record->source' combines multiple
> bits?  Like we can get a data source value with:
> 
>   ARM_SPE_NV_REMOTE | ARM_SPE_NV_DRAM

source encodes a single value (not bits that represent flags) on Neoverse
cores.

[snip]
> > > @@ -796,6 +868,10 @@ static int arm_spe_process_event(struct perf_session *session,
> > >  	u64 timestamp;
> > >  	struct arm_spe *spe = container_of(session->auxtrace,
> > >  			struct arm_spe, auxtrace);
> > > +	const char *cpuid = perf_env__cpuid(session->evlist->env);
> > > +	u64 midr = strtol(cpuid, NULL, 16);
> > > +
> > > +	spe->midr = midr;
> > 
> > I think this midr setup belongs in the arm_spe_process_auxtrace_info callback instead.
> 
> Yeah, arm_spe_process_event() would be invoked multiple times for
> processing perf events.  arm_spe_process_auxtrace_info() would be a
> good place to initialize midr.

Will do.

Thanks,
Ali


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 1/2] perf arm-spe: Use SPE data source for neoverse cores
@ 2022-03-13 19:06       ` Ali Saidi
  0 siblings, 0 replies; 32+ messages in thread
From: Ali Saidi @ 2022-03-13 19:06 UTC (permalink / raw)
  To: leo.yan
  Cc: acme, alexander.shishkin, alisaidi, andrew.kilroy, benh,
	german.gomez, james.clark, john.garry, jolsa, kjain, lihuafei1,
	linux-arm-kernel, linux-kernel, linux-perf-users, mark.rutland,
	mathieu.poirier, mingo, namhyung, peterz, will, yao.jin

On Sun, 13 Mar 2022 11:47:58 +0000 Leo Yan wrote:
> Hi Ali, German,
> 
> On Wed, Mar 02, 2022 at 11:59:05AM +0000, German Gomez wrote:
> > Hi Ali,
> > 
> > On 21/02/2022 22:47, Ali Saidi wrote:
> > > When synthesizing data from SPE, augment the type with source information
> > > for Arm Neoverse cores. The field is IMPLDEF but the Neoverse cores all use
> > > the same encoding. I can't find encoding information for any other SPE
> > > implementations to unify their choices with Arm's thus that is left for
> > > future work.
[snip]
> > >  
> > > +static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *record,
> > > +						union perf_mem_data_src *data_src)
> > > +{
> > > +	switch (record->source) {
> > > +	case ARM_SPE_NV_L1D:
> > > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > 
> > I understand mem_lvl is deprecated but shouldn't we add the level bits here as well for backwards compat?
> 
> Thanks for pointing out this.  Yeah, I think German's suggestion is
> valid, the commit 6ae5fa61d27d ("perf/x86: Fix data source decoding
> for Skylake") introduces new field 'mem_lvl_num', but it also keeps
> backwards compatible for the field 'mem_lvl'.
I thought about that, but then I'm making some assumption about how to fit
this into the old LVL framework, which is perhaps OK (afaik there are no
Neoverse systems with more than 3 cache levels). What stopped me was that
perf_mem__lvl_scnprintf() does the wrong thing when both are set so I
assumed that setting both was not the right course of action.

> 
> > > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
> > > +		break;
> > > +	case ARM_SPE_NV_L2:
> > > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
> > > +		break;
> > > +	case ARM_SPE_NV_PEER_CORE:
> > > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > > +		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
> > > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
> 
> For PEER_CORE data source, we don't know if it's coming from peer
> core's L1 cache or L2 cache, right?
We don't.

> 
> If so, do you think if it's possible to retrieve more accurate info
> from the field "record->type"?
No, we just don't know and it really doesn't matter. The main reason to
understand the source is to understand the penalty of data coming from
the source and that it's coming from a core should be sufficient.

> 
> > > +		break;
> > > +	/*
> > > +	 * We don't know if this is L1, L2, or even L3 (for the cases the system
> > > +	 * has an L3), but we do know it was a cache-2-cache transfer, so set
> > > +	 * SNOOP_HITM
> > > +	 */
> > > +	case ARM_SPE_NV_LCL_CLSTR:
> > > +	case ARM_SPE_NV_PEER_CLSTR:
> > > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > > +		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
> > > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
> 
> Seems to me, we need to add attribution to indicate the difference
> between ARM_SPE_NV_PEER_CORE and ARM_SPE_NV_LCL_CLSTR.
I don't think we really do, see my reasoning above. 

> 
> For ARM_SPE_NV_PEER_CLSTR data source, should we set any "remote"
> attribution as well?

No, we should leave remote for data coming from another chip/socket
which is really impactful. 

> 
> > > +		break;
> > > +	/*
> > > +	 * System cache is assumed to be L4, as cluster cache (if it exists)
> > > +	 * would be L3 cache on Neoverse platforms
> > > +	 */
> > > +	case ARM_SPE_NV_SYS_CACHE:
> > > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L4;
> > > +		break;
> > > +	/*
> > > +	 * We don't know what level it hit in, except it came from the other
> > > +	 * socket
> > > +	 */
> > > +	case ARM_SPE_NV_REMOTE:
> > > +		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
> > > +		data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
> > > +		break;
> 
> Just curious, is it possible that 'record->source' combines multiple
> bits?  Like we can get a data source value with:
> 
>   ARM_SPE_NV_REMOTE | ARM_SPE_NV_REMOTE

source encodes a single value (not bits that represent flags) on Neoverse
cores.

[snip]
> > > @@ -796,6 +868,10 @@ static int arm_spe_process_event(struct perf_session *session,
> > >  	u64 timestamp;
> > >  	struct arm_spe *spe = container_of(session->auxtrace,
> > >  			struct arm_spe, auxtrace);
> > > +	const char *cpuid = perf_env__cpuid(session->evlist->env);
> > > +	u64 midr = strtol(cpuid, NULL, 16);
> > > +
> > > +	spe->midr = midr;
> > 
> > I think this midr setup belongs in the arm_spe_process_auxtrace_info callback instead.
> 
> Yeah, arm_spe_process_event() would be invoked multiple times for
> processing perf events.  arm_spe_process_auxtrace_info() would be a
> good place to initialize midr.

Will do.

Thanks,
Ali


_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
  2022-03-13 12:44       ` Leo Yan
@ 2022-03-13 19:19         ` Ali Saidi
  -1 siblings, 0 replies; 32+ messages in thread
From: Ali Saidi @ 2022-03-13 19:19 UTC (permalink / raw)
  To: leo.yan
  Cc: acme, alexander.shishkin, alisaidi, andrew.kilroy, benh,
	german.gomez, james.clark, john.garry, jolsa, kjain, lihuafei1,
	linux-arm-kernel, linux-kernel, linux-perf-users, mark.rutland,
	mathieu.poirier, mingo, namhyung, peterz, will, yao.jin

Hi Leo,

On Sun, 13 Mar 2022 12:46:02 +0000, Leo Yan wrote:
> On Wed, Mar 02, 2022 at 03:39:04PM +0000, German Gomez wrote:
> > 
> > On 21/02/2022 22:48, Ali Saidi wrote:
> > > Current code only support HITM statistics for last level cache (LLC)
> > > when mem_lvl encodes the level. On existing Arm64 machines there are as
> > > many as four levels cache and this change supports decoding l1, l2, and
> > > llc hits from the mem_lvl_num data. Given that the mem_lvl namespace is
> > > being deprecated take this opportunity to encode the neoverse data into
> > > mem_lvl_num.
> > 
> > Since Neoverse is mentioned in the commit message, I think there should be a comment somewhere in the code as well.
> >
> 
> > > For loads that hit in a the LLC snoop filter and are fullfilled from a
> > > higher level cache, it's not usually clear what the true level of the
> > > cache the data came from (i.e. a transfer from a core could come from
> > > it's L1 or L2). Instead of making an assumption of where the line came
> > > from, add support for incrementing HITM if the source is CACHE_ANY.
> > >
> > > Since other architectures don't seem to populate the mem_lvl_num field
> > > here there shouldn't be a change in functionality.
> > >
> > > Signed-off-by: Ali Saidi <alisaidi@amazon.com>
> > > ---
> > >  tools/perf/util/mem-events.c | 14 ++++++++++----
> > >  1 file changed, 10 insertions(+), 4 deletions(-)
> > >
> > > diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
> > > index ed0ab838bcc5..6c3fd4aac7ae 100644
> > > --- a/tools/perf/util/mem-events.c
> > > +++ b/tools/perf/util/mem-events.c
> > > @@ -485,6 +485,7 @@ int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
> > >  	u64 daddr  = mi->daddr.addr;
> > >  	u64 op     = data_src->mem_op;
> > >  	u64 lvl    = data_src->mem_lvl;
> > > +	u64 lnum   = data_src->mem_lvl_num;
> > >  	u64 snoop  = data_src->mem_snoop;
> > >  	u64 lock   = data_src->mem_lock;
> > >  	u64 blk    = data_src->mem_blk;
> > > @@ -527,16 +528,18 @@ do {				\
> > >  			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
> > >  			if (lvl & P(LVL, IO))  stats->ld_io++;
> > >  			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
> > > -			if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
> > > -			if (lvl & P(LVL, L2 )) stats->ld_l2hit++;
> > > -			if (lvl & P(LVL, L3 )) {
> > > +			if (lvl & P(LVL, L1) || lnum == P(LVLNUM, L1))
> > > +				stats->ld_l1hit++;
> > > +			if (lvl & P(LVL, L2) || lnum == P(LVLNUM, L2))
> > > +				stats->ld_l2hit++;
> 
> It's good to split into two patches: one patch is to add statistics for
> field 'mem_lvl_num', the second patch is to handle HITM tags.
> 
> > > +			if (lvl & P(LVL, L3) || lnum == P(LVLNUM, L4)) {
> 
> It's a bit weird that we take either PERF_MEM_LVL_L3 or
> PERF_MEM_LVLNUM_L4 as the last level local cache in the same condition
> checking.
> 
> > According to a comment in the previous patch, using L4 is specific to Neoverse, right?
> > 
> > Maybe we need to distinguish the Neoverse case from the generic one here as well
> > 
> > if (is_neoverse)
> > // treat L4 as llc
> > else
> > // treat L3 as llc
> 
> I personally think it's not good idea to distinguish platforms in the decoding code.

I agree here. The more we talk about this, the more I'm wondering if we're
spending too much code solving a problem that doesn't exist. I know of no
Neoverse systems that actually have 4 cache levels, they all actually have three
even though it's technically possible to have four.  I have some doubts anyone
will actually build four levels of cache and perhaps the most prudent path here
is to assume only three levels (and adjust the previous patch) until someone 
actually produces a system with four levels instead of a lot of code that is
never actually exercised?

Thanks,
Ali


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
@ 2022-03-13 19:19         ` Ali Saidi
  0 siblings, 0 replies; 32+ messages in thread
From: Ali Saidi @ 2022-03-13 19:19 UTC (permalink / raw)
  To: leo.yan
  Cc: acme, alexander.shishkin, alisaidi, andrew.kilroy, benh,
	german.gomez, james.clark, john.garry, jolsa, kjain, lihuafei1,
	linux-arm-kernel, linux-kernel, linux-perf-users, mark.rutland,
	mathieu.poirier, mingo, namhyung, peterz, will, yao.jin

Hi Leo,

On Sun, 13 Mar 2022 12:46:02 +0000, Leo Yan wrote:
> On Wed, Mar 02, 2022 at 03:39:04PM +0000, German Gomez wrote:
> > 
> > On 21/02/2022 22:48, Ali Saidi wrote:
> > > Current code only supports HITM statistics for last level cache (LLC)
> > > when mem_lvl encodes the level. On existing Arm64 machines there are as
> > > many as four levels of cache and this change supports decoding l1, l2, and
> > > llc hits from the mem_lvl_num data. Given that the mem_lvl namespace is
> > > being deprecated, take this opportunity to encode the neoverse data into
> > > mem_lvl_num.
> > 
> > Since Neoverse is mentioned in the commit message, I think there should be a comment somewhere in the code as well.
> >
> 
> > > For loads that hit in the LLC snoop filter and are fulfilled from a
> > > higher level cache, it's not usually clear what the true level of the
> > > cache the data came from (i.e. a transfer from a core could come from
> > > its L1 or L2). Instead of making an assumption of where the line came
> > > from, add support for incrementing HITM if the source is CACHE_ANY.
> > >
> > > Since other architectures don't seem to populate the mem_lvl_num field
> > > here there shouldn't be a change in functionality.
> > >
> > > Signed-off-by: Ali Saidi <alisaidi@amazon.com>
> > > ---
> > >  tools/perf/util/mem-events.c | 14 ++++++++++----
> > >  1 file changed, 10 insertions(+), 4 deletions(-)
> > >
> > > diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
> > > index ed0ab838bcc5..6c3fd4aac7ae 100644
> > > --- a/tools/perf/util/mem-events.c
> > > +++ b/tools/perf/util/mem-events.c
> > > @@ -485,6 +485,7 @@ int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
> > >  	u64 daddr  = mi->daddr.addr;
> > >  	u64 op     = data_src->mem_op;
> > >  	u64 lvl    = data_src->mem_lvl;
> > > +	u64 lnum   = data_src->mem_lvl_num;
> > >  	u64 snoop  = data_src->mem_snoop;
> > >  	u64 lock   = data_src->mem_lock;
> > >  	u64 blk    = data_src->mem_blk;
> > > @@ -527,16 +528,18 @@ do {				\
> > >  			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
> > >  			if (lvl & P(LVL, IO))  stats->ld_io++;
> > >  			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
> > > -			if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
> > > -			if (lvl & P(LVL, L2 )) stats->ld_l2hit++;
> > > -			if (lvl & P(LVL, L3 )) {
> > > +			if (lvl & P(LVL, L1) || lnum == P(LVLNUM, L1))
> > > +				stats->ld_l1hit++;
> > > +			if (lvl & P(LVL, L2) || lnum == P(LVLNUM, L2))
> > > +				stats->ld_l2hit++;
> 
> It's good to split into two patches: one patch is to add statistics for
> field 'mem_lvl_num', the second patch is to handle HITM tags.
> 
> > > +			if (lvl & P(LVL, L3) || lnum == P(LVLNUM, L4)) {
> 
> It's a bit weird that we take either PERF_MEM_LVL_L3 or
> PERF_MEM_LVLNUM_L4 as the last level local cache in the same condition
> checking.
> 
> > According to a comment in the previous patch, using L4 is specific to Neoverse, right?
> > 
> > Maybe we need to distinguish the Neoverse case from the generic one here as well
> > 
> > if (is_neoverse)
> > // treat L4 as llc
> > else
> > // treat L3 as llc
> 
> I personally think it's not good idea to distinguish platforms in the decoding code.

I agree here. The more we talk about this, the more I'm wondering if we're
spending too much code solving a problem that doesn't exist. I know of no
Neoverse systems that actually have 4 cache levels, they all actually have three
even though it's technically possible to have four.  I have some doubts anyone
will actually build four levels of cache and perhaps the most prudent path here
is to assume only three levels (and adjust the previous patch) until someone 
actually produces a system with four levels instead of a lot of code that is
never actually exercised?

Thanks,
Ali


_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 1/2] perf arm-spe: Use SPE data source for neoverse cores
  2022-03-13 19:06       ` Ali Saidi
@ 2022-03-14  4:05         ` Leo Yan
  -1 siblings, 0 replies; 32+ messages in thread
From: Leo Yan @ 2022-03-14  4:05 UTC (permalink / raw)
  To: Ali Saidi
  Cc: acme, alexander.shishkin, andrew.kilroy, benh, german.gomez,
	james.clark, john.garry, jolsa, kjain, lihuafei1,
	linux-arm-kernel, linux-kernel, linux-perf-users, mark.rutland,
	mathieu.poirier, mingo, namhyung, peterz, will, yao.jin

On Sun, Mar 13, 2022 at 07:06:19PM +0000, Ali Saidi wrote:

[...]

> > > > +static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *record,
> > > > +						union perf_mem_data_src *data_src)
> > > > +{
> > > > +	switch (record->source) {
> > > > +	case ARM_SPE_NV_L1D:
> > > > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > > 
> > > I understand mem_lvl is deprecated but shouldn't we add the level bits here as well for backwards compat?
> > 
> > Thanks for pointing out this.  Yeah, I think German's suggestion is
> > valid, the commit 6ae5fa61d27d ("perf/x86: Fix data source decoding
> > for Skylake") introduces new field 'mem_lvl_num', but it also keeps
> > backwards compatible for the field 'mem_lvl'.
> >
> I thought about that, but then I'm making some assumption about how to fit
> this into the old LVL framework, which is perhaps OK (afaik there are no
> Neoverse systems with more than 3 cache levels). What stopped me was that
> perf_mem__lvl_scnprintf() does the wrong thing when both are set so I
> assumed that setting both was not the right course of action.

Thanks for pointing out this.  I looked at perf_mem__lvl_scnprintf()
and it prints both for fields 'mem_lvl' and 'mem_lvl_num'.  Thus I can
see the output result shows the duplicate info for memory access like
"L1 or L1 hit", "L3 or L3 hit", etc.  This would be a common issue
across archs.  Am I missing any other issues?

> > > > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
> > > > +		break;
> > > > +	case ARM_SPE_NV_L2:
> > > > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > > > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
> > > > +		break;
> > > > +	case ARM_SPE_NV_PEER_CORE:
> > > > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > > > +		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
> > > > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
> > 
> > For PEER_CORE data source, we don't know if it's coming from peer
> > core's L1 cache or L2 cache, right?
>
> We don't.
> 
> > If so, do you think if it's possible to retrieve more accurate info
> > from the field "record->type"?
>
> No, we just don't know and it really doesn't matter. The main reason to
> understand the source is to understand the penalty of data coming from
> the source and that it's coming from a core should be sufficient.

Okay, the question is Neoverse has three different data sources
"ARM_SPE_NV_PEER_CORE", "ARM_SPE_NV_LCL_CLSTR" and
"ARM_SPE_NV_PEER_CLSTR", but the patch only uses the same attribution
for all of them.

To be honest, I don't have a precise understanding of the definition for
these three types; it seems to me "ARM_SPE_NV_PEER_CORE" means fetching
data cache from a peer core (like SMT things), "ARM_SPE_NV_LCL_CLSTR"
means cache coherency within the same cluster with the SCU, and
"ARM_SPE_NV_PEER_CLSTR" means the coherency happens over an external bus
(like CCI or CMN).  So I'd like to suggest considering extending the
level definitions so they can allow us to express the data source for Arm
arch precisely.

It's important to understand current cache level definitions which is
derived from x86 arch and think what's the good way to match and
extend for Arm memory hierarchy.  I will think a bit more for this,
and if have any idea will share back.

Thanks,
Leo

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 1/2] perf arm-spe: Use SPE data source for neoverse cores
@ 2022-03-14  4:05         ` Leo Yan
  0 siblings, 0 replies; 32+ messages in thread
From: Leo Yan @ 2022-03-14  4:05 UTC (permalink / raw)
  To: Ali Saidi
  Cc: acme, alexander.shishkin, andrew.kilroy, benh, german.gomez,
	james.clark, john.garry, jolsa, kjain, lihuafei1,
	linux-arm-kernel, linux-kernel, linux-perf-users, mark.rutland,
	mathieu.poirier, mingo, namhyung, peterz, will, yao.jin

On Sun, Mar 13, 2022 at 07:06:19PM +0000, Ali Saidi wrote:

[...]

> > > > +static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *record,
> > > > +						union perf_mem_data_src *data_src)
> > > > +{
> > > > +	switch (record->source) {
> > > > +	case ARM_SPE_NV_L1D:
> > > > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > > 
> > > I understand mem_lvl is deprecated but shouldn't we add the level bits here as well for backwards compat?
> > 
> > Thanks for pointing out this.  Yeah, I think German's suggestion is
> > valid, the commit 6ae5fa61d27d ("perf/x86: Fix data source decoding
> > for Skylake") introduces new field 'mem_lvl_num', but it also keeps
> > backwards compatible for the field 'mem_lvl'.
> >
> I thought about that, but then I'm making some assumption about how to fit
> this into the old LVL framework, which is perhaps OK (afaik there are no
> Neoverse systems with more than 3 cache levels). What stopped me was that
> perf_mem__lvl_scnprintf() does the wrong thing when both are set so I
> assumed that setting both was not the right course of action.

Thanks for pointing out this.  I looked at perf_mem__lvl_scnprintf()
and it prints both for fields 'mem_lvl' and 'mem_lvl_num'.  Thus I can
see the output result shows the duplicate info for memory access like
"L1 or L1 hit", "L3 or L3 hit", etc.  This would be a common issue
across archs.  Am I missing any other issues?

> > > > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
> > > > +		break;
> > > > +	case ARM_SPE_NV_L2:
> > > > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > > > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
> > > > +		break;
> > > > +	case ARM_SPE_NV_PEER_CORE:
> > > > +		data_src->mem_lvl = PERF_MEM_LVL_HIT;
> > > > +		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
> > > > +		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
> > 
> > For PEER_CORE data source, we don't know if it's coming from peer
> > core's L1 cache or L2 cache, right?
>
> We don't.
> 
> > If so, do you think if it's possible to retrieve more accurate info
> > from the field "record->type"?
>
> No, we just don't know and it really doesn't matter. The main reason to
> understand the source is to understand the penalty of data coming from
> the source and that it's coming from a core should be sufficient.

Okay, the question is Neoverse has three different data sources
"ARM_SPE_NV_PEER_CORE", "ARM_SPE_NV_LCL_CLSTR" and
"ARM_SPE_NV_PEER_CLSTR", but the patch only uses the same attribution
for all of them.

To be honest, I don't have a precise understanding of the definition for
these three types; it seems to me "ARM_SPE_NV_PEER_CORE" means fetching
data cache from a peer core (like SMT things), "ARM_SPE_NV_LCL_CLSTR"
means cache coherency within the same cluster with the SCU, and
"ARM_SPE_NV_PEER_CLSTR" means the coherency happens over an external bus
(like CCI or CMN).  So I'd like to suggest considering extending the
level definitions so they can allow us to express the data source for Arm
arch precisely.

It's important to understand current cache level definitions which is
derived from x86 arch and think what's the good way to match and
extend for Arm memory hierarchy.  I will think a bit more for this,
and if have any idea will share back.

Thanks,
Leo

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
  2022-03-13 19:19         ` Ali Saidi
@ 2022-03-14  6:33           ` Leo Yan
  -1 siblings, 0 replies; 32+ messages in thread
From: Leo Yan @ 2022-03-14  6:33 UTC (permalink / raw)
  To: Ali Saidi
  Cc: acme, alexander.shishkin, andrew.kilroy, benh, german.gomez,
	james.clark, john.garry, jolsa, kjain, lihuafei1,
	linux-arm-kernel, linux-kernel, linux-perf-users, mark.rutland,
	mathieu.poirier, mingo, namhyung, peterz, will, yao.jin

On Sun, Mar 13, 2022 at 07:19:33PM +0000, Ali Saidi wrote:

[...]

> > > > +			if (lvl & P(LVL, L3) || lnum == P(LVLNUM, L4)) {
> > >
> > > According to a comment in the previous patch, using L4 is specific to Neoverse, right?
> > > 
> > > Maybe we need to distinguish the Neoverse case from the generic one here as well
> > > 
> > > if (is_neoverse)
> > > // treat L4 as llc
> > > else
> > > // treat L3 as llc
> > 
> > I personally think it's not good idea to distinguish platforms in the decoding code.
> 
> I agree here. The more we talk about this, the more I'm wondering if we're
> spending too much code solving a problem that doesn't exist. I know of no
> Neoverse systems that actually have 4 cache levels, they all actually have three
> even though it's technically possible to have four.  I have some doubts anyone
> will actually build four levels of cache and perhaps the most prudent path here
> is to assume only three levels (and adjust the previous patch) until someone 
> actually produces a system with four levels instead of a lot of code that is
> never actually exercised?

I am not the right person to say L4 cache is not implemented in Neoverse
platforms; my guess for a "System cache" data source might be L3 or
L4 and it is implementation dependent.  Maybe German or Arm mates
could confirm for this.

Thanks,
Leo

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
@ 2022-03-14  6:33           ` Leo Yan
  0 siblings, 0 replies; 32+ messages in thread
From: Leo Yan @ 2022-03-14  6:33 UTC (permalink / raw)
  To: Ali Saidi
  Cc: acme, alexander.shishkin, andrew.kilroy, benh, german.gomez,
	james.clark, john.garry, jolsa, kjain, lihuafei1,
	linux-arm-kernel, linux-kernel, linux-perf-users, mark.rutland,
	mathieu.poirier, mingo, namhyung, peterz, will, yao.jin

On Sun, Mar 13, 2022 at 07:19:33PM +0000, Ali Saidi wrote:

[...]

> > > > +			if (lvl & P(LVL, L3) || lnum == P(LVLNUM, L4)) {
> > >
> > > According to a comment in the previous patch, using L4 is specific to Neoverse, right?
> > > 
> > > Maybe we need to distinguish the Neoverse case from the generic one here as well
> > > 
> > > if (is_neoverse)
> > > // treat L4 as llc
> > > else
> > > // treat L3 as llc
> > 
> > I personally think it's not good idea to distinguish platforms in the decoding code.
> 
> I agree here. The more we talk about this, the more I'm wondering if we're
> spending too much code solving a problem that doesn't exist. I know of no
> Neoverse systems that actually have 4 cache levels, they all actually have three
> even though it's technically possible to have four.  I have some doubts anyone
> will actually build four levels of cache and perhaps the most prudent path here
> is to assume only three levels (and adjust the previous patch) until someone 
> actually produces a system with four levels instead of a lot of code that is
> never actually exercised?

I am not the right person to say L4 cache is not implemented in Neoverse
platforms; my guess for a "System cache" data source might be L3 or
L4 and it is implementation dependent.  Maybe German or Arm mates
could confirm for this.

Thanks,
Leo

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
  2022-03-14  6:33           ` Leo Yan
@ 2022-03-14 18:00             ` German Gomez
  -1 siblings, 0 replies; 32+ messages in thread
From: German Gomez @ 2022-03-14 18:00 UTC (permalink / raw)
  To: Leo Yan, Ali Saidi
  Cc: acme, alexander.shishkin, andrew.kilroy, benh, james.clark,
	john.garry, jolsa, kjain, lihuafei1, linux-arm-kernel,
	linux-kernel, linux-perf-users, mark.rutland, mathieu.poirier,
	mingo, namhyung, peterz, will, yao.jin, Nick.Forrington

Hi Leo, Ali

On 14/03/2022 06:33, Leo Yan wrote:
> On Sun, Mar 13, 2022 at 07:19:33PM +0000, Ali Saidi wrote:
>
> [...]
>
>>>>> +			if (lvl & P(LVL, L3) || lnum == P(LVLNUM, L4)) {
>>>> According to a comment in the previous patch, using L4 is specific to Neoverse, right?
>>>>
>>>> Maybe we need to distinguish the Neoverse case from the generic one here as well
>>>>
>>>> if (is_neoverse)
>>>> // treat L4 as llc
>>>> else
>>>> // treat L3 as llc
>>> I personally think it's not good idea to distinguish platforms in the decoding code.
>> I agree here. The more we talk about this, the more I'm wondering if we're
>> spending too much code solving a problem that doesn't exist. I know of no
>> Neoverse systems that actually have 4 cache levels, they all actually have three
>> even though it's technically possible to have four.  I have some doubts anyone
>> will actually build four levels of cache and perhaps the most prudent path here
>> is to assume only three levels (and adjust the previous patch) until someone 
>> actually produces a system with four levels instead of a lot of code that is
>> never actually exercised?
> I am not right person to say L4 cache is not implemented in Neoverse
> platforms; my guess for a "System cache" data source might be L3 or
> L4 and it is a implementation dependent.  Maybe German or Arm mates
> could confirm for this.

I had a look at the TRMs for the N1[1], V1[2] and N2[3] Neoverse cores
(specifically the LL_CACHE_RD pmu events). If we were to assign a number
to the system cache (assuming all caches are implemented):

*For N1*, if L2 and L3 are implemented, system cache would follow at *L4*

*For V1 and N2*, if L2 is implemented, system cache would follow at *L3*
(these don't seem to have the same/similar per-cluster L3 cache from the N1)

There's also room in the PERF_MEM_LVLNUM_* namespace for a SYSTEM value,
if we want to consider that option as well.

[1] https://developer.arm.com/documentation/100616/0401/?lang=en
[2] https://developer.arm.com/documentation/101427/0101/?lang=en
[3] https://developer.arm.com/documentation/102099/0001/?lang=en

>
> Thanks,
> Leo

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
@ 2022-03-14 18:00             ` German Gomez
  0 siblings, 0 replies; 32+ messages in thread
From: German Gomez @ 2022-03-14 18:00 UTC (permalink / raw)
  To: Leo Yan, Ali Saidi
  Cc: acme, alexander.shishkin, andrew.kilroy, benh, james.clark,
	john.garry, jolsa, kjain, lihuafei1, linux-arm-kernel,
	linux-kernel, linux-perf-users, mark.rutland, mathieu.poirier,
	mingo, namhyung, peterz, will, yao.jin, Nick.Forrington

Hi Leo, Ali

On 14/03/2022 06:33, Leo Yan wrote:
> On Sun, Mar 13, 2022 at 07:19:33PM +0000, Ali Saidi wrote:
>
> [...]
>
>>>>> +			if (lvl & P(LVL, L3) || lnum == P(LVLNUM, L4)) {
>>>> According to a comment in the previous patch, using L4 is specific to Neoverse, right?
>>>>
>>>> Maybe we need to distinguish the Neoverse case from the generic one here as well
>>>>
>>>> if (is_neoverse)
>>>> // treat L4 as llc
>>>> else
>>>> // treat L3 as llc
>>> I personally think it's not good idea to distinguish platforms in the decoding code.
>> I agree here. The more we talk about this, the more I'm wondering if we're
>> spending too much code solving a problem that doesn't exist. I know of no
>> Neoverse systems that actually have 4 cache levels, they all actually have three
>> even though it's technically possible to have four.  I have some doubts anyone
>> will actually build four levels of cache and perhaps the most prudent path here
>> is to assume only three levels (and adjust the previous patch) until someone 
>> actually produces a system with four levels instead of a lot of code that is
>> never actually exercised?
> I am not right person to say L4 cache is not implemented in Neoverse
> platforms; my guess for a "System cache" data source might be L3 or
> L4 and it is a implementation dependent.  Maybe German or Arm mates
> could confirm for this.

I had a look at the TRMs for the N1[1], V1[2] and N2[3] Neoverse cores
(specifically the LL_CACHE_RD pmu events). If we were to assign a number
to the system cache (assuming all caches are implemented):

*For N1*, if L2 and L3 are implemented, system cache would follow at *L4*

*For V1 and N2*, if L2 is implemented, system cache would follow at *L3*
(these don't seem to have the same/similar per-cluster L3 cache from the N1)

There's also room in the PERF_MEM_LVLNUM_* namespace for a SYSTEM value,
if we want to consider that option as well.

[1] https://developer.arm.com/documentation/100616/0401/?lang=en
[2] https://developer.arm.com/documentation/101427/0101/?lang=en
[3] https://developer.arm.com/documentation/102099/0001/?lang=en

>
> Thanks,
> Leo

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
  2022-03-14 18:00             ` German Gomez
@ 2022-03-14 18:37               ` Ali Saidi
  -1 siblings, 0 replies; 32+ messages in thread
From: Ali Saidi @ 2022-03-14 18:37 UTC (permalink / raw)
  To: german.gomez
  Cc: Nick.Forrington, acme, alexander.shishkin, alisaidi,
	andrew.kilroy, benh, james.clark, john.garry, jolsa, kjain,
	leo.yan, lihuafei1, linux-arm-kernel, linux-kernel,
	linux-perf-users, mark.rutland, mathieu.poirier, mingo, namhyung,
	peterz, will, yao.jin

Hi German and Leo,

On   Mon, 14 Mar 2022 18:00:13 +0000, German Gomez wrote:
> Hi Leo, Ali
> 
> On 14/03/2022 06:33, Leo Yan wrote:
> > On Sun, Mar 13, 2022 at 07:19:33PM +0000, Ali Saidi wrote:
> >
> > [...]
> >
> >>>>> +			if (lvl & P(LVL, L3) || lnum == P(LVLNUM, L4)) {
> >>>> According to a comment in the previous patch, using L4 is specific to Neoverse, right?
> >>>>
> >>>> Maybe we need to distinguish the Neoverse case from the generic one here as well
> >>>>
> >>>> if (is_neoverse)
> >>>> // treat L4 as llc
> >>>> else
> >>>> // treat L3 as llc
> >>> I personally think it's not good idea to distinguish platforms in the decoding code.
> >> I agree here. The more we talk about this, the more I'm wondering if we're
> >> spending too much code solving a problem that doesn't exist. I know of no
> >> Neoverse systems that actually have 4 cache levels, they all actually have three
> >> even though it's technically possible to have four.  I have some doubts anyone
> >> will actually build four levels of cache and perhaps the most prudent path here
> >> is to assume only three levels (and adjust the previous patch) until someone 
> >> actually produces a system with four levels instead of a lot of code that is
> >> never actually exercised?
> > I am not right person to say L4 cache is not implemented in Neoverse
> > platforms; my guess for a "System cache" data source might be L3 or
> > L4 and it is a implementation dependent.  Maybe German or Arm mates
> > could confirm for this.
> 
> I had a look at the TRMs for the N1[1], V1[2] and N2[3] Neoverse cores
> (specifically the LL_CACHE_RD pmu events). If we were to assign a number
> to the system cache (assuming all caches are implemented):
> 
> *For N1*, if L2 and L3 are implemented, system cache would follow at *L4*
To date no one has built 4 level though. Everyone has only built three.

> *For V1 and N2*, if L2 is implemented, system cache would follow at *L3*
> (these don't seem to have the same/similar per-cluster L3 cache from the N1)

And in the future they're not able to build >3. German and Leo if there aren't
strong objections I think the best path forward is for me to respin these
assuming only 3 levels and if someone builds 4 in a far-off-future we can always
change the implementation then. Agreed?

Thanks,
Ali


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
@ 2022-03-14 18:37               ` Ali Saidi
  0 siblings, 0 replies; 32+ messages in thread
From: Ali Saidi @ 2022-03-14 18:37 UTC (permalink / raw)
  To: german.gomez
  Cc: Nick.Forrington, acme, alexander.shishkin, alisaidi,
	andrew.kilroy, benh, james.clark, john.garry, jolsa, kjain,
	leo.yan, lihuafei1, linux-arm-kernel, linux-kernel,
	linux-perf-users, mark.rutland, mathieu.poirier, mingo, namhyung,
	peterz, will, yao.jin

Hi German and Leo,

On   Mon, 14 Mar 2022 18:00:13 +0000, German Gomez wrote:
> Hi Leo, Ali
> 
> On 14/03/2022 06:33, Leo Yan wrote:
> > On Sun, Mar 13, 2022 at 07:19:33PM +0000, Ali Saidi wrote:
> >
> > [...]
> >
> >>>>> +			if (lvl & P(LVL, L3) || lnum == P(LVLNUM, L4)) {
> >>>> According to a comment in the previous patch, using L4 is specific to Neoverse, right?
> >>>>
> >>>> Maybe we need to distinguish the Neoverse case from the generic one here as well
> >>>>
> >>>> if (is_neoverse)
> >>>> // treat L4 as llc
> >>>> else
> >>>> // treat L3 as llc
> >>> I personally think it's not good idea to distinguish platforms in the decoding code.
> >> I agree here. The more we talk about this, the more I'm wondering if we're
> >> spending too much code solving a problem that doesn't exist. I know of no
> >> Neoverse systems that actually have 4 cache levels, they all actually have three
> >> even though it's technically possible to have four.  I have some doubts anyone
> >> will actually build four levels of cache and perhaps the most prudent path here
> >> is to assume only three levels (and adjust the previous patch) until someone 
> >> actually produces a system with four levels instead of a lot of code that is
> >> never actually exercised?
> > I am not right person to say L4 cache is not implemented in Neoverse
> > platforms; my guess for a "System cache" data source might be L3 or
> > L4 and it is a implementation dependent.  Maybe German or Arm mates
> > could confirm for this.
> 
> I had a look at the TRMs for the N1[1], V1[2] and N2[3] Neoverse cores
> (specifically the LL_CACHE_RD pmu events). If we were to assign a number
> to the system cache (assuming all caches are implemented):
> 
> *For N1*, if L2 and L3 are implemented, system cache would follow at *L4*
To date no one has built 4 level though. Everyone has only built three.

> *For V1 and N2*, if L2 is implemented, system cache would follow at *L3*
> (these don't seem to have the same/similar per-cluster L3 cache from the N1)

And in the future they're not able to build >3. German and Leo if there aren't
strong objections I think the best path forward is for me to respin these
assuming only 3 levels and if someone builds 4 in a far-off-future we can always
change the implementation then. Agreed?

Thanks,
Ali


_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
  2022-03-14 18:37               ` Ali Saidi
@ 2022-03-15 18:44                 ` German Gomez
  -1 siblings, 0 replies; 32+ messages in thread
From: German Gomez @ 2022-03-15 18:44 UTC (permalink / raw)
  To: Ali Saidi
  Cc: Nick.Forrington, acme, alexander.shishkin, andrew.kilroy, benh,
	james.clark, john.garry, jolsa, kjain, leo.yan, lihuafei1,
	linux-arm-kernel, linux-kernel, linux-perf-users, mark.rutland,
	mathieu.poirier, mingo, namhyung, peterz, will, yao.jin


On 14/03/2022 18:37, Ali Saidi wrote:
> Hi German and Leo,
>
> On   Mon, 14 Mar 2022 18:00:13 +0000, German Gomez wrote:
>> Hi Leo, Ali
>>
>> On 14/03/2022 06:33, Leo Yan wrote:
>>> On Sun, Mar 13, 2022 at 07:19:33PM +0000, Ali Saidi wrote:
>>>
>>> [...]
>>>
>>>>>>> +			if (lvl & P(LVL, L3) || lnum == P(LVLNUM, L4)) {
>>>>>> According to a comment in the previous patch, using L4 is specific to Neoverse, right?
>>>>>>
>>>>>> Maybe we need to distinguish the Neoverse case from the generic one here as well
>>>>>>
>>>>>> if (is_neoverse)
>>>>>> // treat L4 as llc
>>>>>> else
>>>>>> // treat L3 as llc
>>>>> I personally think it's not good idea to distinguish platforms in the decoding code.
>>>> I agree here. The more we talk about this, the more I'm wondering if we're
>>>> spending too much code solving a problem that doesn't exist. I know of no
>>>> Neoverse systems that actually have 4 cache levels, they all actually have three
>>>> even though it's technically possible to have four.  I have some doubts anyone
>>>> will actually build four levels of cache and perhaps the most prudent path here
>>>> is to assume only three levels (and adjust the previous patch) until someone 
>>>> actually produces a system with four levels instead of a lot of code that is
>>>> never actually exercised?
>>> I am not right person to say L4 cache is not implemented in Neoverse
>>> platforms; my guess for a "System cache" data source might be L3 or
>>> L4 and it is a implementation dependent.  Maybe German or Arm mates
>>> could confirm for this.
>> I had a look at the TRMs for the N1[1], V1[2] and N2[3] Neoverse cores
>> (specifically the LL_CACHE_RD pmu events). If we were to assign a number
>> to the system cache (assuming all caches are implemented):
>>
>> *For N1*, if L2 and L3 are implemented, system cache would follow at *L4*
> To date no one has built 4 level though. Everyone has only built three.

The N1SDP board advertises 4 levels (we use it regularly for testing perf patches)

| $ cat /sys/devices/system/cpu/cpu0/cache/index4/{level,shared_cpu_list}
| 4
| 0-3

Would it be a good idea to obtain the system cache level# from sysfs?

>> *For V1 and N2*, if L2 is implemented, system cache would follow at *L3*
>> (these don't seem to have the same/similar per-cluster L3 cache from the N1)
> And in the future they're not able to build >3. German and Leo if there aren't
> strong objections I think the best path forward is for me to respin these
> assuming only 3 levels and if someone builds 4 in a far-off-future we can always
> change the implementation then. Agreed?
>
> Thanks,
> Ali
>

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
@ 2022-03-15 18:44                 ` German Gomez
  0 siblings, 0 replies; 32+ messages in thread
From: German Gomez @ 2022-03-15 18:44 UTC (permalink / raw)
  To: Ali Saidi
  Cc: Nick.Forrington, acme, alexander.shishkin, andrew.kilroy, benh,
	james.clark, john.garry, jolsa, kjain, leo.yan, lihuafei1,
	linux-arm-kernel, linux-kernel, linux-perf-users, mark.rutland,
	mathieu.poirier, mingo, namhyung, peterz, will, yao.jin


On 14/03/2022 18:37, Ali Saidi wrote:
> Hi German and Leo,
>
> On   Mon, 14 Mar 2022 18:00:13 +0000, German Gomez wrote:
>> Hi Leo, Ali
>>
>> On 14/03/2022 06:33, Leo Yan wrote:
>>> On Sun, Mar 13, 2022 at 07:19:33PM +0000, Ali Saidi wrote:
>>>
>>> [...]
>>>
>>>>>>> +			if (lvl & P(LVL, L3) || lnum == P(LVLNUM, L4)) {
>>>>>> According to a comment in the previous patch, using L4 is specific to Neoverse, right?
>>>>>>
>>>>>> Maybe we need to distinguish the Neoverse case from the generic one here as well
>>>>>>
>>>>>> if (is_neoverse)
>>>>>> // treat L4 as llc
>>>>>> else
>>>>>> // treat L3 as llc
>>>>> I personally think it's not a good idea to distinguish platforms in the decoding code.
>>>> I agree here. The more we talk about this, the more I'm wondering if we're
>>>> spending too much code solving a problem that doesn't exist. I know of no
>>>> Neoverse systems that actually have 4 cache levels, they all actually have three
>>>> even though it's technically possible to have four.  I have some doubts anyone
>>>> will actually build four levels of cache and perhaps the most prudent path here
>>>> is to assume only three levels (and adjust the previous patch) until someone 
>>>> actually produces a system with four levels instead of a lot of code that is
>>>> never actually exercised?
>>> I am not the right person to say that an L4 cache is not implemented in
>>> Neoverse platforms; my guess is that a "System cache" data source might be
>>> L3 or L4, and it is implementation dependent.  Maybe German or other Arm
>>> colleagues could confirm this.
>> I had a look at the TRMs for the N1[1], V1[2] and N2[3] Neoverse cores
>> (specifically the LL_CACHE_RD pmu events). If we were to assign a number
>> to the system cache (assuming all caches are implemented):
>>
>> *For N1*, if L2 and L3 are implemented, system cache would follow at *L4*
> To date no one has built 4 level though. Everyone has only built three.

The N1SDP board advertises 4 levels (we use it regularly for testing perf patches)

| $ cat /sys/devices/system/cpu/cpu0/cache/index4/{level,shared_cpu_list}
| 4
| 0-3

Would it be a good idea to obtain the system cache level# from sysfs?

>> *For V1 and N2*, if L2 is implemented, system cache would follow at *L3*
>> (these don't seem to have the same/similar per-cluster L3 cache from the N1)
> And in the future they're not able to build >3. German and Leo if there aren't
> strong objections I think the best path forward is for me to respin these
> assuming only 3 levels and if someone builds 4 in a far-off-future we can always
> change the implementation then. Agreed?
>
> Thanks,
> Ali
>

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
  2022-03-15 18:44                 ` German Gomez
@ 2022-03-16 11:43                   ` German Gomez
  -1 siblings, 0 replies; 32+ messages in thread
From: German Gomez @ 2022-03-16 11:43 UTC (permalink / raw)
  To: Ali Saidi, leo.yan
  Cc: acme, alexander.shishkin, andrew.kilroy, benh, james.clark,
	john.garry, jolsa, kjain, lihuafei1, linux-arm-kernel,
	linux-kernel, linux-perf-users, mark.rutland, mathieu.poirier,
	mingo, namhyung, peterz, will, yao.jin, Nick.Forrington


On 15/03/2022 18:44, German Gomez wrote:
> On 14/03/2022 18:37, Ali Saidi wrote:
>> Hi German and Leo,
>>
>> On   Mon, 14 Mar 2022 18:00:13 +0000, German Gomez wrote:
>>> Hi Leo, Ali
>>>
>>> On 14/03/2022 06:33, Leo Yan wrote:
>>>> On Sun, Mar 13, 2022 at 07:19:33PM +0000, Ali Saidi wrote:
>>>>
>>>> [...]
>>>>
>>>>>>>> +			if (lvl & P(LVL, L3) || lnum == P(LVLNUM, L4)) {
>>>>>>> According to a comment in the previous patch, using L4 is specific to Neoverse, right?
>>>>>>>
>>>>>>> Maybe we need to distinguish the Neoverse case from the generic one here as well
>>>>>>>
>>>>>>> if (is_neoverse)
>>>>>>> // treat L4 as llc
>>>>>>> else
>>>>>>> // treat L3 as llc
>>>>>> I personally think it's not a good idea to distinguish platforms in the decoding code.
>>>>> I agree here. The more we talk about this, the more I'm wondering if we're
>>>>> spending too much code solving a problem that doesn't exist. I know of no
>>>>> Neoverse systems that actually have 4 cache levels, they all actually have three
>>>>> even though it's technically possible to have four.  I have some doubts anyone
>>>>> will actually build four levels of cache and perhaps the most prudent path here
>>>>> is to assume only three levels (and adjust the previous patch) until someone 
>>>>> actually produces a system with four levels instead of a lot of code that is
>>>>> never actually exercised?
>>>> I am not the right person to say that an L4 cache is not implemented in
>>>> Neoverse platforms; my guess is that a "System cache" data source might be
>>>> L3 or L4, and it is implementation dependent.  Maybe German or other Arm
>>>> colleagues could confirm this.
>>> I had a look at the TRMs for the N1[1], V1[2] and N2[3] Neoverse cores
>>> (specifically the LL_CACHE_RD pmu events). If we were to assign a number
>>> to the system cache (assuming all caches are implemented):
>>>
>>> *For N1*, if L2 and L3 are implemented, system cache would follow at *L4*
>> To date no one has built 4 level though. Everyone has only built three.
> The N1SDP board advertises 4 levels (we use it regularly for testing perf patches)

That said, it's probably the odd one out.

I'm not against assuming 3 levels. Later, if there is a strong need for L4, indeed we can go back and change it.

Thanks,
German

>
> | $ cat /sys/devices/system/cpu/cpu0/cache/index4/{level,shared_cpu_list}
> | 4
> | 0-3
>
> Would it be a good idea to obtain the system cache level# from sysfs?
>
>>> *For V1 and N2*, if L2 is implemented, system cache would follow at *L3*
>>> (these don't seem to have the same/similar per-cluster L3 cache from the N1)
>> And in the future they're not able to build >3. German and Leo if there aren't
>> strong objections I think the best path forward is for me to respin these
>> assuming only 3 levels and if someone builds 4 in a far-off-future we can always
>> change the implementation then. Agreed?
>>
>> Thanks,
>> Ali
>>

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
@ 2022-03-16 11:43                   ` German Gomez
  0 siblings, 0 replies; 32+ messages in thread
From: German Gomez @ 2022-03-16 11:43 UTC (permalink / raw)
  To: Ali Saidi, leo.yan
  Cc: acme, alexander.shishkin, andrew.kilroy, benh, james.clark,
	john.garry, jolsa, kjain, lihuafei1, linux-arm-kernel,
	linux-kernel, linux-perf-users, mark.rutland, mathieu.poirier,
	mingo, namhyung, peterz, will, yao.jin, Nick.Forrington


On 15/03/2022 18:44, German Gomez wrote:
> On 14/03/2022 18:37, Ali Saidi wrote:
>> Hi German and Leo,
>>
>> On   Mon, 14 Mar 2022 18:00:13 +0000, German Gomez wrote:
>>> Hi Leo, Ali
>>>
>>> On 14/03/2022 06:33, Leo Yan wrote:
>>>> On Sun, Mar 13, 2022 at 07:19:33PM +0000, Ali Saidi wrote:
>>>>
>>>> [...]
>>>>
>>>>>>>> +			if (lvl & P(LVL, L3) || lnum == P(LVLNUM, L4)) {
>>>>>>> According to a comment in the previous patch, using L4 is specific to Neoverse, right?
>>>>>>>
>>>>>>> Maybe we need to distinguish the Neoverse case from the generic one here as well
>>>>>>>
>>>>>>> if (is_neoverse)
>>>>>>> // treat L4 as llc
>>>>>>> else
>>>>>>> // treat L3 as llc
>>>>>> I personally think it's not a good idea to distinguish platforms in the decoding code.
>>>>> I agree here. The more we talk about this, the more I'm wondering if we're
>>>>> spending too much code solving a problem that doesn't exist. I know of no
>>>>> Neoverse systems that actually have 4 cache levels, they all actually have three
>>>>> even though it's technically possible to have four.  I have some doubts anyone
>>>>> will actually build four levels of cache and perhaps the most prudent path here
>>>>> is to assume only three levels (and adjust the previous patch) until someone 
>>>>> actually produces a system with four levels instead of a lot of code that is
>>>>> never actually exercised?
>>>> I am not the right person to say that an L4 cache is not implemented in
>>>> Neoverse platforms; my guess is that a "System cache" data source might be
>>>> L3 or L4, and it is implementation dependent.  Maybe German or other Arm
>>>> colleagues could confirm this.
>>> I had a look at the TRMs for the N1[1], V1[2] and N2[3] Neoverse cores
>>> (specifically the LL_CACHE_RD pmu events). If we were to assign a number
>>> to the system cache (assuming all caches are implemented):
>>>
>>> *For N1*, if L2 and L3 are implemented, system cache would follow at *L4*
>> To date no one has built 4 level though. Everyone has only built three.
> The N1SDP board advertises 4 levels (we use it regularly for testing perf patches)

That said, it's probably the odd one out.

I'm not against assuming 3 levels. Later, if there is a strong need for L4, indeed we can go back and change it.

Thanks,
German

>
> | $ cat /sys/devices/system/cpu/cpu0/cache/index4/{level,shared_cpu_list}
> | 4
> | 0-3
>
> Would it be a good idea to obtain the system cache level# from sysfs?
>
>>> *For V1 and N2*, if L2 is implemented, system cache would follow at *L3*
>>> (these don't seem to have the same/similar per-cluster L3 cache from the N1)
>> And in the future they're not able to build >3. German and Leo if there aren't
>> strong objections I think the best path forward is for me to respin these
>> assuming only 3 levels and if someone builds 4 in a far-off-future we can always
>> change the implementation then. Agreed?
>>
>> Thanks,
>> Ali
>>

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
  2022-03-16 11:43                   ` German Gomez
@ 2022-03-16 12:42                     ` Leo Yan
  -1 siblings, 0 replies; 32+ messages in thread
From: Leo Yan @ 2022-03-16 12:42 UTC (permalink / raw)
  To: German Gomez
  Cc: Ali Saidi, acme, alexander.shishkin, andrew.kilroy, benh,
	james.clark, john.garry, jolsa, kjain, lihuafei1,
	linux-arm-kernel, linux-kernel, linux-perf-users, mark.rutland,
	mathieu.poirier, mingo, namhyung, peterz, will, yao.jin,
	Nick.Forrington

On Wed, Mar 16, 2022 at 11:43:52AM +0000, German Gomez wrote:

[...]

> >>> I had a look at the TRMs for the N1[1], V1[2] and N2[3] Neoverse cores
> >>> (specifically the LL_CACHE_RD pmu events). If we were to assign a number
> >>> to the system cache (assuming all caches are implemented):
> >>>
> >>> *For N1*, if L2 and L3 are implemented, system cache would follow at *L4*
> >> To date no one has built 4 level though. Everyone has only built three.
> > The N1SDP board advertises 4 levels (we use it regularly for testing perf patches)
> 
> That said, it's probably the odd one out.
> 
> I'm not against assuming 3 levels. Later, if there is a strong need for L4, indeed we can go back and change it.

Thanks for the info.

Exploring the cache hierarchy via sysfs is a good idea; my only
concern is: can we simply treat the system cache as the same thing
as the highest-level cache?  If so, I think another option is to
define a cache level as "PERF_MEM_LVLNUM_SYSTEM_CACHE" and extend the
decoding code to support it.

With PERF_MEM_LVLNUM_SYSTEM_CACHE, it can tell users clearly the data
source from system cache, and users can easily map this info with the
cache media on the working platform.

In practice, I don't object to using cache level 3 as a first step.  At
least this can meet the requirement at the current stage.

Thanks,
Leo

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
@ 2022-03-16 12:42                     ` Leo Yan
  0 siblings, 0 replies; 32+ messages in thread
From: Leo Yan @ 2022-03-16 12:42 UTC (permalink / raw)
  To: German Gomez
  Cc: Ali Saidi, acme, alexander.shishkin, andrew.kilroy, benh,
	james.clark, john.garry, jolsa, kjain, lihuafei1,
	linux-arm-kernel, linux-kernel, linux-perf-users, mark.rutland,
	mathieu.poirier, mingo, namhyung, peterz, will, yao.jin,
	Nick.Forrington

On Wed, Mar 16, 2022 at 11:43:52AM +0000, German Gomez wrote:

[...]

> >>> I had a look at the TRMs for the N1[1], V1[2] and N2[3] Neoverse cores
> >>> (specifically the LL_CACHE_RD pmu events). If we were to assign a number
> >>> to the system cache (assuming all caches are implemented):
> >>>
> >>> *For N1*, if L2 and L3 are implemented, system cache would follow at *L4*
> >> To date no one has built 4 level though. Everyone has only built three.
> > The N1SDP board advertises 4 levels (we use it regularly for testing perf patches)
> 
> That said, it's probably the odd one out.
> 
> I'm not against assuming 3 levels. Later, if there is a strong need for L4, indeed we can go back and change it.

Thanks for the info.

Exploring the cache hierarchy via sysfs is a good idea; my only
concern is: can we simply treat the system cache as the same thing
as the highest-level cache?  If so, I think another option is to
define a cache level as "PERF_MEM_LVLNUM_SYSTEM_CACHE" and extend the
decoding code to support it.

With PERF_MEM_LVLNUM_SYSTEM_CACHE, it can tell users clearly the data
source from system cache, and users can easily map this info with the
cache media on the working platform.

In practice, I don't object to using cache level 3 as a first step.  At
least this can meet the requirement at the current stage.

Thanks,
Leo

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
  2022-03-16 12:42                     ` Leo Yan
@ 2022-03-16 15:10                       ` German Gomez
  -1 siblings, 0 replies; 32+ messages in thread
From: German Gomez @ 2022-03-16 15:10 UTC (permalink / raw)
  To: Leo Yan, Ali Saidi
  Cc: acme, alexander.shishkin, andrew.kilroy, benh, james.clark,
	john.garry, jolsa, kjain, lihuafei1, linux-arm-kernel,
	linux-kernel, linux-perf-users, mark.rutland, mathieu.poirier,
	mingo, namhyung, peterz, will, yao.jin, Nick.Forrington


On 16/03/2022 12:42, Leo Yan wrote:
> On Wed, Mar 16, 2022 at 11:43:52AM +0000, German Gomez wrote:
>
> [...]
>
>>>>> I had a look at the TRMs for the N1[1], V1[2] and N2[3] Neoverse cores
>>>>> (specifically the LL_CACHE_RD pmu events). If we were to assign a number
>>>>> to the system cache (assuming all caches are implemented):
>>>>>
>>>>> *For N1*, if L2 and L3 are implemented, system cache would follow at *L4*
>>>> To date no one has built 4 level though. Everyone has only built three.
>>> The N1SDP board advertises 4 levels (we use it regularly for testing perf patches)
>> That said, it's probably the odd one out.
>>
>> I'm not against assuming 3 levels. Later if there's is a strong need for L4, indeed we can go back and change it.
> Thanks for the info.
>
> For exploring cache hierarchy via sysFS is a good idea, the only one
> concern for me is: can we simply take the system cache as the same
> thing as the highest level cache?  If so, I think another option is to

For Neoverse, it should be. LL_CACHE_RD pmu event says (if system cache is implemented):

* If CPUECTLR.EXTLLC is set: This event counts any cacheable read transaction which returns a data source of 'interconnect cache'.

> define a cache level as "PERF_MEM_LVLNUM_SYSTEM_CACHE" and extend the
> decoding code for support it.
>
> With PERF_MEM_LVLNUM_SYSTEM_CACHE, it can tell users clearly the data
> source from system cache, and users can easily map this info with the
> cache media on the working platform.
>
> In practice, I don't object to use cache level 3 at first step.  At
> least this can meet the requirement at current stage.

Ok, I agree. I think for now it is a good compromise.
Detecting the caches seems like an additional/separate perf feature.

Thanks,
German

> Thanks,
> Leo

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used
@ 2022-03-16 15:10                       ` German Gomez
  0 siblings, 0 replies; 32+ messages in thread
From: German Gomez @ 2022-03-16 15:10 UTC (permalink / raw)
  To: Leo Yan, Ali Saidi
  Cc: acme, alexander.shishkin, andrew.kilroy, benh, james.clark,
	john.garry, jolsa, kjain, lihuafei1, linux-arm-kernel,
	linux-kernel, linux-perf-users, mark.rutland, mathieu.poirier,
	mingo, namhyung, peterz, will, yao.jin, Nick.Forrington


On 16/03/2022 12:42, Leo Yan wrote:
> On Wed, Mar 16, 2022 at 11:43:52AM +0000, German Gomez wrote:
>
> [...]
>
>>>>> I had a look at the TRMs for the N1[1], V1[2] and N2[3] Neoverse cores
>>>>> (specifically the LL_CACHE_RD pmu events). If we were to assign a number
>>>>> to the system cache (assuming all caches are implemented):
>>>>>
>>>>> *For N1*, if L2 and L3 are implemented, system cache would follow at *L4*
>>>> To date no one has built 4 level though. Everyone has only built three.
>>> The N1SDP board advertises 4 levels (we use it regularly for testing perf patches)
>> That said, it's probably the odd one out.
>>
>> I'm not against assuming 3 levels. Later if there's is a strong need for L4, indeed we can go back and change it.
> Thanks for the info.
>
> For exploring cache hierarchy via sysFS is a good idea, the only one
> concern for me is: can we simply take the system cache as the same
> thing as the highest level cache?  If so, I think another option is to

For Neoverse, it should be. LL_CACHE_RD pmu event says (if system cache is implemented):

* If CPUECTLR.EXTLLC is set: This event counts any cacheable read transaction which returns a data source of 'interconnect cache'.

> define a cache level as "PERF_MEM_LVLNUM_SYSTEM_CACHE" and extend the
> decoding code for support it.
>
> With PERF_MEM_LVLNUM_SYSTEM_CACHE, it can tell users clearly the data
> source from system cache, and users can easily map this info with the
> cache media on the working platform.
>
> In practice, I don't object to use cache level 3 at first step.  At
> least this can meet the requirement at current stage.

Ok, I agree. I think for now it is a good compromise.
Detecting the caches seems like an additional/separate perf feature.

Thanks,
German

> Thanks,
> Leo

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 32+ messages in thread

end of thread, other threads:[~2022-03-16 15:14 UTC | newest]

Thread overview: 32+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-02-21 22:47 [PATCH v2 1/2] perf arm-spe: Use SPE data source for neoverse cores Ali Saidi
2022-02-21 22:47 ` Ali Saidi
2022-02-21 22:48 ` [PATCH v2 2/2] perf mem: Support HITM for when mem_lvl_num is used Ali Saidi
2022-02-21 22:48   ` Ali Saidi
2022-03-02 15:39   ` German Gomez
2022-03-02 15:39     ` German Gomez
2022-03-13 12:44     ` Leo Yan
2022-03-13 12:44       ` Leo Yan
2022-03-13 19:19       ` Ali Saidi
2022-03-13 19:19         ` Ali Saidi
2022-03-14  6:33         ` Leo Yan
2022-03-14  6:33           ` Leo Yan
2022-03-14 18:00           ` German Gomez
2022-03-14 18:00             ` German Gomez
2022-03-14 18:37             ` Ali Saidi
2022-03-14 18:37               ` Ali Saidi
2022-03-15 18:44               ` German Gomez
2022-03-15 18:44                 ` German Gomez
2022-03-16 11:43                 ` German Gomez
2022-03-16 11:43                   ` German Gomez
2022-03-16 12:42                   ` Leo Yan
2022-03-16 12:42                     ` Leo Yan
2022-03-16 15:10                     ` German Gomez
2022-03-16 15:10                       ` German Gomez
2022-03-02 11:59 ` [PATCH v2 1/2] perf arm-spe: Use SPE data source for neoverse cores German Gomez
2022-03-02 11:59   ` German Gomez
2022-03-13 11:46   ` Leo Yan
2022-03-13 11:46     ` Leo Yan
2022-03-13 19:06     ` Ali Saidi
2022-03-13 19:06       ` Ali Saidi
2022-03-14  4:05       ` Leo Yan
2022-03-14  4:05         ` Leo Yan

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.