All of lore.kernel.org
 help / color / mirror / Atom feed
From: Namhyung Kim <namhyung@kernel.org>
To: Peter Zijlstra <peterz@infradead.org>, Ingo Molnar <mingo@kernel.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>,
	Arnaldo Carvalho de Melo <acme@kernel.org>,
	Stephane Eranian <eranian@google.com>,
	Kan Liang <kan.liang@linux.intel.com>,
	LKML <linux-kernel@vger.kernel.org>
Subject: [PATCH] perf/x86/ibs: Set data_src.mem_lvl_num as well
Date: Fri, 10 Mar 2023 16:06:42 -0800	[thread overview]
Message-ID: <20230311000642.1270971-1-namhyung@kernel.org> (raw)

The IBS PMU driver sets the data source of memory operations but it missed
the mem_lvl_num field.  So I'm adding it here.

Most cases are straightforward but please check if MEM_LVLNUM_ANY_CACHE
for peer cache hits is ok.  Also I wonder if we can add MEM_REMOTE_REMOTE
for peer cache hits in a far CCX.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
 arch/x86/events/amd/ibs.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 64582954b5f6..819869fc2dc7 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -724,6 +724,7 @@ static void perf_ibs_get_mem_lvl(union ibs_op_data2 *op_data2,
 	/* L1 Hit */
 	if (op_data3->dc_miss == 0) {
 		data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
+		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
 		return;
 	}
 
@@ -733,6 +734,7 @@ static void perf_ibs_get_mem_lvl(union ibs_op_data2 *op_data2,
 		if (boot_cpu_data.x86 != 0x19 || boot_cpu_data.x86_model > 0xF ||
 		    !(op_data3->sw_pf || op_data3->dc_miss_no_mab_alloc)) {
 			data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
+			data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
 			return;
 		}
 	}
@@ -748,12 +750,14 @@ static void perf_ibs_get_mem_lvl(union ibs_op_data2 *op_data2,
 	if (ibs_caps & IBS_CAPS_ZEN4) {
 		if (ibs_data_src == IBS_DATA_SRC_EXT_LOC_CACHE) {
 			data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
+			data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
 			return;
 		}
 	} else {
 		if (ibs_data_src == IBS_DATA_SRC_LOC_CACHE) {
 			data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_REM_CCE1 |
 					    PERF_MEM_LVL_HIT;
+			data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
 			return;
 		}
 	}
@@ -762,6 +766,7 @@ static void perf_ibs_get_mem_lvl(union ibs_op_data2 *op_data2,
 	if (ibs_caps & IBS_CAPS_ZEN4 &&
 	    ibs_data_src == IBS_DATA_SRC_EXT_NEAR_CCX_CACHE) {
 		data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1 | PERF_MEM_LVL_HIT;
+		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
 		return;
 	}
 
@@ -769,11 +774,13 @@ static void perf_ibs_get_mem_lvl(union ibs_op_data2 *op_data2,
 	if (ibs_caps & IBS_CAPS_ZEN4) {
 		if (ibs_data_src == IBS_DATA_SRC_EXT_FAR_CCX_CACHE) {
 			data_src->mem_lvl = PERF_MEM_LVL_REM_CCE2 | PERF_MEM_LVL_HIT;
+			data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
 			return;
 		}
 	} else {
 		if (ibs_data_src == IBS_DATA_SRC_REM_CACHE) {
 			data_src->mem_lvl = PERF_MEM_LVL_REM_CCE2 | PERF_MEM_LVL_HIT;
+			data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
 			return;
 		}
 	}
@@ -784,6 +791,7 @@ static void perf_ibs_get_mem_lvl(union ibs_op_data2 *op_data2,
 			data_src->mem_lvl = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT;
 		else
 			data_src->mem_lvl = PERF_MEM_LVL_REM_RAM1 | PERF_MEM_LVL_HIT;
+		data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
 		return;
 	}
 
@@ -832,10 +840,12 @@ static void perf_ibs_get_mem_lvl(union ibs_op_data2 *op_data2,
 	 */
 	if (op_data3->dc_miss_no_mab_alloc) {
 		data_src->mem_lvl = PERF_MEM_LVL_LFB | PERF_MEM_LVL_HIT;
+		data_src->mem_lvl_num = PERF_MEM_LVLNUM_LFB;
 		return;
 	}
 
 	data_src->mem_lvl = PERF_MEM_LVL_NA;
+	data_src->mem_lvl_num = PERF_MEM_LVLNUM_NA;
 }
 
 static bool perf_ibs_cache_hit_st_valid(void)
-- 
2.40.0.rc1.284.g88254d51c5-goog


             reply	other threads:[~2023-03-11  0:07 UTC|newest]

Thread overview: 5+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-03-11  0:06 Namhyung Kim [this message]
2023-03-21  6:33 ` [PATCH] perf/x86/ibs: Set data_src.mem_lvl_num as well Ravi Bangoria
2023-03-22 15:54   ` Namhyung Kim
2023-03-23 14:11     ` Ravi Bangoria
2023-03-23 14:41       ` Namhyung Kim

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230311000642.1270971-1-namhyung@kernel.org \
    --to=namhyung@kernel.org \
    --cc=acme@kernel.org \
    --cc=eranian@google.com \
    --cc=kan.liang@linux.intel.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mingo@kernel.org \
    --cc=peterz@infradead.org \
    --cc=ravi.bangoria@amd.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.