From: kan.liang@linux.intel.com
To: peterz@infradead.org, acme@kernel.org, mingo@redhat.com,
	linux-kernel@vger.kernel.org
Cc: tglx@linutronix.de, jolsa@kernel.org, eranian@google.com,
	alexander.shishkin@linux.intel.com, ak@linux.intel.com,
	Kan Liang <kan.liang@linux.intel.com>
Subject: [PATCH V4 02/23] perf/x86/intel: Extract memory code PEBS parser for reuse
Date: Tue, 26 Mar 2019 09:08:40 -0700	[thread overview]
Message-ID: <20190326160901.4887-3-kan.liang@linux.intel.com> (raw)
In-Reply-To: <20190326160901.4887-1-kan.liang@linux.intel.com>

From: Andi Kleen <ak@linux.intel.com>

Extract the code related to memory profiling from the PEBS record
parser into separate functions, so that it can be reused by the
upcoming adaptive PEBS parser. No functional changes.

Rename intel_hsw_weight() to intel_get_tsx_weight(), and
intel_hsw_transaction() to intel_get_tsx_transaction(), because the
input is no longer the HSW PEBS record format.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---

No changes since V3.
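
For illustration only (not part of the patch): a minimal sketch of how
the helpers factored out below are intended to be reused once a record
format other than pebs_record_skl supplies the raw values. The helper
names and signatures (get_data_src(), intel_get_tsx_weight(),
intel_get_tsx_transaction()) come from this patch; the
struct pebs_meminfo_sketch layout and the sketch_adaptive_mem_group()
caller are hypothetical stand-ins for the adaptive PEBS support added
later in this series.

/*
 * Illustrative sketch, not kernel code in this patch.  Shows that the
 * refactored helpers only need raw dse/tsx_tuning/ax values, so any
 * record layout that provides them can feed the same code.
 */
struct pebs_meminfo_sketch {		/* hypothetical layout */
	u64 aux;			/* data source encoding (dse) */
	u64 tsx_tuning;			/* TSX tuning field */
};

static void sketch_adaptive_mem_group(struct perf_event *event,
				      struct pebs_meminfo_sketch *meminfo,
				      u64 ax, u64 sample_type,
				      struct perf_sample_data *data)
{
	/* Same helper the basic parser now uses for PERF_SAMPLE_DATA_SRC. */
	if (sample_type & PERF_SAMPLE_DATA_SRC)
		data->data_src.val = get_data_src(event, meminfo->aux);

	/*
	 * The TSX helpers are now format independent: they take the raw
	 * tsx_tuning/ax values instead of a struct pebs_record_skl.
	 */
	if (sample_type & PERF_SAMPLE_WEIGHT)
		data->weight = intel_get_tsx_weight(meminfo->tsx_tuning);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning, ax);
}

The point of the refactor is that none of these helpers dereference a
pebs_record_skl any more, which is what makes the reuse above possible.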

 arch/x86/events/intel/ds.c | 63 ++++++++++++++++++++------------------
 1 file changed, 34 insertions(+), 29 deletions(-)

diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 10c99ce1fead..c02cd19fe640 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1125,34 +1125,50 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
 	return 0;
 }
 
-static inline u64 intel_hsw_weight(struct pebs_record_skl *pebs)
+static inline u64 intel_get_tsx_weight(u64 tsx_tuning)
 {
-	if (pebs->tsx_tuning) {
-		union hsw_tsx_tuning tsx = { .value = pebs->tsx_tuning };
+	if (tsx_tuning) {
+		union hsw_tsx_tuning tsx = { .value = tsx_tuning };
 		return tsx.cycles_last_block;
 	}
 	return 0;
 }
 
-static inline u64 intel_hsw_transaction(struct pebs_record_skl *pebs)
+static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax)
 {
-	u64 txn = (pebs->tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
+	u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
 
 	/* For RTM XABORTs also log the abort code from AX */
-	if ((txn & PERF_TXN_TRANSACTION) && (pebs->ax & 1))
-		txn |= ((pebs->ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
+	if ((txn & PERF_TXN_TRANSACTION) && (ax & 1))
+		txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
 	return txn;
 }
 
+#define PERF_X86_EVENT_PEBS_HSW_PREC \
+		(PERF_X86_EVENT_PEBS_ST_HSW | \
+		 PERF_X86_EVENT_PEBS_LD_HSW | \
+		 PERF_X86_EVENT_PEBS_NA_HSW)
+
+static u64 get_data_src(struct perf_event *event, u64 aux)
+{
+	u64 val = PERF_MEM_NA;
+	int fl = event->hw.flags;
+	bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
+
+	if (fl & PERF_X86_EVENT_PEBS_LDLAT)
+		val = load_latency_data(aux);
+	else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
+		val = precise_datala_hsw(event, aux);
+	else if (fst)
+		val = precise_store_data(aux);
+	return val;
+}
+
 static void setup_pebs_sample_data(struct perf_event *event,
 				   struct pt_regs *iregs, void *__pebs,
 				   struct perf_sample_data *data,
 				   struct pt_regs *regs)
 {
-#define PERF_X86_EVENT_PEBS_HSW_PREC \
-		(PERF_X86_EVENT_PEBS_ST_HSW | \
-		 PERF_X86_EVENT_PEBS_LD_HSW | \
-		 PERF_X86_EVENT_PEBS_NA_HSW)
 	/*
 	 * We cast to the biggest pebs_record but are careful not to
 	 * unconditionally access the 'extra' entries.
@@ -1160,17 +1176,13 @@ static void setup_pebs_sample_data(struct perf_event *event,
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct pebs_record_skl *pebs = __pebs;
 	u64 sample_type;
-	int fll, fst, dsrc;
-	int fl = event->hw.flags;
+	int fll;
 
 	if (pebs == NULL)
 		return;
 
 	sample_type = event->attr.sample_type;
-	dsrc = sample_type & PERF_SAMPLE_DATA_SRC;
-
-	fll = fl & PERF_X86_EVENT_PEBS_LDLAT;
-	fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
+	fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
 
 	perf_sample_data_init(data, 0, event->hw.last_period);
 
@@ -1185,16 +1197,8 @@ static void setup_pebs_sample_data(struct perf_event *event,
 	/*
 	 * data.data_src encodes the data source
 	 */
-	if (dsrc) {
-		u64 val = PERF_MEM_NA;
-		if (fll)
-			val = load_latency_data(pebs->dse);
-		else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
-			val = precise_datala_hsw(event, pebs->dse);
-		else if (fst)
-			val = precise_store_data(pebs->dse);
-		data->data_src.val = val;
-	}
+	if (sample_type & PERF_SAMPLE_DATA_SRC)
+		data->data_src.val = get_data_src(event, pebs->dse);
 
 	/*
 	 * We must however always use iregs for the unwinder to stay sane; the
@@ -1281,10 +1285,11 @@ static void setup_pebs_sample_data(struct perf_event *event,
 	if (x86_pmu.intel_cap.pebs_format >= 2) {
 		/* Only set the TSX weight when no memory weight. */
 		if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
-			data->weight = intel_hsw_weight(pebs);
+			data->weight = intel_get_tsx_weight(pebs->tsx_tuning);
 
 		if (sample_type & PERF_SAMPLE_TRANSACTION)
-			data->txn = intel_hsw_transaction(pebs);
+			data->txn = intel_get_tsx_transaction(pebs->tsx_tuning,
+							      pebs->ax);
 	}
 
 	/*
-- 
2.17.1



Thread overview: 32+ messages
2019-03-26 16:08 [PATCH V4 00/23] perf: Add Icelake support kan.liang
2019-03-26 16:08 ` [PATCH V4 01/23] perf/x86: Support outputting XMM registers kan.liang
2019-04-01 19:18   ` Stephane Eranian
2019-04-01 19:54     ` Liang, Kan
2019-04-01 21:11       ` Stephane Eranian
2019-04-01 22:33         ` Liang, Kan
2019-03-26 16:08 ` kan.liang [this message]
2019-03-26 16:08 ` [PATCH V4 03/23] perf/x86/intel/ds: Extract code of event update in short period kan.liang
2019-03-26 16:08 ` [PATCH V4 04/23] perf/x86/intel: Support adaptive PEBSv4 kan.liang
2019-03-26 22:24   ` Andi Kleen
2019-03-27 14:25     ` Liang, Kan
2019-03-27 14:42       ` Andi Kleen
2019-03-26 16:08 ` [PATCH V4 05/23] perf/x86/lbr: Avoid reading the LBRs when adaptive PEBS handles them kan.liang
2019-03-26 16:08 ` [PATCH V4 06/23] perf/x86: Support constraint ranges kan.liang
2019-03-26 16:08 ` [PATCH V4 07/23] perf/x86/intel: Add Icelake support kan.liang
2019-03-26 16:08 ` [PATCH V4 08/23] perf/x86/intel/cstate: " kan.liang
2019-03-26 16:08 ` [PATCH V4 09/23] perf/x86/intel/rapl: " kan.liang
2019-03-26 16:08 ` [PATCH V4 10/23] perf/x86/msr: " kan.liang
2019-03-26 16:08 ` [PATCH V4 11/23] perf/x86/intel/uncore: Add Intel Icelake uncore support kan.liang
2019-03-26 16:08 ` [PATCH V4 12/23] perf/core: Support a REMOVE transaction kan.liang
2019-03-26 16:08 ` [PATCH V4 13/23] perf/x86/intel: Basic support for metrics counters kan.liang
2019-03-26 16:08 ` [PATCH V4 14/23] perf/x86/intel: Support overflows on SLOTS kan.liang
2019-03-26 16:08 ` [PATCH V4 15/23] perf/x86/intel: Support hardware TopDown metrics kan.liang
2019-03-26 16:08 ` [PATCH V4 16/23] perf/x86/intel: Set correct weight for topdown subevent counters kan.liang
2019-03-26 16:08 ` [PATCH V4 17/23] perf/x86/intel: Export new top down events for Icelake kan.liang
2019-03-26 16:08 ` [PATCH V4 18/23] perf/x86/intel: Disable sampling read slots and topdown kan.liang
2019-03-26 16:08 ` [PATCH V4 19/23] perf/x86/intel: Support CPUID 10.ECX to disable fixed counters kan.liang
2019-03-26 16:08 ` [PATCH V4 20/23] perf, tools: Add support for recording and printing XMM registers kan.liang
2019-03-26 16:08 ` [PATCH V4 21/23] perf, tools, stat: Support new per thread TopDown metrics kan.liang
2019-03-26 16:09 ` [PATCH V4 22/23] perf, tools: Add documentation for topdown metrics kan.liang
2019-03-26 16:09 ` [PATCH V4 23/23] perf vendor events intel: Add JSON files for Icelake kan.liang
2019-04-01 13:01 ` [PATCH V4 00/23] perf: Add Icelake support Liang, Kan
