From: Vikas Shivappa <vikas.shivappa@linux.intel.com>
To: vikas.shivappa@intel.com, x86@kernel.org,
	linux-kernel@vger.kernel.org, hpa@zytor.com, tglx@linutronix.de
Cc: mingo@kernel.org, peterz@infradead.org, ravi.v.shankar@intel.com,
	tony.luck@intel.com, fenghua.yu@intel.com,
	vikas.shivappa@linux.intel.com
Subject: [PATCH 1/3] perf/x86/cqm,mbm: Store cqm,mbm count for all events when RMID is recycled
Date: Wed, 27 Apr 2016 10:00:54 -0700	[thread overview]
Message-ID: <1461776456-25246-2-git-send-email-vikas.shivappa@linux.intel.com> (raw)
In-Reply-To: <1461776456-25246-1-git-send-email-vikas.shivappa@linux.intel.com>

During RMID recycling, when an event loses its RMID we saved the
counter for the group leader, but the count was not saved for the
other events in the event group. This could lead to a situation
where, if two perf instances are counting the same PID, one of them
does not see the updated count that the other instance sees. Fix the
issue by saving the count for all the events in the same event group.
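
For illustration only, here is a minimal user-space sketch of the
pattern the fix applies; the types and helpers (fake_event,
read_rmid_counter) are hypothetical stand-ins, not the kernel's
perf/CQM API:

#include <stdio.h>

struct fake_event {
	unsigned long long count;	/* per-event saved count */
	struct fake_event *next;	/* group sibling list */
};

/* Stand-in for reading the hardware counter via the old RMID. */
static unsigned long long read_rmid_counter(void)
{
	return 12345;
}

static void save_group_counts(struct fake_event *leader)
{
	struct fake_event *ev;

	/* Previously only the leader's count was saved ... */
	leader->count = read_rmid_counter();

	/* ... now every group member is updated as well. */
	for (ev = leader->next; ev; ev = ev->next)
		ev->count = read_rmid_counter();
}

int main(void)
{
	struct fake_event member = { 0, NULL };
	struct fake_event leader = { 0, &member };

	save_group_counts(&leader);
	printf("leader=%llu member=%llu\n", leader.count, member.count);
	return 0;
}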

Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com>
---
 arch/x86/events/intel/cqm.c | 39 ++++++++++++++++++++++++---------------
 1 file changed, 24 insertions(+), 15 deletions(-)
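
The __init_rr() helper introduced in the first hunk below is a C99
compound-literal macro. A standalone sketch of that pattern follows;
rr_demo and INIT_RR_DEMO are hypothetical stand-ins, and the kernel's
atomic64_t value field (initialized with ATOMIC64_INIT()) is replaced
by a plain long long to keep the sketch self-contained:

#include <stdio.h>

struct rr_demo {
	unsigned int rmid;
	unsigned int evt_type;
	long long value;
};

/* Expands to a struct value, usable as initializer or in assignment. */
#define INIT_RR_DEMO(r, cfg, v)		\
((struct rr_demo) {			\
	.rmid = (r),			\
	.evt_type = (cfg),		\
	.value = (v),			\
})

int main(void)
{
	/* As an initializer at declaration time ... */
	struct rr_demo rr = INIT_RR_DEMO(7, 0x01, 0);

	/* ... and as a plain assignment later, which a bare designated
	 * initializer list could not do. */
	rr = INIT_RR_DEMO(8, 0x02, 0);

	printf("rmid=%u evt=%u val=%lld\n", rr.rmid, rr.evt_type, rr.value);
	return 0;
}

Because the macro yields a struct value rather than a bare initializer
list, intel_cqm_xchg_rmid() below can declare rr once and reassign it
inside the loop for each group member's event type.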

diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 7b5fd81..5f2104a 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -14,6 +14,14 @@
 #define MSR_IA32_QM_EVTSEL	0x0c8d
 
 #define MBM_CNTR_WIDTH		24
+
+#define __init_rr(old_rmid, config, val)	\
+((struct rmid_read) {				\
+	.rmid = old_rmid,			\
+	.evt_type = config,			\
+	.value = ATOMIC64_INIT(val),		\
+})
+
 /*
  * Guaranteed time in ms as per SDM where MBM counters will not overflow.
  */
@@ -478,7 +486,8 @@ static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
 {
 	struct perf_event *event;
 	struct list_head *head = &group->hw.cqm_group_entry;
-	u32 old_rmid = group->hw.cqm_rmid;
+	u32 old_rmid = group->hw.cqm_rmid, evttype;
+	struct rmid_read rr;
 
 	lockdep_assert_held(&cache_mutex);
 
@@ -486,14 +495,21 @@ static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
 	 * If our RMID is being deallocated, perform a read now.
 	 */
 	if (__rmid_valid(old_rmid) && !__rmid_valid(rmid)) {
-		struct rmid_read rr = {
-			.rmid = old_rmid,
-			.evt_type = group->attr.config,
-			.value = ATOMIC64_INIT(0),
-		};
 
+		rr = __init_rr(old_rmid, group->attr.config, 0);
 		cqm_mask_call(&rr);
 		local64_set(&group->count, atomic64_read(&rr.value));
+		list_for_each_entry(event, head, hw.cqm_group_entry) {
+			if (event->hw.is_group_event) {
+
+				evttype = event->attr.config;
+				rr = __init_rr(old_rmid, evttype, 0);
+
+				cqm_mask_call(&rr);
+				local64_set(&event->count,
+					    atomic64_read(&rr.value));
+			}
+		}
 	}
 
 	raw_spin_lock_irq(&cache_lock);
@@ -983,11 +999,7 @@ static void __intel_mbm_event_init(void *info)
 
 static void init_mbm_sample(u32 rmid, u32 evt_type)
 {
-	struct rmid_read rr = {
-		.rmid = rmid,
-		.evt_type = evt_type,
-		.value = ATOMIC64_INIT(0),
-	};
+	struct rmid_read rr = __init_rr(rmid, evt_type, 0);
 
 	/* on each socket, init sample */
 	on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_init, &rr, 1);
@@ -1181,10 +1193,7 @@ static void mbm_hrtimer_init(void)
 static u64 intel_cqm_event_count(struct perf_event *event)
 {
 	unsigned long flags;
-	struct rmid_read rr = {
-		.evt_type = event->attr.config,
-		.value = ATOMIC64_INIT(0),
-	};
+	struct rmid_read rr = __init_rr(-1, event->attr.config, 0);
 
 	/*
 	 * We only need to worry about task events. System-wide events
-- 
1.9.1

Thread overview: 5+ messages
2016-04-27 17:00 [PATCH V2 0/3] Urgent fixes for Intel CQM/MBM counting Vikas Shivappa
2016-04-27 17:00 ` Vikas Shivappa [this message]
2016-04-27 17:00 ` [PATCH 2/3] perf/x86/mbm: Store bytes counted for mbm during recycle Vikas Shivappa
2016-04-27 17:00 ` [PATCH 3/3] perf/x86/mbm: Fix mbm counting when RMIDs are reused Vikas Shivappa
2016-05-06 23:44 [PATCH V2 0/3] Urgent fixes for Intel CQM/MBM counting Vikas Shivappa
2016-05-06 23:44 ` [PATCH 1/3] perf/x86/cqm,mbm: Store cqm,mbm count for all events when RMID is recycled Vikas Shivappa
