From: tip-bot for Matt Fleming <tipbot@zytor.com>
To: linux-tip-commits@vger.kernel.org
Cc: acme@redhat.com, tglx@linutronix.de, acme@kernel.org,
	mingo@kernel.org, peterz@infradead.org,
	linux-kernel@vger.kernel.org, jolsa@redhat.com,
	kanaka.d.juvva@intel.com, vikas.shivappa@linux.intel.com,
	torvalds@linux-foundation.org, matt.fleming@intel.com,
	hpa@zytor.com
Subject: [tip:perf/x86] perf/x86/intel: Implement LRU monitoring ID allocation for CQM
Date: Wed, 25 Feb 2015 20:15:31 -0800
Message-ID: <tip-35298e554c74b7849875e3676ba8eaf833c7b917@git.kernel.org>
In-Reply-To: <1422038748-21397-7-git-send-email-matt@codeblueprint.co.uk>

Commit-ID:  35298e554c74b7849875e3676ba8eaf833c7b917
Gitweb:     http://git.kernel.org/tip/35298e554c74b7849875e3676ba8eaf833c7b917
Author:     Matt Fleming <matt.fleming@intel.com>
AuthorDate: Fri, 23 Jan 2015 18:45:45 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Wed, 25 Feb 2015 13:53:33 +0100

perf/x86/intel: Implement LRU monitoring ID allocation for CQM

Reusing a freed monitoring ID can run into problems: stale cachelines
associated with that ID from a previous allocation may still be in the
cache, which can make the LLC occupancy values inaccurate.

To mitigate this problem we place freed IDs on a least recently used
list, essentially a FIFO. The basic idea is that the longer the period
between reuses of an ID, the lower the probability that stale
cachelines for it remain in the cache.
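
(For illustration only -- a minimal standalone sketch of the scheme in
userspace C, with hypothetical names; this is not the kernel code.
Free IDs sit on a FIFO: get_id() pops the oldest ID off the head and
put_id() appends a freed ID to the tail, maximizing the time before
any given ID is reused.)

#include <stdio.h>

#define NR_IDS 4

struct id_entry {
	int id;
	struct id_entry *next;
};

/* Entries are preallocated; presence on the list marks an ID as free. */
static struct id_entry entries[NR_IDS];
static struct id_entry *fifo_head, *fifo_tail;

/* Pop the least recently used (oldest) free ID, or -1 if none. */
static int get_id(void)
{
	struct id_entry *e = fifo_head;

	if (!e)
		return -1;	/* the kernel returns -EAGAIN here */
	fifo_head = e->next;
	if (!fifo_head)
		fifo_tail = NULL;
	return e->id;
}

/* Return a freed ID to the tail, making it the most recently used. */
static void put_id(int id)
{
	struct id_entry *e = &entries[id];	/* O(1) lookup by ID */

	e->next = NULL;
	if (fifo_tail)
		fifo_tail->next = e;
	else
		fifo_head = e;
	fifo_tail = e;
}

int main(void)
{
	int i, a;

	for (i = 0; i < NR_IDS; i++) {	/* populate the FIFO: 0,1,2,3 */
		entries[i].id = i;
		put_id(i);
	}

	a = get_id();	/* 0, the oldest */
	put_id(a);	/* 0 goes to the back of the queue... */
	printf("%d %d\n", get_id(), get_id());	/* ...so this prints "1 2" */
	return 0;
}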

Signed-off-by: Matt Fleming <matt.fleming@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kanaka Juvva <kanaka.d.juvva@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Vikas Shivappa <vikas.shivappa@linux.intel.com>
Link: http://lkml.kernel.org/r/1422038748-21397-7-git-send-email-matt@codeblueprint.co.uk
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 arch/x86/kernel/cpu/perf_event_intel_cqm.c | 100 ++++++++++++++++++++++++++---
 1 file changed, 92 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 05b4cd2..b5d9d74 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -25,7 +25,7 @@ struct intel_cqm_state {
 static DEFINE_PER_CPU(struct intel_cqm_state, cqm_state);
 
 /*
- * Protects cache_cgroups.
+ * Protects cache_cgroups and cqm_rmid_lru.
  */
 static DEFINE_MUTEX(cache_mutex);
 
@@ -64,36 +64,120 @@ static u64 __rmid_read(unsigned long rmid)
 	return val;
 }
 
-static unsigned long *cqm_rmid_bitmap;
+struct cqm_rmid_entry {
+	u64 rmid;
+	struct list_head list;
+};
+
+/*
+ * A least recently used list of RMIDs.
+ *
+ * Oldest entry at the head, newest (most recently used) entry at the
+ * tail. This list is never traversed; it's only used to keep track of
+ * the LRU order. That is, we only pick entries off the head or insert
+ * them at the tail.
+ *
+ * All entries on the list are 'free', and their RMIDs are not currently
+ * in use. To mark an RMID as in use, remove its entry from the lru
+ * list.
+ *
+ * This list is protected by cache_mutex.
+ */
+static LIST_HEAD(cqm_rmid_lru);
+
+/*
+ * We use a simple array of pointers so that we can look up a struct
+ * cqm_rmid_entry in O(1). This relieves the callers of __get_rmid()
+ * and __put_rmid() from having to worry about dealing with struct
+ * cqm_rmid_entry - they just deal with rmids, i.e. integers.
+ *
+ * Once this array is initialized it is read-only. No locks are required
+ * to access it.
+ *
+ * All entries for all RMIDs can be looked up in this array at all
+ * times.
+ */
+static struct cqm_rmid_entry **cqm_rmid_ptrs;
+
+static inline struct cqm_rmid_entry *__rmid_entry(int rmid)
+{
+	struct cqm_rmid_entry *entry;
+
+	entry = cqm_rmid_ptrs[rmid];
+	WARN_ON(entry->rmid != rmid);
+
+	return entry;
+}
 
 /*
  * Returns < 0 on failure.
+ *
+ * We expect to be called with cache_mutex held.
  */
 static int __get_rmid(void)
 {
-	return bitmap_find_free_region(cqm_rmid_bitmap, cqm_max_rmid, 0);
+	struct cqm_rmid_entry *entry;
+
+	lockdep_assert_held(&cache_mutex);
+
+	if (list_empty(&cqm_rmid_lru))
+		return -EAGAIN;
+
+	entry = list_first_entry(&cqm_rmid_lru, struct cqm_rmid_entry, list);
+	list_del(&entry->list);
+
+	return entry->rmid;
 }
 
 static void __put_rmid(int rmid)
 {
-	bitmap_release_region(cqm_rmid_bitmap, rmid, 0);
+	struct cqm_rmid_entry *entry;
+
+	lockdep_assert_held(&cache_mutex);
+
+	entry = __rmid_entry(rmid);
+
+	list_add_tail(&entry->list, &cqm_rmid_lru);
 }
 
 static int intel_cqm_setup_rmid_cache(void)
 {
-	cqm_rmid_bitmap = kmalloc(sizeof(long) * BITS_TO_LONGS(cqm_max_rmid), GFP_KERNEL);
-	if (!cqm_rmid_bitmap)
+	struct cqm_rmid_entry *entry;
+	int r;
+
+	cqm_rmid_ptrs = kmalloc(sizeof(struct cqm_rmid_entry *) *
+				(cqm_max_rmid + 1), GFP_KERNEL);
+	if (!cqm_rmid_ptrs)
 		return -ENOMEM;
 
-	bitmap_zero(cqm_rmid_bitmap, cqm_max_rmid);
+	for (r = 0; r <= cqm_max_rmid; r++) {
+		struct cqm_rmid_entry *entry;
+
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry)
+			goto fail;
+
+		INIT_LIST_HEAD(&entry->list);
+		entry->rmid = r;
+		cqm_rmid_ptrs[r] = entry;
+
+		list_add_tail(&entry->list, &cqm_rmid_lru);
+	}
 
 	/*
 	 * RMID 0 is special and is always allocated. It's used for all
 	 * tasks that are not monitored.
 	 */
-	bitmap_allocate_region(cqm_rmid_bitmap, 0, 0);
+	entry = __rmid_entry(0);
+	list_del(&entry->list);
 
 	return 0;
+fail:
+	while (r--)
+		kfree(cqm_rmid_ptrs[r]);
+
+	kfree(cqm_rmid_ptrs);
+	return -ENOMEM;
 }
 
 /*

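(Also for illustration -- a standalone userspace sketch, with
hypothetical names, of the patch's setup path: an array of pointers
gives O(1) rmid-to-entry lookup, every entry starts out on the free
FIFO, RMID 0 is immediately taken off it because it is always
allocated, and a partial allocation failure unwinds with the same
while (r--) pattern as the patch.)

#include <assert.h>
#include <stdlib.h>

struct rmid_entry {
	int rmid;
	struct rmid_entry *next;	/* free-list linkage */
};

/* Read-only after setup; maps an RMID straight to its entry. */
static struct rmid_entry **rmid_ptrs;
static struct rmid_entry *lru_head, *lru_tail;
static int max_rmid = 31;	/* assumed hardware-reported maximum */

static void lru_add_tail(struct rmid_entry *e)
{
	e->next = NULL;
	if (lru_tail)
		lru_tail->next = e;
	else
		lru_head = e;
	lru_tail = e;
}

static int setup_rmid_cache(void)
{
	int r;

	rmid_ptrs = malloc(sizeof(*rmid_ptrs) * (max_rmid + 1));
	if (!rmid_ptrs)
		return -1;

	for (r = 0; r <= max_rmid; r++) {
		struct rmid_entry *e = malloc(sizeof(*e));

		if (!e)
			goto fail;
		e->rmid = r;
		rmid_ptrs[r] = e;
		lru_add_tail(e);	/* every RMID starts out free */
	}

	/*
	 * RMID 0 is always allocated; it sits at the head because
	 * entries were queued in order, so pop it off the free list.
	 */
	assert(lru_head && lru_head->rmid == 0);
	lru_head = lru_head->next;

	return 0;
fail:
	/* Free the entries allocated so far, then the array itself. */
	while (r--)
		free(rmid_ptrs[r]);
	free(rmid_ptrs);
	return -1;
}

int main(void)
{
	return setup_rmid_cache() ? 1 : 0;
}
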
Thread overview: 28+ messages
2015-01-23 18:45 [PATCH v5 0/9] perf: Intel Cache QoS Monitoring support Matt Fleming
2015-01-23 18:45 ` [PATCH 1/9] perf: Make perf_cgroup_from_task() global Matt Fleming
2015-02-26  4:14   ` [tip:perf/x86] " tip-bot for Matt Fleming
2015-01-23 18:45 ` [PATCH 2/9] perf: Add ->count() function to read per-package counters Matt Fleming
2015-02-26  4:14   ` [tip:perf/x86] " tip-bot for Matt Fleming
2015-01-23 18:45 ` [PATCH 3/9] perf: Move cgroup init before PMU ->event_init() Matt Fleming
2015-02-26  4:14   ` [tip:perf/x86] " tip-bot for Matt Fleming
2015-01-23 18:45 ` [PATCH 4/9] x86: Add support for Intel Cache QoS Monitoring (CQM) detection Matt Fleming
2015-02-26  4:14   ` [tip:perf/x86] x86: Add support for Intel Cache QoS Monitoring (CQM) detection tip-bot for Peter P Waskiewicz Jr
2015-01-23 18:45 ` [PATCH 5/9] perf/x86/intel: Add Intel Cache QoS Monitoring support Matt Fleming
2015-01-25 18:34   ` Jiri Olsa
2015-01-25 23:07     ` Matt Fleming
2015-02-26  4:15   ` [tip:perf/x86] " tip-bot for Matt Fleming
2015-02-26 18:47     ` Matt Fleming
2015-02-26 19:40       ` Peter Zijlstra
2015-03-23 12:25       ` [tip:perf/x86] perf/x86/intel: Fix Makefile to actually build the cqm driver tip-bot for Matt Fleming
2015-01-23 18:45 ` [PATCH 6/9] perf/x86/intel: Implement LRU monitoring ID allocation for CQM Matt Fleming
2015-02-26  4:15   ` tip-bot for Matt Fleming [this message]
2015-01-23 18:45 ` [PATCH 7/9] perf/x86/intel: Support task events with Intel CQM Matt Fleming
2015-02-26  4:15   ` [tip:perf/x86] " tip-bot for Matt Fleming
2015-03-04  4:16     ` Vince Weaver
2015-03-05  0:55       ` Ingo Molnar
2015-03-05 21:10         ` Peter Zijlstra
2015-03-23 12:26           ` [tip:perf/x86] perf: Remove type specific target pointers tip-bot for Peter Zijlstra
2015-01-23 18:45 ` [PATCH v5 8/9] perf/x86/intel: Perform rotation on Intel CQM RMIDs Matt Fleming
2015-02-26  4:16   ` [tip:perf/x86] " tip-bot for Matt Fleming
2015-01-23 18:45 ` [PATCH v5 9/9] perf/x86/intel: Enable conflicting event scheduling for CQM Matt Fleming
2015-02-26  4:16   ` [tip:perf/x86] " tip-bot for Matt Fleming
