From: Vikas Shivappa <vikas.shivappa@linux.intel.com>
To: vikas.shivappa@intel.com, vikas.shivappa@linux.intel.com
Cc: linux-kernel@vger.kernel.org, x86@kernel.org, tglx@linutronix.de,
	peterz@infradead.org, ravi.v.shankar@intel.com,
	tony.luck@intel.com, fenghua.yu@intel.com, andi.kleen@intel.com,
	davidcc@google.com, eranian@google.com, hpa@zytor.com
Subject: [PATCH 10/14] x86/cqm: Add RMID reuse
Date: Fri, 16 Dec 2016 15:13:04 -0800	[thread overview]
Message-ID: <1481929988-31569-11-git-send-email-vikas.shivappa@linux.intel.com> (raw)
In-Reply-To: <1481929988-31569-1-git-send-email-vikas.shivappa@linux.intel.com>

When an RMID is freed by an event it cannot be reused immediately, as
the RMID may still have some cache occupancy associated with it. Hence,
when an RMID is freed it goes onto a limbo list rather than the free
list. This patch adds support to periodically check the occupancy of
such RMIDs and move them to the free list once their occupancy drops
below the threshold occupancy value. The threshold occupancy value can
be adjusted by the user to suit their requirements.
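
A condensed sketch of that reuse pass (simplified pseudocode of this
patch's approach; the real code below splits it into intel_cqm_stable()
plus a second pass and holds cache_lock):

static void rmid_reuse_sketch(struct pkg_data *pdata)
{
	struct cqm_rmid_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp,
				 &pdata->cqm_rmid_limbo_lru, list) {
		/* Occupancy still at/above the threshold? Keep in limbo. */
		if (__rmid_read(entry->rmid) >= __intel_cqm_threshold)
			continue;
		/* Occupancy has decayed; recycle the RMID. */
		list_del(&entry->list);
		list_add_tail(&entry->list, &pdata->cqm_rmid_free_lru);
	}
}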

Tests: Before this patch, task monitoring would simply return an error
once all RMIDs had been used up within the lifetime of a system boot.
After this patch, the freed RMIDs can be reused.
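
The threshold here is the existing __intel_cqm_max_threshold knob.
Assuming the intel_cqm PMU still exposes its max_recycle_threshold
sysfs attribute with this series (an assumption; the final interface
may differ), a minimal user-space tuning example would be:

/*
 * Hypothetical example: raise the reuse threshold to 16 KB. Assumes
 * /sys/devices/intel_cqm/max_recycle_threshold exists and is in bytes.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/intel_cqm/max_recycle_threshold", "w");

	if (!f)
		return 1;
	fprintf(f, "16384\n");
	return fclose(f) ? 1 : 0;
}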

Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com>
---
 arch/x86/events/intel/cqm.c | 107 +++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 106 insertions(+), 1 deletion(-)

diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 73f566a..85162aa 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -173,6 +173,13 @@ u32 __get_rmid(int domain)
 	return entry->rmid;
 }
 
+static void cqm_schedule_rmidwork(int domain);
+
+static inline bool is_first_cqmwork(int domain)
+{
+	return (!atomic_cmpxchg(&cqm_pkgs_data[domain]->reuse_scheduled, 0, 1));
+}
+
 static void __put_rmid(u32 rmid, int domain)
 {
 	struct cqm_rmid_entry *entry;
@@ -293,6 +300,93 @@ static void cqm_mask_call(struct rmid_read *rr)
 static unsigned int __intel_cqm_threshold;
 static unsigned int __intel_cqm_max_threshold;
 
+/*
+ * Test whether an RMID's occupancy on this cpu is below the reuse threshold.
+ */
+static void intel_cqm_stable(void)
+{
+	struct cqm_rmid_entry *entry;
+	struct list_head *llist;
+
+	llist = &cqm_pkgs_data[pkg_id]->cqm_rmid_limbo_lru;
+	list_for_each_entry(entry, llist, list) {
+
+		if (__rmid_read(entry->rmid) < __intel_cqm_threshold)
+			entry->state = RMID_AVAILABLE;
+	}
+}
+
+static void __intel_cqm_rmid_reuse(void)
+{
+	struct cqm_rmid_entry *entry, *tmp;
+	struct list_head *llist, *flist;
+	struct pkg_data *pdata;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&cache_lock, flags);
+	pdata = cqm_pkgs_data[pkg_id];
+	llist = &pdata->cqm_rmid_limbo_lru;
+	flist = &pdata->cqm_rmid_free_lru;
+
+	if (list_empty(llist))
+		goto end;
+	/*
+	 * Test whether an RMID is free
+	 */
+	intel_cqm_stable();
+
+	list_for_each_entry_safe(entry, tmp, llist, list) {
+
+		if (entry->state == RMID_DIRTY)
+			continue;
+		/*
+		 * Otherwise remove from limbo and place it onto the free list.
+		 */
+		list_del(&entry->list);
+		list_add_tail(&entry->list, flist);
+	}
+
+end:
+	raw_spin_unlock_irqrestore(&cache_lock, flags);
+}
+
+static bool reschedule_cqm_work(void)
+{
+	unsigned long flags;
+	bool nwork = false;
+
+	raw_spin_lock_irqsave(&cache_lock, flags);
+
+	if (!list_empty(&cqm_pkgs_data[pkg_id]->cqm_rmid_limbo_lru))
+		nwork = true;
+	else
+		atomic_set(&cqm_pkgs_data[pkg_id]->reuse_scheduled, 0U);
+
+	raw_spin_unlock_irqrestore(&cache_lock, flags);
+
+	return nwork;
+}
+
+static void cqm_schedule_rmidwork(int domain)
+{
+	struct delayed_work *dwork;
+	unsigned long delay;
+
+	dwork = &cqm_pkgs_data[domain]->intel_cqm_rmid_work;
+	delay = msecs_to_jiffies(RMID_DEFAULT_QUEUE_TIME);
+
+	schedule_delayed_work_on(cqm_pkgs_data[domain]->rmid_work_cpu,
+			     dwork, delay);
+}
+
+static void intel_cqm_rmid_reuse(struct work_struct *work)
+{
+	__intel_cqm_rmid_reuse();
+
+	if (reschedule_cqm_work())
+		cqm_schedule_rmidwork(pkg_id);
+}
+
 static struct pmu intel_cqm_pmu;
 
 static u64 update_sample(unsigned int rmid, u32 evt_type, int first)
@@ -540,7 +634,7 @@ static int intel_cqm_setup_event(struct perf_event *event,
 	}
 #ifdef CONFIG_CGROUP_PERF
 	/*
-	 * For continously monitored cgroups, *rmid is allocated already.
+	 * For continuously monitored cgroups, rmid is allocated already.
 	 */
 	if (event->cgrp) {
 		cqm_info = cgrp_to_cqm_info(event->cgrp);
@@ -882,6 +976,7 @@ static void intel_cqm_event_terminate(struct perf_event *event)
 {
 	struct perf_event *group_other = NULL;
 	unsigned long flags;
+	int d;
 
 	mutex_lock(&cache_mutex);
 	/*
@@ -924,6 +1019,13 @@ static void intel_cqm_event_terminate(struct perf_event *event)
 		mbm_stop_timers();
 
 	mutex_unlock(&cache_mutex);
+
+	for (d = 0; d < cqm_socket_max; d++) {
+
+		if (cqm_pkgs_data[d] != NULL && is_first_cqmwork(d)) {
+			cqm_schedule_rmidwork(d);
+		}
+	}
 }
 
 static int intel_cqm_event_init(struct perf_event *event)
@@ -1430,6 +1532,9 @@ static int pkg_data_init_cpu(int cpu)
 	mutex_init(&pkg_data->pkg_data_mutex);
 	raw_spin_lock_init(&pkg_data->pkg_data_lock);
 
+	INIT_DEFERRABLE_WORK(
+		&pkg_data->intel_cqm_rmid_work, intel_cqm_rmid_reuse);
+
 	pkg_data->rmid_work_cpu = cpu;
 
 	nr_rmids = cqm_max_rmid + 1;
-- 
1.9.1
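
A note on the arming logic above: the atomic_cmpxchg() in
is_first_cqmwork() makes scheduling idempotent, so concurrent frees on
the same package arm exactly one worker per package. The generic
pattern looks like this (an illustrative sketch, not code from the
patch):

static atomic_t armed;

static bool try_arm(void)
{
	/* Only the caller that flips 0 -> 1 gets true. */
	return atomic_cmpxchg(&armed, 0, 1) == 0;
}

The worker later clears the flag (here, reuse_scheduled) once the limbo
list is empty, which re-enables arming for the next free.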
