From: "Fenghua Yu" <fenghua.yu@intel.com>
To: "Thomas Gleixner" <tglx@linutronix.de>
Cc: "H. Peter Anvin" <h.peter.anvin@intel.com>,
	"Ingo Molnar" <mingo@elte.hu>, "Tony Luck" <tony.luck@intel.com>,
	"Peter Zijlstra" <peterz@infradead.org>,
	"Stephane Eranian" <eranian@google.com>,
	"Borislav Petkov" <bp@suse.de>,
	"Dave Hansen" <dave.hansen@intel.com>,
	"Nilay Vaish" <nilayvaish@gmail.com>, "Shaohua Li" <shli@fb.com>,
	"David Carrillo-Cisneros" <davidcc@google.com>,
	"Ravi V Shankar" <ravi.v.shankar@intel.com>,
	"Sai Prakhya" <sai.praneeth.prakhya@intel.com>,
	"Vikas Shivappa" <vikas.shivappa@linux.intel.com>,
	"linux-kernel" <linux-kernel@vger.kernel.org>,
	"x86" <x86@kernel.org>, "Fenghua Yu" <fenghua.yu@intel.com>
Subject: [PATCH v3 10/18] x86/intel_rdt: Build structures for each resource based on cache topology
Date: Fri,  7 Oct 2016 19:45:55 -0700
Message-ID: <1475894763-64683-11-git-send-email-fenghua.yu@intel.com>
In-Reply-To: <1475894763-64683-1-git-send-email-fenghua.yu@intel.com>

From: Tony Luck <tony.luck@intel.com>

We use the cpu hotplug callbacks to catch each cpu as it comes online
and look at its cache topology with respect to each of the resources.
As we discover new cache domains, we allocate a bit mask (CBM) array
for the domain and initialize every entry to the default (full access)
value. When the last cpu of a domain goes offline, the domain is freed
again.
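
For reference, the registration pattern added below boils down to a
dynamic cpu hotplug state with an online and an offline callback. A
minimal stand-alone sketch (the example_* names are illustrative;
cpuhp_setup_state() and CPUHP_AP_ONLINE_DYN are the ones the patch
itself uses):

#include <linux/init.h>
#include <linux/cpuhotplug.h>

static int example_online_cpu(unsigned int cpu)
{
	/* look up this cpu's cache domains and add the cpu to them */
	return 0;
}

static int example_offline_cpu(unsigned int cpu)
{
	/* drop the cpu from its domains; free a domain once it is empty */
	return 0;
}

static int __init example_init(void)
{
	/*
	 * Dynamic state slot; the online callback is also invoked for
	 * every cpu that is already online at registration time.
	 */
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_INTEL_RDT_ONLINE",
				 example_online_cpu, example_offline_cpu);
}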

Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
---
 arch/x86/include/asm/intel_rdt.h |  35 ++++++++++
 arch/x86/kernel/cpu/intel_rdt.c  | 135 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 170 insertions(+)

diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
index 251ac2a..bad8dc7 100644
--- a/arch/x86/include/asm/intel_rdt.h
+++ b/arch/x86/include/asm/intel_rdt.h
@@ -40,6 +40,39 @@ struct rdt_resource {
 		if (r->enabled)
 
 #define IA32_L3_CBM_BASE	0xc90
+
+/**
+ * struct rdt_domain - group of cpus sharing an RDT resource
+ * @list:	all instances of this resource
+ * @id:		unique id for this instance
+ * @cpu_mask:	which cpus share this resource
+ * @cbm:	array of cache bit masks (indexed by CLOSID)
+ */
+struct rdt_domain {
+	struct list_head	list;
+	int			id;
+	struct cpumask		cpu_mask;
+	u32			*cbm;
+};
+
+/**
+ * struct msr_param - set a range of MSRs from a domain
+ * @res:       The resource to use
+ * @low:       Beginning index from base MSR
+ * @high:      End index (exclusive)
+ */
+struct msr_param {
+	struct rdt_resource	*res;
+	int			low;
+	int			high;
+};
+
+extern struct static_key_false rdt_enable_key;
+extern struct mutex rdtgroup_mutex;
+
+int __init rdtgroup_init(void);
+
+extern struct rdtgroup *rdtgroup_default;
 extern struct rdt_resource rdt_resources_all[];
 
 enum {
@@ -64,4 +97,6 @@ union cpuid_0x10_1_edx {
 	} split;
 	unsigned int full;
 };
+
+void rdt_cbm_update(void *arg);
 #endif /* _ASM_X86_INTEL_RDT_H */
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index b9b1e9e..76b7476 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -24,12 +24,18 @@
 
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
+#include <linux/cacheinfo.h>
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <linux/cpuhotplug.h>
+
 #include <asm/intel_rdt_common.h>
 #include <asm/intel-family.h>
 #include <asm/intel_rdt.h>
 
+/* Mutex to protect rdtgroup access. */
+DEFINE_MUTEX(rdtgroup_mutex);
+
 int rdt_max_closid;
 
 #define domain_init(name) LIST_HEAD_INIT(rdt_resources_all[name].domains)
@@ -119,14 +125,143 @@ static inline bool get_rdt_resources(struct cpuinfo_x86 *c)
 	return ret;
 }
 
+static int get_cache_id(int cpu, int level)
+{
+	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
+	int i;
+
+	for (i = 0; i < ci->num_leaves; i++)
+		if (ci->info_list[i].level == level)
+			return ci->info_list[i].id;
+	return -1;
+}
+
+void rdt_cbm_update(void *arg)
+{
+	struct msr_param *m = (struct msr_param *)arg;
+	struct rdt_resource *r = m->res;
+	struct rdt_domain *d;
+	struct list_head *l;
+	int i, cpu = smp_processor_id();
+
+	list_for_each(l, &r->domains) {
+		d = list_entry(l, struct rdt_domain, list);
+		if (cpumask_test_cpu(cpu, &d->cpu_mask))
+			goto found;
+	}
+	pr_info_once("cpu %d not found in any domain for resource %s\n", cpu, r->name);
+	return;
+
+found:
+	for (i = m->low; i < m->high; i++)
+		wrmsrl(r->msr_base + i, d->cbm[i]);
+}
+
+static void update_domain(int cpu, struct rdt_resource *r, int add)
+{
+	struct list_head *l;
+	struct rdt_domain *d;
+	int i, cache_id;
+
+	cache_id = get_cache_id(cpu, r->cache_level);
+
+	if (cache_id == -1) {
+		pr_info_once("Could't find cache id for cpu %d\n", cpu);
+		return;
+	}
+	list_for_each(l, &r->domains) {
+		d = list_entry(l, struct rdt_domain, list);
+		if (cache_id == d->id)
+			goto found;
+		if (cache_id < d->id)
+			break;
+	}
+	if (!add) {
+		pr_info_once("removed unknown cpu %d\n", cpu);
+		return;
+	}
+	d = kzalloc(sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return;
+
+	d->id = cache_id;
+	d->cbm = kmalloc_array(r->max_closid, sizeof(*d->cbm), GFP_KERNEL);
+	if (!d->cbm) {
+		pr_info("Failed to alloc CBM array for cpu %d\n", cpu);
+		kfree(d);
+		return;
+	}
+	cpumask_set_cpu(cpu, &d->cpu_mask);
+	for (i = 0; i < r->max_closid; i++) {
+		d->cbm[i] = r->max_cbm;
+		wrmsrl(r->msr_base + i, d->cbm[i]);
+	}
+	list_add_tail(&d->list, l);
+	r->num_domains++;
+	return;
+
+found:
+	if (add) {
+		cpumask_set_cpu(cpu, &d->cpu_mask);
+	} else {
+		cpumask_clear_cpu(cpu, &d->cpu_mask);
+		if (cpumask_empty(&d->cpu_mask)) {
+			r->num_domains--;
+			kfree(d->cbm);
+			list_del(&d->list);
+			kfree(d);
+		}
+	}
+}
+
+static void rdt_reset_pqr_assoc_closid(void *v)
+{
+	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+
+	state->closid = 0;
+	wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, 0);
+}
+
+static int intel_rdt_online_cpu(unsigned int cpu)
+{
+	struct rdt_resource *r;
+
+	mutex_lock(&rdtgroup_mutex);
+	for_each_rdt_resource(r)
+		update_domain(cpu, r, 1);
+	smp_call_function_single(cpu, rdt_reset_pqr_assoc_closid, NULL, 1);
+	mutex_unlock(&rdtgroup_mutex);
+
+	return 0;
+}
+
+static int intel_rdt_offline_cpu(unsigned int cpu)
+{
+	struct rdt_resource *r;
+
+	mutex_lock(&rdtgroup_mutex);
+	for_each_rdt_resource(r)
+		update_domain(cpu, r, 0);
+	mutex_unlock(&rdtgroup_mutex);
+
+	return 0;
+}
+
 static int __init intel_rdt_late_init(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 	struct rdt_resource *r;
+	int ret;
 
 	if (!get_rdt_resources(c))
 		return -ENODEV;
 
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+				"AP_INTEL_RDT_ONLINE",
+				intel_rdt_online_cpu, intel_rdt_offline_cpu);
+	if (ret < 0)
+		return ret;
+
 	for_each_rdt_resource(r)
 		rdt_max_closid = max(rdt_max_closid, r->max_closid);
 
-- 
2.5.0
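
A note on intended use: nothing in this patch calls rdt_cbm_update()
yet; it is declared in the header presumably so that a later patch in
the series (the schemata file support) can push updated bit masks out
to the hardware. A hypothetical sketch of such a caller, assuming
rdtgroup_mutex is held: example_update_cbms() is illustrative and not
part of the series, while struct msr_param, rdt_cbm_update() and the
domain list come from the code above.

#include <linux/list.h>
#include <linux/smp.h>
#include <asm/intel_rdt.h>

/*
 * Push closid's new cache bit mask to the hardware of every domain of
 * a resource.  One cpu per domain is enough because the CBM MSRs are
 * shared by all cpus of that cache instance.
 */
static void example_update_cbms(struct rdt_resource *r, int closid)
{
	struct msr_param msr_param = {
		.res  = r,
		.low  = closid,
		.high = closid + 1,	/* rdt_cbm_update() treats high as exclusive */
	};
	struct rdt_domain *d;
	struct list_head *l;

	list_for_each(l, &r->domains) {
		d = list_entry(l, struct rdt_domain, list);
		/* run rdt_cbm_update() on any one cpu of the domain */
		smp_call_function_any(&d->cpu_mask, rdt_cbm_update,
				      &msr_param, 1);
	}
}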

