From: "Fenghua Yu" <fenghua.yu@intel.com>
To: "Thomas Gleixner" <tglx@linutronix.de>,
	"Ingo Molnar" <mingo@elte.hu>,
	"H. Peter Anvin" <h.peter.anvin@intel.com>,
	"Tony Luck" <tony.luck@intel.com>, "Tejun Heo" <tj@kernel.org>,
	"Borislav Petkov" <bp@suse.de>,
	"Stephane Eranian" <eranian@google.com>,
	"Peter Zijlstra" <peterz@infradead.org>,
	"Marcelo Tosatti" <mtosatti@redhat.com>,
	"David Carrillo-Cisneros" <davidcc@google.com>,
	"Ravi V Shankar" <ravi.v.shankar@intel.com>,
	"Vikas Shivappa" <vikas.shivappa@linux.intel.com>,
	"Sai Prakhya" <sai.praneeth.prakhya@intel.com>
Cc: "linux-kernel" <linux-kernel@vger.kernel.org>,
	"x86" <x86@kernel.org>, "Fenghua Yu" <fenghua.yu@intel.com>
Subject: [PATCH 26/32] x86/intel_rdt_rdtgroup.c: Create info directory
Date: Tue, 12 Jul 2016 18:02:59 -0700	[thread overview]
Message-ID: <1468371785-53231-27-git-send-email-fenghua.yu@intel.com> (raw)
In-Reply-To: <1468371785-53231-1-git-send-email-fenghua.yu@intel.com>

From: Fenghua Yu <fenghua.yu@intel.com>

During boot, the "info" directory is set up under the rscctrl root.
It contains one "info" file and one resource-specific sub-directory
for each enabled resource.

If L3 is enabled, an "l3" sub-directory is created under the "info"
directory. It contains three L3-specific info files:
max_closid, max_cbm_len, and domain_to_cache_id.
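
The resulting layout looks like this (a sketch only; the directory and
file names come from this patch, the tree rendering is illustrative):

  info
  |-- info
  `-- l3
      |-- max_closid
      |-- max_cbm_len
      `-- domain_to_cache_id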

The "info" directory is exposed to the user after rscctrl is mounted.
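
A minimal usage sketch, assuming rscctrl gets mounted at /sys/fs/rscctrl
(the mount point and the values shown below are illustrative assumptions,
not taken from this patch):

  # mount -t rscctrl rscctrl /sys/fs/rscctrl
  # cat /sys/fs/rscctrl/info/l3/max_closid
  16
  # cat /sys/fs/rscctrl/info/l3/max_cbm_len
  20
  # cat /sys/fs/rscctrl/info/l3/domain_to_cache_id
  0:0
  1:1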

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Reviewed-by: Tony Luck <tony.luck@intel.com>
---
 arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 259 +++++++++++++++++++++++++++++++
 1 file changed, 259 insertions(+)

diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index e1936d2..b2140a8 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -238,6 +238,179 @@ static int get_shared_domain(int domain, int level)
 
 	return -1;
 }
+static void rdt_info_show_cat(struct seq_file *seq, int level)
+{
+	int domain;
+	int domain_num = get_domain_num(level);
+	int closid;
+	u64 cbm;
+	struct clos_cbm_table **cctable;
+	int maxid;
+	int shared_domain;
+	int cnt;
+
+	if (level == CACHE_LEVEL3)
+		cctable = l3_cctable;
+	else
+		return;
+
+	maxid = cconfig.max_closid;
+	for (domain = 0; domain < domain_num; domain++) {
+		seq_printf(seq, "domain %d:\n", domain);
+		shared_domain = get_shared_domain(domain, level);
+		for (closid = 0; closid < maxid; closid++) {
+			int dindex, iindex;
+
+			if (test_bit(closid,
+			(unsigned long *)cconfig.closmap[shared_domain])) {
+				dindex = get_dcbm_table_index(closid);
+				cbm = cctable[domain][dindex].cbm;
+				cnt = cctable[domain][dindex].clos_refcnt;
+				seq_printf(seq, "cbm[%d]=%lx, refcnt=%d\n",
+					 dindex, (unsigned long)cbm, cnt);
+				if (cdp_enabled) {
+					iindex = get_icbm_table_index(closid);
+					cbm = cctable[domain][iindex].cbm;
+					cnt =
+					   cctable[domain][iindex].clos_refcnt;
+					seq_printf(seq,
+						   "cbm[%d]=%lx, refcnt=%d\n",
+						   iindex, (unsigned long)cbm,
+						   cnt);
+				}
+			} else {
+				cbm = max_cbm(level);
+				cnt = 0;
+				dindex = get_dcbm_table_index(closid);
+				seq_printf(seq, "cbm[%d]=%lx, refcnt=%d\n",
+					 dindex, (unsigned long)cbm, cnt);
+				if (cdp_enabled) {
+					iindex = get_icbm_table_index(closid);
+					seq_printf(seq,
+						 "cbm[%d]=%lx, refcnt=%d\n",
+						 iindex, (unsigned long)cbm,
+						 cnt);
+				}
+			}
+		}
+	}
+}
+
+static void show_shared_domain(struct seq_file *seq)
+{
+	int domain;
+
+	seq_puts(seq, "Shared domains:\n");
+
+	for_each_cache_domain(domain, 0, shared_domain_num) {
+		struct shared_domain *sd;
+
+		sd = &shared_domain[domain];
+		seq_printf(seq, "domain[%d]:", domain);
+		if (cat_enabled(CACHE_LEVEL3))
+			seq_printf(seq, "l3_domain=%d ", sd->l3_domain);
+		seq_printf(seq, "cpumask=%*pb\n",
+			   cpumask_pr_args(&sd->cpumask));
+	}
+}
+
+static int rdt_info_show(struct seq_file *seq, void *v)
+{
+	show_shared_domain(seq);
+
+	if (cat_l3_enabled) {
+		if (rdt_opts.verbose)
+			rdt_info_show_cat(seq, CACHE_LEVEL3);
+	}
+
+	seq_puts(seq, "\n");
+
+	return 0;
+}
+
+static int res_type_to_level(enum resource_type res_type, int *level)
+{
+	int ret = 0;
+
+	switch (res_type) {
+	case RESOURCE_L3:
+		*level = CACHE_LEVEL3;
+		break;
+	case RESOURCE_NUM:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int domain_to_cache_id_show(struct seq_file *seq, void *v)
+{
+	struct kernfs_open_file *of = seq->private;
+	enum resource_type res_type;
+	int domain;
+	int leaf;
+	int level = 0;
+	int ret;
+
+	res_type = (enum resource_type)of->kn->parent->priv;
+
+	ret = res_type_to_level(res_type, &level);
+	if (ret)
+		return 0;
+
+	leaf = get_cache_leaf(level, 0);
+
+	for (domain = 0; domain < get_domain_num(level); domain++) {
+		unsigned int cid;
+
+		cid = cache_domains[leaf].shared_cache_id[domain];
+		seq_printf(seq, "%d:%d\n", domain, cid);
+	}
+
+	return 0;
+}
+
+static struct rftype info_files[] = {
+	{
+		.name = "info",
+		.seq_show = rdt_info_show,
+	},
+	{ }	/* terminate */
+};
+
+/* rdtgroup information files for one cache resource. */
+static struct rftype res_info_files[] = {
+	{
+		.name = "max_closid",
+		.seq_show = rdt_max_closid_show,
+	},
+	{
+		.name = "max_cbm_len",
+		.seq_show = rdt_max_cbm_len_show,
+	},
+	{
+		.name = "domain_to_cache_id",
+		.seq_show = domain_to_cache_id_show,
+	},
+	{ }	/* terminate */
+};
+
+static int info_populate_dir(struct kernfs_node *kn)
+{
+	struct rftype *rfts;
+
+	rfts = info_files;
+	return rdtgroup_addrm_files(kn, rfts, true);
+}
+
+static int res_info_populate_dir(struct kernfs_node *kn)
+{
+	struct rftype *rfts;
+
+	rfts = res_info_files;
+	return rdtgroup_addrm_files(kn, rfts, true);
+}
 
 static int rdtgroup_populate_dir(struct kernfs_node *kn)
 {
@@ -377,6 +550,90 @@ static char *res_info_dir_name(enum resource_type res_type, char *name)
 	return name;
 }
 
+static int create_res_info(enum resource_type res_type,
+			   struct kernfs_node *parent_kn)
+{
+	struct kernfs_node *kn;
+	char name[RDTGROUP_FILE_NAME_MAX];
+	int ret;
+
+	res_info_dir_name(res_type, name);
+	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, NULL);
+	if (IS_ERR(kn)) {
+		ret = PTR_ERR(kn);
+		goto out;
+	}
+
+	/*
+	 * This extra ref will be put in kernfs_remove() and guarantees
+	 * that @rdtgrp->kn is always accessible.
+	 */
+	kernfs_get(kn);
+
+	ret = rdtgroup_kn_set_ugid(kn);
+	if (ret)
+		goto out_destroy;
+
+	ret = res_info_populate_dir(kn);
+	if (ret)
+		goto out_destroy;
+
+	kernfs_activate(kn);
+
+	ret = 0;
+	goto out;
+
+out_destroy:
+	kernfs_remove(kn);
+out:
+	return ret;
+
+}
+
+static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn,
+				    const char *name)
+{
+	struct kernfs_node *kn;
+	int ret;
+
+	if (parent_kn != root_rdtgrp->kn)
+		return -EPERM;
+
+	/* create the directory */
+	kn = kernfs_create_dir(parent_kn, "info", parent_kn->mode, root_rdtgrp);
+	if (IS_ERR(kn)) {
+		ret = PTR_ERR(kn);
+		goto out;
+	}
+
+	ret = info_populate_dir(kn);
+	if (ret)
+		goto out_destroy;
+
+	if (cat_enabled(CACHE_LEVEL3))
+		create_res_info(RESOURCE_L3, kn);
+
+	/*
+	 * This extra ref will be put in kernfs_remove() and guarantees
+	 * that @rdtgrp->kn is always accessible.
+	 */
+	kernfs_get(kn);
+
+	ret = rdtgroup_kn_set_ugid(kn);
+	if (ret)
+		goto out_destroy;
+
+	kernfs_activate(kn);
+
+	ret = 0;
+	goto out;
+
+out_destroy:
+	kernfs_remove(kn);
+out:
+	return ret;
+}
+
 static int rdtgroup_setup_root(struct rdtgroup_root *root,
 			       unsigned long ss_mask)
 {
@@ -411,6 +668,8 @@ static int rdtgroup_setup_root(struct rdtgroup_root *root,
 	if (ret)
 		goto destroy_root;
 
+	rdtgroup_create_info_dir(root->kf_root->kn, "info_dir");
+
 	/*
 	 * Link the root rdtgroup in this hierarchy into all the css_set
 	 * objects.
-- 
2.5.0


Thread overview: 81+ messages
2016-07-13  1:02 [PATCH 00/32] Enable Intel Resource Allocation in Resource Director Technology Fenghua Yu
2016-07-13  1:02 ` [PATCH 01/32] x86/intel_rdt: Cache Allocation documentation Fenghua Yu
2016-07-13  1:02 ` [PATCH 02/32] x86/intel_rdt: Add support for Cache Allocation detection Fenghua Yu
2016-07-26 19:00   ` Nilay Vaish
2016-07-13  1:02 ` [PATCH 03/32] x86/intel_rdt: Add Class of service management Fenghua Yu
2016-07-13  1:02 ` [PATCH 04/32] x86/intel_rdt: Add L3 cache capacity bitmask management Fenghua Yu
2016-07-22  7:12   ` Marcelo Tosatti
2016-07-22 21:43     ` Luck, Tony
2016-07-23  4:31       ` Marcelo Tosatti
2016-07-26  3:18         ` Luck, Tony
2016-07-26 17:10         ` Shivappa Vikas
2016-07-13  1:02 ` [PATCH 05/32] x86/intel_rdt: Implement scheduling support for Intel RDT Fenghua Yu
2016-07-25 16:25   ` Nilay Vaish
2016-07-25 16:31   ` Nilay Vaish
2016-07-25 18:05     ` Luck, Tony
2016-07-25 22:47       ` David Carrillo-Cisneros
2016-07-13  1:02 ` [PATCH 06/32] x86/intel_rdt: Hot cpu support for Cache Allocation Fenghua Yu
2016-07-13  9:19   ` Thomas Gleixner
2016-07-21 19:46     ` Shivappa Vikas
2016-07-14  0:40   ` David Carrillo-Cisneros
2016-07-14 22:58     ` Yu, Fenghua
2016-07-13  1:02 ` [PATCH 07/32] x86/intel_rdt: Intel haswell Cache Allocation enumeration Fenghua Yu
2016-07-13  1:02 ` [PATCH 08/32] Define CONFIG_INTEL_RDT Fenghua Yu
2016-07-13 10:25   ` Thomas Gleixner
2016-07-13 18:05     ` Yu, Fenghua
2016-07-13 21:09       ` Thomas Gleixner
2016-07-13 21:18         ` Yu, Fenghua
2016-07-13  1:02 ` [PATCH 09/32] x86/intel_rdt: Intel Code Data Prioritization detection Fenghua Yu
2016-07-13  1:02 ` [PATCH 10/32] x86/intel_rdt: Adds support to enable Code Data Prioritization Fenghua Yu
2016-07-26 19:23   ` Nilay Vaish
2016-07-26 20:32     ` Shivappa Vikas
2016-07-13  1:02 ` [PATCH 11/32] x86/intel_rdt: Class of service and capacity bitmask management for CDP Fenghua Yu
2016-07-13  1:02 ` [PATCH 12/32] x86/intel_rdt: Hot cpu update for code data prioritization Fenghua Yu
2016-07-13  1:02 ` [PATCH 13/32] Documentation, x86: Documentation for Intel resource allocation user interface Fenghua Yu
2016-07-13 12:47   ` Thomas Gleixner
2016-07-13 17:13     ` Luck, Tony
2016-07-14  6:53       ` Thomas Gleixner
2016-07-14 17:16         ` Luck, Tony
2016-07-19 12:32           ` Thomas Gleixner
2016-08-04 23:38             ` Yu, Fenghua
2016-07-27 16:20   ` Nilay Vaish
2016-07-27 16:57     ` Luck, Tony
2016-08-03 22:15   ` Marcelo Tosatti
2016-07-13  1:02 ` [PATCH 14/32] x86/cpufeatures: Get max closid and max cbm len and clean feature comments and code Fenghua Yu
2016-07-27 16:49   ` Nilay Vaish
2016-07-13  1:02 ` [PATCH 15/32] cacheinfo: Introduce cache id Fenghua Yu
2016-07-27 17:04   ` Nilay Vaish
2016-07-13  1:02 ` [PATCH 16/32] Documentation, ABI: Add a document entry for " Fenghua Yu
2016-07-13  1:02 ` [PATCH 17/32] x86, intel_cacheinfo: Enable cache id in x86 Fenghua Yu
2016-07-28  5:41   ` Nilay Vaish
2016-07-13  1:02 ` [PATCH 18/32] drivers/base/cacheinfo.c: Export some cacheinfo functions for others to use Fenghua Yu
2016-07-13  1:02 ` [PATCH 19/32] sched.h: Add rg_list and rdtgroup in task_struct Fenghua Yu
2016-07-13 12:56   ` Thomas Gleixner
2016-07-13 17:50     ` Yu, Fenghua
2016-07-28  5:53   ` Nilay Vaish
2016-07-13  1:02 ` [PATCH 20/32] magic number for rscctrl file system Fenghua Yu
2016-07-28  5:57   ` Nilay Vaish
2016-07-13  1:02 ` [PATCH 21/32] x86/intel_rdt.h: Header for inter_rdt.c Fenghua Yu
2016-07-28 14:07   ` Nilay Vaish
2016-07-13  1:02 ` [PATCH 22/32] x86/intel_rdt_rdtgroup.h: Header for user interface Fenghua Yu
2016-07-13  1:02 ` [PATCH 23/32] x86/intel_rdt.c: Extend RDT to per cache and per resources Fenghua Yu
2016-07-13 13:07   ` Thomas Gleixner
2016-07-13 17:40     ` Yu, Fenghua
2016-07-13  1:02 ` [PATCH 24/32] Task fork and exit for rdtgroup Fenghua Yu
2016-07-13 13:14   ` Thomas Gleixner
2016-07-13 17:32     ` Yu, Fenghua
2016-07-13 21:02       ` Thomas Gleixner
2016-07-13 21:22         ` Yu, Fenghua
2016-07-13  1:02 ` [PATCH 25/32] x86/intel_rdt_rdtgroup.c: User interface for RDT Fenghua Yu
2016-07-14 12:30   ` Thomas Gleixner
2016-07-13  1:02 ` Fenghua Yu [this message]
2016-07-13  1:03 ` [PATCH 27/32] x86/intel_rdt_rdtgroup.c: Implement rscctrl file system commands Fenghua Yu
2016-07-13  1:03 ` [PATCH 28/32] x86/intel_rdt_rdtgroup.c: Read and write cpus Fenghua Yu
2016-07-13  1:03 ` [PATCH 29/32] x86/intel_rdt_rdtgroup.c: Tasks iterator and write Fenghua Yu
2016-07-13  1:03 ` [PATCH 30/32] x86/intel_rdt_rdtgroup.c: Process schemas input from rscctrl interface Fenghua Yu
2016-07-14  0:41   ` David Carrillo-Cisneros
2016-07-14  6:11     ` Thomas Gleixner
2016-07-14  6:16       ` Yu, Fenghua
2016-07-14  6:32     ` Yu, Fenghua
2016-07-13  1:03 ` [PATCH 31/32] MAINTAINERS: Add maintainer for Intel RDT resource allocation Fenghua Yu
2016-07-13  1:03 ` [PATCH 32/32] x86/Makefile: Build intel_rdt_rdtgroup.c Fenghua Yu
