From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>,
Andrew Cooper <andrew.cooper3@citrix.com>,
George Dunlap <george.dunlap@citrix.com>,
Ian Jackson <iwj@xenproject.org>, Jan Beulich <jbeulich@suse.com>,
Julien Grall <julien@xen.org>,
Stefano Stabellini <sstabellini@kernel.org>, Wei Liu <wl@xen.org>,
Dario Faggioli <dfaggioli@suse.com>
Subject: [PATCH v4 4/5] xen/cpupool: add scheduling granularity entry to cpupool entries
Date: Mon, 18 Jan 2021 12:55:15 +0100 [thread overview]
Message-ID: <20210118115516.11001-5-jgross@suse.com> (raw)
In-Reply-To: <20210118115516.11001-1-jgross@suse.com>
Add a "sched-gran" entry to the per-cpupool hypfs directories.
For now make this entry read-only and let it contain one of the
strings "cpu", "core" or "socket".
Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- added const (Jan Beulich)
- modify test in cpupool_gran_read() (Jan Beulich)
---
docs/misc/hypfs-paths.pandoc | 4 ++
xen/common/sched/cpupool.c | 72 ++++++++++++++++++++++++++++++++++--
2 files changed, 72 insertions(+), 4 deletions(-)
diff --git a/docs/misc/hypfs-paths.pandoc b/docs/misc/hypfs-paths.pandoc
index aaca1cdf92..f1ce24d7fe 100644
--- a/docs/misc/hypfs-paths.pandoc
+++ b/docs/misc/hypfs-paths.pandoc
@@ -184,6 +184,10 @@ A directory of all current cpupools.
The individual cpupools. Each entry is a directory with the name being the
cpupool-id (e.g. /cpupool/0/).
+#### /cpupool/*/sched-gran = ("cpu" | "core" | "socket")
+
+The scheduling granularity of a cpupool.
+
#### /params/
A directory of runtime parameters.
diff --git a/xen/common/sched/cpupool.c b/xen/common/sched/cpupool.c
index f293ba0cc4..e2011367bd 100644
--- a/xen/common/sched/cpupool.c
+++ b/xen/common/sched/cpupool.c
@@ -41,9 +41,10 @@ static DEFINE_SPINLOCK(cpupool_lock);
static enum sched_gran __read_mostly opt_sched_granularity = SCHED_GRAN_cpu;
static unsigned int __read_mostly sched_granularity = 1;
+#define SCHED_GRAN_NAME_LEN 8
struct sched_gran_name {
enum sched_gran mode;
- char name[8];
+ char name[SCHED_GRAN_NAME_LEN];
};
static const struct sched_gran_name sg_name[] = {
@@ -52,7 +53,7 @@ static const struct sched_gran_name sg_name[] = {
{SCHED_GRAN_socket, "socket"},
};
-static void sched_gran_print(enum sched_gran mode, unsigned int gran)
+static const char *sched_gran_get_name(enum sched_gran mode)
{
const char *name = "";
unsigned int i;
@@ -66,8 +67,13 @@ static void sched_gran_print(enum sched_gran mode, unsigned int gran)
}
}
+ return name;
+}
+
+static void sched_gran_print(enum sched_gran mode, unsigned int gran)
+{
printk("Scheduling granularity: %s, %u CPU%s per sched-resource\n",
- name, gran, gran == 1 ? "" : "s");
+ sched_gran_get_name(mode), gran, gran == 1 ? "" : "s");
}
#ifdef CONFIG_HAS_SCHED_GRANULARITY
@@ -1014,10 +1020,16 @@ static int cpupool_dir_read(const struct hypfs_entry *entry,
XEN_GUEST_HANDLE_PARAM(void) uaddr)
{
int ret = 0;
- const struct cpupool *c;
+ struct cpupool *c;
+ struct hypfs_dyndir_id *data;
+
+ data = hypfs_get_dyndata();
list_for_each_entry(c, &cpupool_list, list)
{
+ data->id = c->cpupool_id;
+ data->data = c;
+
ret = hypfs_read_dyndir_id_entry(&cpupool_pooldir, c->cpupool_id,
list_is_last(&c->list, &cpupool_list),
&uaddr);
@@ -1080,6 +1092,56 @@ static struct hypfs_entry *cpupool_dir_findentry(
return hypfs_gen_dyndir_id_entry(&cpupool_pooldir, id, cpupool);
}
+static int cpupool_gran_read(const struct hypfs_entry *entry,
+ XEN_GUEST_HANDLE_PARAM(void) uaddr)
+{
+ const struct hypfs_dyndir_id *data;
+ const struct cpupool *cpupool;
+ const char *gran;
+
+ data = hypfs_get_dyndata();
+ cpupool = data->data;
+ ASSERT(cpupool);
+
+ gran = sched_gran_get_name(cpupool->gran);
+
+ if ( !*gran )
+ return -ENOENT;
+
+ return copy_to_guest(uaddr, gran, strlen(gran) + 1) ? -EFAULT : 0;
+}
+
+static unsigned int hypfs_gran_getsize(const struct hypfs_entry *entry)
+{
+ const struct hypfs_dyndir_id *data;
+ const struct cpupool *cpupool;
+ const char *gran;
+
+ data = hypfs_get_dyndata();
+ cpupool = data->data;
+ ASSERT(cpupool);
+
+ gran = sched_gran_get_name(cpupool->gran);
+
+ return strlen(gran) + 1;
+}
+
+static const struct hypfs_funcs cpupool_gran_funcs = {
+ .enter = hypfs_node_enter,
+ .exit = hypfs_node_exit,
+ .read = cpupool_gran_read,
+ .write = hypfs_write_deny,
+ .getsize = hypfs_gran_getsize,
+ .findentry = hypfs_leaf_findentry,
+};
+
+static HYPFS_VARSIZE_INIT(cpupool_gran, XEN_HYPFS_TYPE_STRING, "sched-gran",
+ 0, &cpupool_gran_funcs);
+static char granstr[SCHED_GRAN_NAME_LEN] = {
+ [0 ... SCHED_GRAN_NAME_LEN - 2] = '?',
+ [SCHED_GRAN_NAME_LEN - 1] = 0
+};
+
static const struct hypfs_funcs cpupool_dir_funcs = {
.enter = cpupool_dir_enter,
.exit = cpupool_dir_exit,
@@ -1095,6 +1157,8 @@ static void cpupool_hypfs_init(void)
{
hypfs_add_dir(&hypfs_root, &cpupool_dir, true);
hypfs_add_dyndir(&cpupool_dir, &cpupool_pooldir);
+ hypfs_string_set_reference(&cpupool_gran, granstr);
+ hypfs_add_leaf(&cpupool_pooldir, &cpupool_gran, true);
}
#else /* CONFIG_HYPFS */
--
2.26.2