From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	George Dunlap <george.dunlap@citrix.com>,
	Ian Jackson <iwj@xenproject.org>, Jan Beulich <jbeulich@suse.com>,
	Julien Grall <julien@xen.org>,
	Stefano Stabellini <sstabellini@kernel.org>, Wei Liu <wl@xen.org>,
	Dario Faggioli <dfaggioli@suse.com>
Subject: [PATCH 11/12] xen/hypfs: add scheduling granularity entry to cpupool entries
Date: Mon, 26 Oct 2020 10:13:15 +0100
Message-ID: <20201026091316.25680-12-jgross@suse.com>
In-Reply-To: <20201026091316.25680-1-jgross@suse.com>

Add a "sched-gran" entry to the per-cpupool hypfs directories.

For now, make this entry read-only and let it contain one of the
strings "cpu", "core" or "socket".

Signed-off-by: Juergen Gross <jgross@suse.com>
---
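Note for reviewers (not part of the patch): a minimal, illustrative sketch
of how the new node could be read from the control domain. It assumes the
libxenhypfs interface (xenhypfs.h) already present in the tools tree; the
calls used are not touched by this series, so treat the exact names and
signatures as indicative only and check them against the tree. Patch 12
then makes the node writable.

#include <stdio.h>
#include <stdlib.h>
#include <xenhypfs.h>

int main(void)
{
    xenhypfs_handle *hdl = xenhypfs_open(NULL, 0);
    char *gran = NULL;

    if ( !hdl )
        return 1;

    /* Expected to yield "cpu", "core" or "socket" for cpupool 0. */
    gran = xenhypfs_read(hdl, "/cpupool/0/sched-gran");
    if ( gran )
    {
        printf("sched-gran: %s\n", gran);
        free(gran);
    }

    xenhypfs_close(hdl);

    return gran ? 0 : 1;
}
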
 docs/misc/hypfs-paths.pandoc |  4 +++
 xen/common/sched/cpupool.c   | 51 +++++++++++++++++++++++++++++++++---
 2 files changed, 52 insertions(+), 3 deletions(-)
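
Also not part of the patch: the hunks below bound the granularity names by
a fixed-width table (SCHED_GRAN_NAME_LEN), both for the mode-to-string
lookup and for the '?' placeholder the "sched-gran" leaf is initialised
with. A standalone, host-compilable sketch of that pattern, with the enum
and the names copied from cpupool.c purely for illustration:

#include <stdio.h>
#include <string.h>

#define SCHED_GRAN_NAME_LEN  8

enum sched_gran { SCHED_GRAN_cpu, SCHED_GRAN_core, SCHED_GRAN_socket };

static const struct {
    enum sched_gran mode;
    char name[SCHED_GRAN_NAME_LEN];
} sg_name[] = {
    { SCHED_GRAN_cpu,    "cpu"    },
    { SCHED_GRAN_core,   "core"   },
    { SCHED_GRAN_socket, "socket" },
};

/*
 * Placeholder sized like granstr: 7 '?' characters plus a NUL terminator.
 * The "[a ... b]" range designator is a GCC/Clang extension, as used in
 * the hunk below.
 */
static char placeholder[SCHED_GRAN_NAME_LEN] = {
    [0 ... SCHED_GRAN_NAME_LEN - 2] = '?',
    [SCHED_GRAN_NAME_LEN - 1] = 0
};

static const char *gran_name(enum sched_gran mode)
{
    unsigned int i;

    for ( i = 0; i < sizeof(sg_name) / sizeof(sg_name[0]); i++ )
        if ( sg_name[i].mode == mode )
            return sg_name[i].name;

    return "";
}

int main(void)
{
    /* Any name returned fits into a buffer sized like the placeholder. */
    printf("placeholder max len %zu, core -> \"%s\"\n",
           strlen(placeholder), gran_name(SCHED_GRAN_core));

    return 0;
}

My reading of the hypfs interfaces used here is that the placeholder is
what the leaf's reported size is taken from, while the actual string is
produced at read time; corrections welcome if that is not the intent.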

diff --git a/docs/misc/hypfs-paths.pandoc b/docs/misc/hypfs-paths.pandoc
index aaca1cdf92..f1ce24d7fe 100644
--- a/docs/misc/hypfs-paths.pandoc
+++ b/docs/misc/hypfs-paths.pandoc
@@ -184,6 +184,10 @@ A directory of all current cpupools.
 The individual cpupools. Each entry is a directory with the name being the
 cpupool-id (e.g. /cpupool/0/).
 
+#### /cpupool/*/sched-gran = ("cpu" | "core" | "socket")
+
+The scheduling granularity of a cpupool.
+
 #### /params/
 
 A directory of runtime parameters.
diff --git a/xen/common/sched/cpupool.c b/xen/common/sched/cpupool.c
index 8612ee5cf6..8674ac0fdd 100644
--- a/xen/common/sched/cpupool.c
+++ b/xen/common/sched/cpupool.c
@@ -42,9 +42,10 @@ static DEFINE_SPINLOCK(cpupool_lock);
 static enum sched_gran __read_mostly opt_sched_granularity = SCHED_GRAN_cpu;
 static unsigned int __read_mostly sched_granularity = 1;
 
+#define SCHED_GRAN_NAME_LEN  8
 struct sched_gran_name {
     enum sched_gran mode;
-    char name[8];
+    char name[SCHED_GRAN_NAME_LEN];
 };
 
 static const struct sched_gran_name sg_name[] = {
@@ -53,7 +54,7 @@ static const struct sched_gran_name sg_name[] = {
     {SCHED_GRAN_socket, "socket"},
 };
 
-static void sched_gran_print(enum sched_gran mode, unsigned int gran)
+static const char *sched_gran_get_name(enum sched_gran mode)
 {
     const char *name = "";
     unsigned int i;
@@ -67,8 +68,13 @@ static void sched_gran_print(enum sched_gran mode, unsigned int gran)
         }
     }
 
+    return name;
+}
+
+static void sched_gran_print(enum sched_gran mode, unsigned int gran)
+{
     printk("Scheduling granularity: %s, %u CPU%s per sched-resource\n",
-           name, gran, gran == 1 ? "" : "s");
+           sched_gran_get_name(mode), gran, gran == 1 ? "" : "s");
 }
 
 #ifdef CONFIG_HAS_SCHED_GRANULARITY
@@ -1057,6 +1063,43 @@ static struct hypfs_entry *cpupool_dir_findentry(struct hypfs_entry_dir *dir,
     return hypfs_gen_dyndir_entry_id(&cpupool_pooldir, id);
 }
 
+static int cpupool_gran_read(const struct hypfs_entry *entry,
+                             XEN_GUEST_HANDLE_PARAM(void) uaddr)
+{
+    const struct hypfs_dyndir_id *data;
+    struct cpupool *cpupool;
+    const char *name = "";
+
+    data = hypfs_get_dyndata();
+    if ( !data )
+        return -ENOENT;
+
+    spin_lock(&cpupool_lock);
+
+    cpupool = __cpupool_find_by_id(data->id, true);
+    if ( cpupool )
+        name = sched_gran_get_name(cpupool->gran);
+
+    spin_unlock(&cpupool_lock);
+
+    if ( !cpupool )
+        return -ENOENT;
+
+    return copy_to_guest(uaddr, name, strlen(name) + 1) ? -EFAULT : 0;
+}
+
+static struct hypfs_funcs cpupool_gran_funcs = {
+    .read = cpupool_gran_read,
+    .getsize = hypfs_getsize,
+};
+
+static HYPFS_VARSIZE_INIT(cpupool_gran, XEN_HYPFS_TYPE_STRING, "sched-gran",
+                          0, &cpupool_gran_funcs);
+static char granstr[SCHED_GRAN_NAME_LEN] = {
+    [0 ... SCHED_GRAN_NAME_LEN - 2] = '?',
+    [SCHED_GRAN_NAME_LEN - 1] = 0
+};
+
 static struct hypfs_funcs cpupool_dir_funcs = {
     .read = cpupool_dir_read,
     .getsize = cpupool_dir_getsize,
@@ -1075,6 +1118,8 @@ static int __init cpupool_init(void)
 
 #ifdef CONFIG_HYPFS
     hypfs_add_dir(&hypfs_root, &cpupool_dir, true);
+    hypfs_string_set_reference(&cpupool_gran, granstr);
+    hypfs_add_leaf(&cpupool_pooldir, &cpupool_gran, true);
 #endif
 
     cpupool0 = cpupool_create(0, 0, &err);
-- 
2.26.2



Thread overview: 42+ messages
2020-10-26  9:13 [PATCH 00/12] xen: support per-cpupool scheduling granularity Juergen Gross
2020-10-26  9:13 ` [PATCH 01/12] xen/cpupool: add cpu to sched_res_mask when removing it from cpupool Juergen Gross
2020-11-11 14:32   ` Dario Faggioli
2020-11-11 14:43     ` Jürgen Groß
2020-10-26  9:13 ` [PATCH 02/12] xen/cpupool: add missing bits for per-cpupool scheduling granularity Juergen Gross
2020-11-11 14:38   ` Dario Faggioli
2020-10-26  9:13 ` [PATCH 03/12] xen/sched: support moving a domain between cpupools with different granularity Juergen Gross
2020-10-26  9:13 ` [PATCH 04/12] xen/sched: sort included headers in cpupool.c Juergen Gross
2020-11-11 14:40   ` Dario Faggioli
2020-10-26  9:13 ` [PATCH 05/12] docs: fix hypfs path documentation Juergen Gross
2020-10-26  9:36   ` Jan Beulich
2020-10-26  9:13 ` [PATCH 06/12] xen/hypfs: move per-node function pointers into a dedicated struct Juergen Gross
2020-11-17 11:18   ` Jan Beulich
2020-11-17 14:19     ` Jürgen Groß
2020-10-26  9:13 ` [PATCH 07/12] xen/hypfs: pass real failure reason up from hypfs_get_entry() Juergen Gross
2020-11-17 11:23   ` Jan Beulich
2020-10-26  9:13 ` [PATCH 08/12] xen/hypfs: support dynamic hypfs nodes Juergen Gross
2020-11-17 12:37   ` Jan Beulich
2020-11-17 14:29     ` Jürgen Groß
2020-11-17 14:40       ` Jan Beulich
2020-11-17 15:07         ` Jürgen Groß
2020-10-26  9:13 ` [PATCH 09/12] xen/hypfs: add support for id-based dynamic directories Juergen Gross
2020-11-17 13:33   ` Jan Beulich
2020-11-17 14:38     ` Jürgen Groß
2020-11-17 14:50       ` Jan Beulich
2020-11-17 15:15         ` Jürgen Groß
2020-10-26  9:13 ` [PATCH 10/12] xen/hypfs: add cpupool directories Juergen Gross
2020-11-11 14:51   ` Dario Faggioli
2020-11-11 14:56     ` Jan Beulich
2020-11-11 15:00       ` Jürgen Groß
2020-11-11 15:11         ` Dario Faggioli
2020-11-11 14:56     ` Jürgen Groß
2020-11-11 14:58       ` Dario Faggioli
2020-11-17 14:13   ` Jan Beulich
2020-11-17 15:01     ` Jürgen Groß
2020-10-26  9:13 ` Juergen Gross [this message]
2020-11-11 15:21   ` [PATCH 11/12] xen/hypfs: add scheduling granularity entry to cpupool entries Dario Faggioli
2020-11-17 16:49   ` Jan Beulich
2020-11-17 17:05     ` Jürgen Groß
2020-10-26  9:13 ` [PATCH 12/12] xen/cpupool: make per-cpupool sched-gran hypfs node writable Juergen Gross
2020-10-29 14:58   ` Jan Beulich
2020-10-29 14:59     ` Jürgen Groß
