From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	George Dunlap <george.dunlap@citrix.com>,
	Ian Jackson <iwj@xenproject.org>, Jan Beulich <jbeulich@suse.com>,
	Julien Grall <julien@xen.org>,
	Stefano Stabellini <sstabellini@kernel.org>, Wei Liu <wl@xen.org>,
	Dario Faggioli <dfaggioli@suse.com>
Subject: [PATCH v2 17/17] xen/cpupool: make per-cpupool sched-gran hypfs node writable
Date: Tue,  1 Dec 2020 09:21:28 +0100	[thread overview]
Message-ID: <20201201082128.15239-18-jgross@suse.com> (raw)
In-Reply-To: <20201201082128.15239-1-jgross@suse.com>

Make /cpupool/<id>/sched-gran in hypfs writable. This allows the
scheduling granularity to be selected per cpupool.

Writing this node is allowed only when no cpu is assigned to the cpupool.
Allowed values are "cpu", "core" and "socket".
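
For illustration, writing the node from dom0 could look like the sketch
below. It is only an assumption about tool-side usage via libxenhypfs
(xenhypfs_open()/xenhypfs_write()); it is not part of this patch, and the
cpupool id 1 is just an example:

    #include <stdio.h>
    #include <xenhypfs.h>

    int main(void)
    {
        xenhypfs_handle *h = xenhypfs_open(NULL, 0);
        int rc;

        if ( !h )
            return 1;

        /* Succeeds only while the cpupool has no cpus assigned. */
        rc = xenhypfs_write(h, "/cpupool/1/sched-gran", "core");
        if ( rc )
            fprintf(stderr, "setting sched-gran failed: %d\n", rc);

        xenhypfs_close(h);

        return rc ? 1 : 0;
    }

The same should be achievable with the xenhypfs command line tool, e.g.
"xenhypfs write /cpupool/1/sched-gran core".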

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- test user parameters earlier (Jan Beulich)
---
 docs/misc/hypfs-paths.pandoc |  5 ++-
 xen/common/sched/cpupool.c   | 70 ++++++++++++++++++++++++++++++------
 2 files changed, 63 insertions(+), 12 deletions(-)

diff --git a/docs/misc/hypfs-paths.pandoc b/docs/misc/hypfs-paths.pandoc
index f1ce24d7fe..e86f7d0dbe 100644
--- a/docs/misc/hypfs-paths.pandoc
+++ b/docs/misc/hypfs-paths.pandoc
@@ -184,10 +184,13 @@ A directory of all current cpupools.
 The individual cpupools. Each entry is a directory with the name being the
 cpupool-id (e.g. /cpupool/0/).
 
-#### /cpupool/*/sched-gran = ("cpu" | "core" | "socket")
+#### /cpupool/*/sched-gran = ("cpu" | "core" | "socket") [w]
 
 The scheduling granularity of a cpupool.
 
+Writing a value is allowed only for cpupools with no cpu assigned and only
+if the architecture supports different scheduling granularities.
+
 #### /params/
 
 A directory of runtime parameters.
diff --git a/xen/common/sched/cpupool.c b/xen/common/sched/cpupool.c
index cfc75ccbe4..b1d9507978 100644
--- a/xen/common/sched/cpupool.c
+++ b/xen/common/sched/cpupool.c
@@ -77,7 +77,7 @@ static void sched_gran_print(enum sched_gran mode, unsigned int gran)
 }
 
 #ifdef CONFIG_HAS_SCHED_GRANULARITY
-static int __init sched_select_granularity(const char *str)
+static int sched_gran_get(const char *str, enum sched_gran *mode)
 {
     unsigned int i;
 
@@ -85,36 +85,43 @@ static int __init sched_select_granularity(const char *str)
     {
         if ( strcmp(sg_name[i].name, str) == 0 )
         {
-            opt_sched_granularity = sg_name[i].mode;
+            *mode = sg_name[i].mode;
             return 0;
         }
     }
 
     return -EINVAL;
 }
+
+static int __init sched_select_granularity(const char *str)
+{
+    return sched_gran_get(str, &opt_sched_granularity);
+}
 custom_param("sched-gran", sched_select_granularity);
+#else
+static int sched_gran_get(const char *str, enum sched_gran *mode)
+{
+    return -EINVAL;
+}
 #endif
 
-static unsigned int __init cpupool_check_granularity(void)
+static unsigned int cpupool_check_granularity(enum sched_gran mode)
 {
     unsigned int cpu;
     unsigned int siblings, gran = 0;
 
-    if ( opt_sched_granularity == SCHED_GRAN_cpu )
+    if ( mode == SCHED_GRAN_cpu )
         return 1;
 
     for_each_online_cpu ( cpu )
     {
-        siblings = cpumask_weight(sched_get_opt_cpumask(opt_sched_granularity,
-                                                        cpu));
+        siblings = cpumask_weight(sched_get_opt_cpumask(mode, cpu));
         if ( gran == 0 )
             gran = siblings;
         else if ( gran != siblings )
             return 0;
     }
 
-    sched_disable_smt_switching = true;
-
     return gran;
 }
 
@@ -126,7 +133,7 @@ static void __init cpupool_gran_init(void)
 
     while ( gran == 0 )
     {
-        gran = cpupool_check_granularity();
+        gran = cpupool_check_granularity(opt_sched_granularity);
 
         if ( gran == 0 )
         {
@@ -152,6 +159,9 @@ static void __init cpupool_gran_init(void)
     if ( fallback )
         warning_add(fallback);
 
+    if ( opt_sched_granularity != SCHED_GRAN_cpu )
+        sched_disable_smt_switching = true;
+
     sched_granularity = gran;
     sched_gran_print(opt_sched_granularity, sched_granularity);
 }
@@ -1145,17 +1155,55 @@ static unsigned int hypfs_gran_getsize(const struct hypfs_entry *entry)
     return strlen(gran) + 1;
 }
 
+static int cpupool_gran_write(struct hypfs_entry_leaf *leaf,
+                              XEN_GUEST_HANDLE_PARAM(void) uaddr,
+                              unsigned int ulen)
+{
+    const struct hypfs_dyndir_id *data;
+    struct cpupool *cpupool;
+    enum sched_gran gran;
+    unsigned int sched_gran = 0;
+    char name[SCHED_GRAN_NAME_LEN];
+    int ret = 0;
+
+    if ( ulen > SCHED_GRAN_NAME_LEN )
+        return -ENOSPC;
+
+    if ( copy_from_guest(name, uaddr, ulen) )
+        return -EFAULT;
+
+    if ( memchr(name, 0, ulen) == (name + ulen - 1) )
+        sched_gran = sched_gran_get(name, &gran) ?
+                     0 : cpupool_check_granularity(gran);
+    if ( sched_gran == 0 )
+        return -EINVAL;
+
+    data = hypfs_get_dyndata();
+    cpupool = __cpupool_find_by_id(data->id, true);
+    ASSERT(cpupool);
+
+    if ( !cpumask_empty(cpupool->cpu_valid) )
+        ret = -EBUSY;
+    else
+    {
+        cpupool->gran = gran;
+        cpupool->sched_gran = sched_gran;
+    }
+
+    return ret;
+}
+
 static struct hypfs_funcs cpupool_gran_funcs = {
     .enter = hypfs_node_enter,
     .exit = hypfs_node_exit,
     .read = cpupool_gran_read,
-    .write = hypfs_write_deny,
+    .write = cpupool_gran_write,
     .getsize = hypfs_gran_getsize,
     .findentry = hypfs_leaf_findentry,
 };
 
 static HYPFS_VARSIZE_INIT(cpupool_gran, XEN_HYPFS_TYPE_STRING, "sched-gran",
-                          0, &cpupool_gran_funcs);
+                          SCHED_GRAN_NAME_LEN, &cpupool_gran_funcs);
 static char granstr[SCHED_GRAN_NAME_LEN] = {
     [0 ... SCHED_GRAN_NAME_LEN - 2] = '?',
     [SCHED_GRAN_NAME_LEN - 1] = 0
-- 
2.26.2



Thread overview: 73+ messages
2020-12-01  8:21 [PATCH v2 00/17] xen: support per-cpupool scheduling granularity Juergen Gross
2020-12-01  8:21 ` [PATCH v2 01/17] xen/cpupool: add cpu to sched_res_mask when removing it from cpupool Juergen Gross
2020-12-01  8:21 ` [PATCH v2 02/17] xen/cpupool: add missing bits for per-cpupool scheduling granularity Juergen Gross
2020-12-01  8:21 ` [PATCH v2 03/17] xen/cpupool: sort included headers in cpupool.c Juergen Gross
2020-12-01  8:21 ` [PATCH v2 04/17] xen/cpupool: switch cpupool id to unsigned Juergen Gross
2020-12-01  8:55   ` Jan Beulich
2020-12-01  9:01     ` Jürgen Groß
2020-12-01  9:07       ` Jan Beulich
2020-12-07  9:59       ` Jan Beulich
2020-12-07 14:48         ` Jürgen Groß
2020-12-07 15:00           ` Jan Beulich
2020-12-04 15:52   ` Dario Faggioli
2020-12-07  9:58     ` Jan Beulich
2020-12-07 15:21     ` Jan Beulich
2020-12-01  8:21 ` [PATCH v2 05/17] xen/cpupool: switch cpupool list to normal list interface Juergen Gross
2020-12-01  9:12   ` Jan Beulich
2020-12-01  9:18     ` Jürgen Groß
2020-12-04 16:13       ` Dario Faggioli
2020-12-04 16:16         ` Jürgen Groß
2020-12-04 16:25           ` Dario Faggioli
2020-12-04 16:56   ` Dario Faggioli
2020-12-01  8:21 ` [PATCH v2 06/17] xen/cpupool: use ERR_PTR() for returning error cause from cpupool_create() Juergen Gross
2020-12-02  8:58   ` Jan Beulich
2020-12-02  9:56     ` Jürgen Groß
2020-12-02 10:46       ` Jan Beulich
2020-12-02 10:58         ` Jürgen Groß
2020-12-04 16:29   ` Dario Faggioli
2020-12-01  8:21 ` [PATCH v2 07/17] xen/cpupool: support moving domain between cpupools with different granularity Juergen Gross
2020-12-01  8:21 ` [PATCH v2 08/17] docs: fix hypfs path documentation Juergen Gross
2020-12-01  8:21 ` [PATCH v2 09/17] xen/hypfs: move per-node function pointers into a dedicated struct Juergen Gross
2020-12-02 15:36   ` Jan Beulich
2020-12-02 15:41     ` Jürgen Groß
2020-12-03  8:47     ` Jürgen Groß
2020-12-03  9:12       ` Jan Beulich
2020-12-03  9:51         ` Jürgen Groß
2020-12-01  8:21 ` [PATCH v2 10/17] xen/hypfs: pass real failure reason up from hypfs_get_entry() Juergen Gross
2020-12-01  8:21 ` [PATCH v2 11/17] xen/hypfs: add getsize() and findentry() callbacks to hypfs_funcs Juergen Gross
2020-12-02 15:42   ` Jan Beulich
2020-12-02 15:51     ` Jürgen Groß
2020-12-03  8:12       ` Jan Beulich
2020-12-03  9:39         ` Jürgen Groß
2020-12-04  8:58   ` Jan Beulich
2020-12-04 11:14     ` Jürgen Groß
2020-12-01  8:21 ` [PATCH v2 12/17] xen/hypfs: add new enter() and exit() per node callbacks Juergen Gross
2020-12-03 14:59   ` Jan Beulich
2020-12-03 15:14     ` Jürgen Groß
2020-12-03 15:29       ` Jan Beulich
2020-12-04  8:33         ` Jürgen Groß
2020-12-04  8:30   ` Jan Beulich
2020-12-04  8:35     ` Jürgen Groß
2020-12-01  8:21 ` [PATCH v2 13/17] xen/hypfs: support dynamic hypfs nodes Juergen Gross
2020-12-03 15:08   ` Jan Beulich
2020-12-03 15:18     ` Jürgen Groß
2020-12-01  8:21 ` [PATCH v2 14/17] xen/hypfs: add support for id-based dynamic directories Juergen Gross
2020-12-03 15:44   ` Jan Beulich
2020-12-04  8:52     ` Jürgen Groß
2020-12-04  9:16       ` Jan Beulich
2020-12-04 13:08         ` Jürgen Groß
2020-12-07  7:54           ` Jan Beulich
2020-12-01  8:21 ` [PATCH v2 15/17] xen/cpupool: add cpupool directories Juergen Gross
2020-12-01  9:00   ` Jan Beulich
2020-12-01  9:03     ` Jürgen Groß
2020-12-02 15:46   ` Jürgen Groß
2020-12-03 14:46     ` Jan Beulich
2020-12-03 15:11       ` Jürgen Groß
2020-12-04  9:10   ` Jan Beulich
2020-12-04 11:08     ` Jürgen Groß
2020-12-04 11:54       ` Jan Beulich
2020-12-01  8:21 ` [PATCH v2 16/17] xen/cpupool: add scheduling granularity entry to cpupool entries Juergen Gross
2020-12-01  8:21 ` Juergen Gross [this message]
2020-12-04 23:53 ` [PATCH v2 00/17] xen: support per-cpupool scheduling granularity Andrew Cooper
2020-12-05  7:41   ` Jürgen Groß
2020-12-07  9:00   ` Jan Beulich
