From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>,
George Dunlap <george.dunlap@citrix.com>,
Dario Faggioli <dfaggioli@suse.com>,
Andrew Cooper <andrew.cooper3@citrix.com>,
Ian Jackson <iwj@xenproject.org>, Jan Beulich <jbeulich@suse.com>,
Julien Grall <julien@xen.org>,
Stefano Stabellini <sstabellini@kernel.org>, Wei Liu <wl@xen.org>
Subject: [PATCH v2 04/17] xen/cpupool: switch cpupool id to unsigned
Date: Tue, 1 Dec 2020 09:21:15 +0100 [thread overview]
Message-ID: <20201201082128.15239-5-jgross@suse.com> (raw)
In-Reply-To: <20201201082128.15239-1-jgross@suse.com>
The cpupool id is an unsigned value in the public interface header, so
there is no reason for it to be a signed value in struct cpupool.
Switch it to unsigned int.
Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- new patch
---
xen/common/sched/core.c | 2 +-
xen/common/sched/cpupool.c | 48 +++++++++++++++++++-------------------
xen/common/sched/private.h | 8 +++----
xen/include/xen/sched.h | 4 ++--
4 files changed, 31 insertions(+), 31 deletions(-)
diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index f8c81592af..6063f6d9ea 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -757,7 +757,7 @@ void sched_destroy_vcpu(struct vcpu *v)
}
}
-int sched_init_domain(struct domain *d, int poolid)
+int sched_init_domain(struct domain *d, unsigned int poolid)
{
void *sdom;
int ret;
diff --git a/xen/common/sched/cpupool.c b/xen/common/sched/cpupool.c
index 84f326ea63..01fa71dd00 100644
--- a/xen/common/sched/cpupool.c
+++ b/xen/common/sched/cpupool.c
@@ -187,7 +187,7 @@ static struct cpupool *alloc_cpupool_struct(void)
* the searched id is returned
* returns NULL if not found.
*/
-static struct cpupool *__cpupool_find_by_id(int id, bool exact)
+static struct cpupool *__cpupool_find_by_id(unsigned int id, bool exact)
{
struct cpupool **q;
@@ -200,12 +200,12 @@ static struct cpupool *__cpupool_find_by_id(int id, bool exact)
return (!exact || (*q == NULL) || ((*q)->cpupool_id == id)) ? *q : NULL;
}
-static struct cpupool *cpupool_find_by_id(int poolid)
+static struct cpupool *cpupool_find_by_id(unsigned int poolid)
{
return __cpupool_find_by_id(poolid, true);
}
-static struct cpupool *__cpupool_get_by_id(int poolid, bool exact)
+static struct cpupool *__cpupool_get_by_id(unsigned int poolid, bool exact)
{
struct cpupool *c;
spin_lock(&cpupool_lock);
@@ -216,12 +216,12 @@ static struct cpupool *__cpupool_get_by_id(int poolid, bool exact)
return c;
}
-struct cpupool *cpupool_get_by_id(int poolid)
+struct cpupool *cpupool_get_by_id(unsigned int poolid)
{
return __cpupool_get_by_id(poolid, true);
}
-static struct cpupool *cpupool_get_next_by_id(int poolid)
+static struct cpupool *cpupool_get_next_by_id(unsigned int poolid)
{
return __cpupool_get_by_id(poolid, false);
}
@@ -243,11 +243,11 @@ void cpupool_put(struct cpupool *pool)
* - unknown scheduler
*/
static struct cpupool *cpupool_create(
- int poolid, unsigned int sched_id, int *perr)
+ unsigned int poolid, unsigned int sched_id, int *perr)
{
struct cpupool *c;
struct cpupool **q;
- int last = 0;
+ unsigned int last = 0;
*perr = -ENOMEM;
if ( (c = alloc_cpupool_struct()) == NULL )
@@ -256,7 +256,7 @@ static struct cpupool *cpupool_create(
/* One reference for caller, one reference for cpupool_destroy(). */
atomic_set(&c->refcnt, 2);
- debugtrace_printk("cpupool_create(pool=%d,sched=%u)\n", poolid, sched_id);
+ debugtrace_printk("cpupool_create(pool=%u,sched=%u)\n", poolid, sched_id);
spin_lock(&cpupool_lock);
@@ -295,7 +295,7 @@ static struct cpupool *cpupool_create(
spin_unlock(&cpupool_lock);
- debugtrace_printk("Created cpupool %d with scheduler %s (%s)\n",
+ debugtrace_printk("Created cpupool %u with scheduler %s (%s)\n",
c->cpupool_id, c->sched->name, c->sched->opt_name);
*perr = 0;
@@ -337,7 +337,7 @@ static int cpupool_destroy(struct cpupool *c)
cpupool_put(c);
- debugtrace_printk("cpupool_destroy(pool=%d)\n", c->cpupool_id);
+ debugtrace_printk("cpupool_destroy(pool=%u)\n", c->cpupool_id);
return 0;
}
@@ -521,7 +521,7 @@ static long cpupool_unassign_cpu_helper(void *info)
struct cpupool *c = info;
long ret;
- debugtrace_printk("cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
+ debugtrace_printk("cpupool_unassign_cpu(pool=%u,cpu=%d)\n",
cpupool_cpu_moving->cpupool_id, cpupool_moving_cpu);
spin_lock(&cpupool_lock);
@@ -551,7 +551,7 @@ static int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
int ret;
unsigned int master_cpu;
- debugtrace_printk("cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
+ debugtrace_printk("cpupool_unassign_cpu(pool=%u,cpu=%d)\n",
c->cpupool_id, cpu);
if ( !cpu_online(cpu) )
@@ -561,7 +561,7 @@ static int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
ret = cpupool_unassign_cpu_start(c, master_cpu);
if ( ret )
{
- debugtrace_printk("cpupool_unassign_cpu(pool=%d,cpu=%d) ret %d\n",
+ debugtrace_printk("cpupool_unassign_cpu(pool=%u,cpu=%d) ret %d\n",
c->cpupool_id, cpu, ret);
return ret;
}
@@ -582,7 +582,7 @@ static int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
* - pool does not exist
* - no cpu assigned to pool
*/
-int cpupool_add_domain(struct domain *d, int poolid)
+int cpupool_add_domain(struct domain *d, unsigned int poolid)
{
struct cpupool *c;
int rc;
@@ -604,7 +604,7 @@ int cpupool_add_domain(struct domain *d, int poolid)
rc = 0;
}
spin_unlock(&cpupool_lock);
- debugtrace_printk("cpupool_add_domain(dom=%d,pool=%d) n_dom %d rc %d\n",
+ debugtrace_printk("cpupool_add_domain(dom=%d,pool=%u) n_dom %d rc %d\n",
d->domain_id, poolid, n_dom, rc);
return rc;
}
@@ -614,7 +614,7 @@ int cpupool_add_domain(struct domain *d, int poolid)
*/
void cpupool_rm_domain(struct domain *d)
{
- int cpupool_id;
+ unsigned int cpupool_id;
int n_dom;
if ( d->cpupool == NULL )
@@ -625,7 +625,7 @@ void cpupool_rm_domain(struct domain *d)
n_dom = d->cpupool->n_dom;
d->cpupool = NULL;
spin_unlock(&cpupool_lock);
- debugtrace_printk("cpupool_rm_domain(dom=%d,pool=%d) n_dom %d\n",
+ debugtrace_printk("cpupool_rm_domain(dom=%d,pool=%u) n_dom %d\n",
d->domain_id, cpupool_id, n_dom);
return;
}
@@ -767,7 +767,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
case XEN_SYSCTL_CPUPOOL_OP_CREATE:
{
- int poolid;
+ unsigned int poolid;
poolid = (op->cpupool_id == XEN_SYSCTL_CPUPOOL_PAR_ANY) ?
CPUPOOLID_NONE: op->cpupool_id;
@@ -811,7 +811,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
const cpumask_t *cpus;
cpu = op->cpu;
- debugtrace_printk("cpupool_assign_cpu(pool=%d,cpu=%d)\n",
+ debugtrace_printk("cpupool_assign_cpu(pool=%u,cpu=%u)\n",
op->cpupool_id, cpu);
spin_lock(&cpupool_lock);
@@ -844,7 +844,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
addcpu_out:
spin_unlock(&cpupool_lock);
- debugtrace_printk("cpupool_assign_cpu(pool=%d,cpu=%d) ret %d\n",
+ debugtrace_printk("cpupool_assign_cpu(pool=%u,cpu=%u) ret %d\n",
op->cpupool_id, cpu, ret);
}
@@ -885,7 +885,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
rcu_unlock_domain(d);
break;
}
- debugtrace_printk("cpupool move_domain(dom=%d)->pool=%d\n",
+ debugtrace_printk("cpupool move_domain(dom=%d)->pool=%u\n",
d->domain_id, op->cpupool_id);
ret = -ENOENT;
spin_lock(&cpupool_lock);
@@ -895,7 +895,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
ret = cpupool_move_domain_locked(d, c);
spin_unlock(&cpupool_lock);
- debugtrace_printk("cpupool move_domain(dom=%d)->pool=%d ret %d\n",
+ debugtrace_printk("cpupool move_domain(dom=%d)->pool=%u ret %d\n",
d->domain_id, op->cpupool_id, ret);
rcu_unlock_domain(d);
}
@@ -916,7 +916,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
return ret;
}
-int cpupool_get_id(const struct domain *d)
+unsigned int cpupool_get_id(const struct domain *d)
{
return d->cpupool ? d->cpupool->cpupool_id : CPUPOOLID_NONE;
}
@@ -946,7 +946,7 @@ void dump_runq(unsigned char key)
for_each_cpupool(c)
{
- printk("Cpupool %d:\n", (*c)->cpupool_id);
+ printk("Cpupool %u:\n", (*c)->cpupool_id);
printk("Cpus: %*pbl\n", CPUMASK_PR((*c)->cpu_valid));
sched_gran_print((*c)->gran, cpupool_get_granularity(*c));
schedule_dump(*c);
diff --git a/xen/common/sched/private.h b/xen/common/sched/private.h
index 685992cab9..e69d9be1e8 100644
--- a/xen/common/sched/private.h
+++ b/xen/common/sched/private.h
@@ -505,8 +505,8 @@ static inline void sched_unit_unpause(const struct sched_unit *unit)
struct cpupool
{
- int cpupool_id;
-#define CPUPOOLID_NONE (-1)
+ unsigned int cpupool_id;
+#define CPUPOOLID_NONE (~0U)
unsigned int n_dom;
cpumask_var_t cpu_valid; /* all cpus assigned to pool */
cpumask_var_t res_valid; /* all scheduling resources of pool */
@@ -601,9 +601,9 @@ int cpu_disable_scheduler(unsigned int cpu);
int schedule_cpu_add(unsigned int cpu, struct cpupool *c);
int schedule_cpu_rm(unsigned int cpu);
int sched_move_domain(struct domain *d, struct cpupool *c);
-struct cpupool *cpupool_get_by_id(int poolid);
+struct cpupool *cpupool_get_by_id(unsigned int poolid);
void cpupool_put(struct cpupool *pool);
-int cpupool_add_domain(struct domain *d, int poolid);
+int cpupool_add_domain(struct domain *d, unsigned int poolid);
void cpupool_rm_domain(struct domain *d);
#endif /* __XEN_SCHED_IF_H__ */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index a345cc01f8..b2878e7b2a 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -691,7 +691,7 @@ void noreturn asm_domain_crash_synchronous(unsigned long addr);
void scheduler_init(void);
int sched_init_vcpu(struct vcpu *v);
void sched_destroy_vcpu(struct vcpu *v);
-int sched_init_domain(struct domain *d, int poolid);
+int sched_init_domain(struct domain *d, unsigned int poolid);
void sched_destroy_domain(struct domain *d);
long sched_adjust(struct domain *, struct xen_domctl_scheduler_op *);
long sched_adjust_global(struct xen_sysctl_scheduler_op *);
@@ -1089,7 +1089,7 @@ static always_inline bool is_cpufreq_controller(const struct domain *d)
int cpupool_move_domain(struct domain *d, struct cpupool *c);
int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op);
-int cpupool_get_id(const struct domain *d);
+unsigned int cpupool_get_id(const struct domain *d);
const cpumask_t *cpupool_valid_cpus(const struct cpupool *pool);
extern void dump_runq(unsigned char key);
--
2.26.2
next prev parent reply other threads:[~2020-12-01 8:21 UTC|newest]
Thread overview: 73+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-12-01 8:21 [PATCH v2 00/17] xen: support per-cpupool scheduling granularity Juergen Gross
2020-12-01 8:21 ` [PATCH v2 01/17] xen/cpupool: add cpu to sched_res_mask when removing it from cpupool Juergen Gross
2020-12-01 8:21 ` [PATCH v2 02/17] xen/cpupool: add missing bits for per-cpupool scheduling granularity Juergen Gross
2020-12-01 8:21 ` [PATCH v2 03/17] xen/cpupool: sort included headers in cpupool.c Juergen Gross
2020-12-01 8:21 ` Juergen Gross [this message]
2020-12-01 8:55 ` [PATCH v2 04/17] xen/cpupool: switch cpupool id to unsigned Jan Beulich
2020-12-01 9:01 ` Jürgen Groß
2020-12-01 9:07 ` Jan Beulich
2020-12-07 9:59 ` Jan Beulich
2020-12-07 14:48 ` Jürgen Groß
2020-12-07 15:00 ` Jan Beulich
2020-12-04 15:52 ` Dario Faggioli
2020-12-07 9:58 ` Jan Beulich
2020-12-07 15:21 ` Jan Beulich
2020-12-01 8:21 ` [PATCH v2 05/17] xen/cpupool: switch cpupool list to normal list interface Juergen Gross
2020-12-01 9:12 ` Jan Beulich
2020-12-01 9:18 ` Jürgen Groß
2020-12-04 16:13 ` Dario Faggioli
2020-12-04 16:16 ` Jürgen Groß
2020-12-04 16:25 ` Dario Faggioli
2020-12-04 16:56 ` Dario Faggioli
2020-12-01 8:21 ` [PATCH v2 06/17] xen/cpupool: use ERR_PTR() for returning error cause from cpupool_create() Juergen Gross
2020-12-02 8:58 ` Jan Beulich
2020-12-02 9:56 ` Jürgen Groß
2020-12-02 10:46 ` Jan Beulich
2020-12-02 10:58 ` Jürgen Groß
2020-12-04 16:29 ` Dario Faggioli
2020-12-01 8:21 ` [PATCH v2 07/17] xen/cpupool: support moving domain between cpupools with different granularity Juergen Gross
2020-12-01 8:21 ` [PATCH v2 08/17] docs: fix hypfs path documentation Juergen Gross
2020-12-01 8:21 ` [PATCH v2 09/17] xen/hypfs: move per-node function pointers into a dedicated struct Juergen Gross
2020-12-02 15:36 ` Jan Beulich
2020-12-02 15:41 ` Jürgen Groß
2020-12-03 8:47 ` Jürgen Groß
2020-12-03 9:12 ` Jan Beulich
2020-12-03 9:51 ` Jürgen Groß
2020-12-01 8:21 ` [PATCH v2 10/17] xen/hypfs: pass real failure reason up from hypfs_get_entry() Juergen Gross
2020-12-01 8:21 ` [PATCH v2 11/17] xen/hypfs: add getsize() and findentry() callbacks to hypfs_funcs Juergen Gross
2020-12-02 15:42 ` Jan Beulich
2020-12-02 15:51 ` Jürgen Groß
2020-12-03 8:12 ` Jan Beulich
2020-12-03 9:39 ` Jürgen Groß
2020-12-04 8:58 ` Jan Beulich
2020-12-04 11:14 ` Jürgen Groß
2020-12-01 8:21 ` [PATCH v2 12/17] xen/hypfs: add new enter() and exit() per node callbacks Juergen Gross
2020-12-03 14:59 ` Jan Beulich
2020-12-03 15:14 ` Jürgen Groß
2020-12-03 15:29 ` Jan Beulich
2020-12-04 8:33 ` Jürgen Groß
2020-12-04 8:30 ` Jan Beulich
2020-12-04 8:35 ` Jürgen Groß
2020-12-01 8:21 ` [PATCH v2 13/17] xen/hypfs: support dynamic hypfs nodes Juergen Gross
2020-12-03 15:08 ` Jan Beulich
2020-12-03 15:18 ` Jürgen Groß
2020-12-01 8:21 ` [PATCH v2 14/17] xen/hypfs: add support for id-based dynamic directories Juergen Gross
2020-12-03 15:44 ` Jan Beulich
2020-12-04 8:52 ` Jürgen Groß
2020-12-04 9:16 ` Jan Beulich
2020-12-04 13:08 ` Jürgen Groß
2020-12-07 7:54 ` Jan Beulich
2020-12-01 8:21 ` [PATCH v2 15/17] xen/cpupool: add cpupool directories Juergen Gross
2020-12-01 9:00 ` Jan Beulich
2020-12-01 9:03 ` Jürgen Groß
2020-12-02 15:46 ` Jürgen Groß
2020-12-03 14:46 ` Jan Beulich
2020-12-03 15:11 ` Jürgen Groß
2020-12-04 9:10 ` Jan Beulich
2020-12-04 11:08 ` Jürgen Groß
2020-12-04 11:54 ` Jan Beulich
2020-12-01 8:21 ` [PATCH v2 16/17] xen/cpupool: add scheduling granularity entry to cpupool entries Juergen Gross
2020-12-01 8:21 ` [PATCH v2 17/17] xen/cpupool: make per-cpupool sched-gran hypfs node writable Juergen Gross
2020-12-04 23:53 ` [PATCH v2 00/17] xen: support per-cpupool scheduling granularity Andrew Cooper
2020-12-05 7:41 ` Jürgen Groß
2020-12-07 9:00 ` Jan Beulich
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20201201082128.15239-5-jgross@suse.com \
--to=jgross@suse.com \
--cc=andrew.cooper3@citrix.com \
--cc=dfaggioli@suse.com \
--cc=george.dunlap@citrix.com \
--cc=iwj@xenproject.org \
--cc=jbeulich@suse.com \
--cc=julien@xen.org \
--cc=sstabellini@kernel.org \
--cc=wl@xen.org \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).