From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>, Dario Faggioli <dfaggioli@suse.com>
Subject: [Xen-devel] [PATCH v4 4/4] xen/sched: switch to debugtrace in cpupool handling
Date: Sat, 14 Sep 2019 07:19:44 +0200	[thread overview]
Message-ID: <20190914051944.21409-5-jgross@suse.com> (raw)
In-Reply-To: <20190914051944.21409-1-jgross@suse.com>

Instead of having a cpupool_dprintk() define, just use debugtrace.

Signed-off-by: Juergen Gross <jgross@suse.com>
Acked-by: Dario Faggioli <dfaggioli@suse.com>
---
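A minimal sketch of the change at a call site, assuming the printf-style
debugtrace_printk() declared in xen/lib.h (illustration only, not part of
the patch below):

    #include <xen/lib.h>

    /* Before: cpupool debug output was compiled away entirely. */
    #define cpupool_dprintk(x...) ((void)0)

    /* After: the same messages are recorded in Xen's debugtrace buffer,
     * so they can be inspected later instead of being dropped at
     * compile time. */
    static void trace_pool_create(int poolid, unsigned int sched_id)
    {
        debugtrace_printk("cpupool_create(pool=%d,sched=%u)\n",
                          poolid, sched_id);
    }
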
 xen/common/cpupool.c | 48 +++++++++++++++++++++++-------------------------
 1 file changed, 23 insertions(+), 25 deletions(-)

diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index dcdf18ee08..fd30040922 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -36,8 +36,6 @@ static DEFINE_SPINLOCK(cpupool_lock);
 
 DEFINE_PER_CPU(struct cpupool *, cpupool);
 
-#define cpupool_dprintk(x...) ((void)0)
-
 static struct cpupool *alloc_cpupool_struct(void)
 {
     struct cpupool *c = xzalloc(struct cpupool);
@@ -133,7 +131,7 @@ static struct cpupool *cpupool_create(
     /* One reference for caller, one reference for cpupool_destroy(). */
     atomic_set(&c->refcnt, 2);
 
-    cpupool_dprintk("cpupool_create(pool=%d,sched=%u)\n", poolid, sched_id);
+    debugtrace_printk("cpupool_create(pool=%d,sched=%u)\n", poolid, sched_id);
 
     spin_lock(&cpupool_lock);
 
@@ -175,8 +173,8 @@ static struct cpupool *cpupool_create(
 
     spin_unlock(&cpupool_lock);
 
-    cpupool_dprintk("Created cpupool %d with scheduler %s (%s)\n",
-                    c->cpupool_id, c->sched->name, c->sched->opt_name);
+    debugtrace_printk("Created cpupool %d with scheduler %s (%s)\n",
+                      c->cpupool_id, c->sched->name, c->sched->opt_name);
 
     *perr = 0;
     return c;
@@ -212,7 +210,7 @@ static int cpupool_destroy(struct cpupool *c)
 
     cpupool_put(c);
 
-    cpupool_dprintk("cpupool_destroy(pool=%d)\n", c->cpupool_id);
+    debugtrace_printk("cpupool_destroy(pool=%d)\n", c->cpupool_id);
     return 0;
 }
 
@@ -375,14 +373,14 @@ static long cpupool_unassign_cpu_helper(void *info)
     struct cpupool *c = info;
     long ret;
 
-    cpupool_dprintk("cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
-                    cpupool_cpu_moving->cpupool_id, cpupool_moving_cpu);
+    debugtrace_printk("cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
+                      cpupool_cpu_moving->cpupool_id, cpupool_moving_cpu);
     spin_lock(&cpupool_lock);
 
     ret = cpupool_unassign_cpu_finish(c);
 
     spin_unlock(&cpupool_lock);
-    cpupool_dprintk("cpupool_unassign_cpu ret=%ld\n", ret);
+    debugtrace_printk("cpupool_unassign_cpu ret=%ld\n", ret);
 
     return ret;
 }
@@ -404,14 +402,14 @@ static int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
     int work_cpu;
     int ret;
 
-    cpupool_dprintk("cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
-                    c->cpupool_id, cpu);
+    debugtrace_printk("cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
+                      c->cpupool_id, cpu);
 
     ret = cpupool_unassign_cpu_start(c, cpu);
     if ( ret )
     {
-        cpupool_dprintk("cpupool_unassign_cpu(pool=%d,cpu=%d) ret %d\n",
-                        c->cpupool_id, cpu, ret);
+        debugtrace_printk("cpupool_unassign_cpu(pool=%d,cpu=%d) ret %d\n",
+                          c->cpupool_id, cpu, ret);
         return ret;
     }
 
@@ -453,8 +451,8 @@ int cpupool_add_domain(struct domain *d, int poolid)
         rc = 0;
     }
     spin_unlock(&cpupool_lock);
-    cpupool_dprintk("cpupool_add_domain(dom=%d,pool=%d) n_dom %d rc %d\n",
-                    d->domain_id, poolid, n_dom, rc);
+    debugtrace_printk("cpupool_add_domain(dom=%d,pool=%d) n_dom %d rc %d\n",
+                      d->domain_id, poolid, n_dom, rc);
     return rc;
 }
 
@@ -474,8 +472,8 @@ void cpupool_rm_domain(struct domain *d)
     n_dom = d->cpupool->n_dom;
     d->cpupool = NULL;
     spin_unlock(&cpupool_lock);
-    cpupool_dprintk("cpupool_rm_domain(dom=%d,pool=%d) n_dom %d\n",
-                    d->domain_id, cpupool_id, n_dom);
+    debugtrace_printk("cpupool_rm_domain(dom=%d,pool=%d) n_dom %d\n",
+                      d->domain_id, cpupool_id, n_dom);
     return;
 }
 
@@ -642,8 +640,8 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
         unsigned cpu;
 
         cpu = op->cpu;
-        cpupool_dprintk("cpupool_assign_cpu(pool=%d,cpu=%d)\n",
-                        op->cpupool_id, cpu);
+        debugtrace_printk("cpupool_assign_cpu(pool=%d,cpu=%d)\n",
+                          op->cpupool_id, cpu);
         spin_lock(&cpupool_lock);
         if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
             cpu = cpumask_first(&cpupool_free_cpus);
@@ -661,8 +659,8 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
         ret = cpupool_assign_cpu_locked(c, cpu);
     addcpu_out:
         spin_unlock(&cpupool_lock);
-        cpupool_dprintk("cpupool_assign_cpu(pool=%d,cpu=%d) ret %d\n",
-                        op->cpupool_id, cpu, ret);
+        debugtrace_printk("cpupool_assign_cpu(pool=%d,cpu=%d) ret %d\n",
+                          op->cpupool_id, cpu, ret);
     }
     break;
 
@@ -701,8 +699,8 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
             rcu_unlock_domain(d);
             break;
         }
-        cpupool_dprintk("cpupool move_domain(dom=%d)->pool=%d\n",
-                        d->domain_id, op->cpupool_id);
+        debugtrace_printk("cpupool move_domain(dom=%d)->pool=%d\n",
+                          d->domain_id, op->cpupool_id);
         ret = -ENOENT;
         spin_lock(&cpupool_lock);
 
@@ -711,8 +709,8 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
             ret = cpupool_move_domain_locked(d, c);
 
         spin_unlock(&cpupool_lock);
-        cpupool_dprintk("cpupool move_domain(dom=%d)->pool=%d ret %d\n",
-                        d->domain_id, op->cpupool_id, ret);
+        debugtrace_printk("cpupool move_domain(dom=%d)->pool=%d ret %d\n",
+                          d->domain_id, op->cpupool_id, ret);
         rcu_unlock_domain(d);
     }
     break;
-- 
2.16.4


Thread overview: 6+ messages
2019-09-14  5:19 [Xen-devel] [PATCH v4 0/4] xen/sched: use new idle scheduler for free cpus Juergen Gross
2019-09-14  5:19 ` [Xen-devel] [PATCH v4 1/4] xen/sched: populate cpupool0 only after all cpus are up Juergen Gross
2019-09-14  5:19 ` [Xen-devel] [PATCH v4 2/4] xen/sched: remove cpu from pool0 before removing it Juergen Gross
2019-09-24 14:14   ` Dario Faggioli
2019-09-14  5:19 ` [Xen-devel] [PATCH v4 3/4] xen/sched: add minimalistic idle scheduler for free cpus Juergen Gross
2019-09-14  5:19 ` Juergen Gross [this message]
