From: Dario Faggioli <dario.faggioli@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Anshul Makkar <anshul.makkar@citrix.com>,
George Dunlap <george.dunlap@citrix.com>,
David Vrabel <david.vrabel@citrix.com>
Subject: [PATCH 13/19] xen: credit2: make the code less experimental
Date: Sat, 18 Jun 2016 01:12:51 +0200 [thread overview]
Message-ID: <146620517107.29766.17643295412098867216.stgit@Solace.fritz.box> (raw)
In-Reply-To: <146620492155.29766.10321123657058307698.stgit@Solace.fritz.box>
Mainly, almost all of the BUG_ON()-s can be converted into
ASSERT()-s, and the debug printk()-s can either be removed or
turned into tracing.
The 'TODO' list, in a comment at the beginning of the file,
was also stale, so remove items that were still there but
are actually done.
Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
---
Cc: George Dunlap <george.dunlap@citrix.com>
Cc: Anshul Makkar <anshul.makkar@citrix.com>
Cc: David Vrabel <david.vrabel@citrix.com>
---
xen/common/sched_credit2.c | 244 +++++++++++++++++++++++---------------------
1 file changed, 126 insertions(+), 118 deletions(-)
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 2ca63ae..ba3a78a 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -27,9 +27,6 @@
#include <xen/cpu.h>
#include <xen/keyhandler.h>
-#define d2printk(x...)
-//#define d2printk printk
-
/*
* Credit2 tracing events ("only" 512 available!). Check
* include/public/trace.h for more details.
@@ -46,16 +43,16 @@
#define TRC_CSCHED2_RUNQ_ASSIGN TRC_SCHED_CLASS_EVT(CSCHED2, 10)
#define TRC_CSCHED2_UPDATE_VCPU_LOAD TRC_SCHED_CLASS_EVT(CSCHED2, 11)
#define TRC_CSCHED2_UPDATE_RUNQ_LOAD TRC_SCHED_CLASS_EVT(CSCHED2, 12)
+#define TRC_CSCHED2_TICKLE_NEW TRC_SCHED_CLASS_EVT(CSCHED2, 13)
+#define TRC_CSCHED2_RUNQ_MAX_WEIGHT TRC_SCHED_CLASS_EVT(CSCHED2, 14)
+#define TRC_CSCHED2_MIGRATE TRC_SCHED_CLASS_EVT(CSCHED2, 15)
/*
* WARNING: This is still in an experimental phase. Status and work can be found at the
* credit2 wiki page:
* http://wiki.xen.org/wiki/Credit2_Scheduler_Development
+ *
* TODO:
- * + Multiple sockets
- * - Simple load balancer / runqueue assignment
- * - Runqueue load measurement
- * - Load-based load balancer
* + Hyperthreading
* - Look for non-busy core if possible
* - "Discount" time run on a thread with busy siblings
@@ -608,8 +605,8 @@ __update_runq_load(const struct scheduler *ops,
delta = now - rqd->load_last_update;
if ( unlikely(delta < 0) )
{
- d2printk("%s: Time went backwards? now %"PRI_stime" llu %"PRI_stime"\n",
- __func__, now, rqd->load_last_update);
+ printk("WARNING: %s: Time went backwards? now %"PRI_stime" llu %"PRI_stime"\n",
+ __func__, now, rqd->load_last_update);
delta = 0;
}
@@ -680,8 +677,8 @@ __update_svc_load(const struct scheduler *ops,
delta = now - svc->load_last_update;
if ( unlikely(delta < 0) )
{
- d2printk("%s: Time went backwards? now %"PRI_stime" llu %"PRI_stime"\n",
- __func__, now, svc->load_last_update);
+ printk("WARNING: %s: Time went backwards? now %"PRI_stime" llu %"PRI_stime"\n",
+ __func__, now, svc->load_last_update);
delta = 0;
}
@@ -723,23 +720,18 @@ __runq_insert(struct list_head *runq, struct csched2_vcpu *svc)
struct list_head *iter;
int pos = 0;
- d2printk("rqi %pv\n", svc->vcpu);
-
- BUG_ON(&svc->rqd->runq != runq);
- /* Idle vcpus not allowed on the runqueue anymore */
- BUG_ON(is_idle_vcpu(svc->vcpu));
- BUG_ON(svc->vcpu->is_running);
- BUG_ON(svc->flags & CSFLAG_scheduled);
+ ASSERT(&svc->rqd->runq == runq);
+ ASSERT(!is_idle_vcpu(svc->vcpu));
+ ASSERT(!svc->vcpu->is_running);
+ ASSERT(!(svc->flags & CSFLAG_scheduled));
list_for_each( iter, runq )
{
struct csched2_vcpu * iter_svc = __runq_elem(iter);
if ( svc->credit > iter_svc->credit )
- {
- d2printk(" p%d %pv\n", pos, iter_svc->vcpu);
break;
- }
+
pos++;
}
@@ -755,10 +747,10 @@ runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
struct list_head * runq = &RQD(ops, cpu)->runq;
int pos = 0;
- ASSERT( spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock) );
+ ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
- BUG_ON( __vcpu_on_runq(svc) );
- BUG_ON( c2r(ops, cpu) != c2r(ops, svc->vcpu->processor) );
+ ASSERT(!__vcpu_on_runq(svc));
+ ASSERT(c2r(ops, cpu) == c2r(ops, svc->vcpu->processor));
pos = __runq_insert(runq, svc);
@@ -781,7 +773,7 @@ runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
static inline void
__runq_remove(struct csched2_vcpu *svc)
{
- BUG_ON( !__vcpu_on_runq(svc) );
+ ASSERT(__vcpu_on_runq(svc));
list_del_init(&svc->runq_elem);
}
@@ -806,16 +798,29 @@ void burn_credits(struct csched2_runqueue_data *rqd, struct csched2_vcpu *, s_ti
static void
runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
{
- int i, ipid=-1;
- s_time_t lowest=(1<<30);
+ int i, ipid = -1;
+ s_time_t lowest = (1<<30);
unsigned int cpu = new->vcpu->processor;
struct csched2_runqueue_data *rqd = RQD(ops, cpu);
cpumask_t mask;
struct csched2_vcpu * cur;
- d2printk("rqt %pv curr %pv\n", new->vcpu, current);
+ ASSERT(new->rqd == rqd);
- BUG_ON(new->rqd != rqd);
+ /* TRACE */
+ {
+ struct {
+ unsigned vcpu:16, dom:16;
+ unsigned processor, credit;
+ } d;
+ d.dom = new->vcpu->domain->domain_id;
+ d.vcpu = new->vcpu->vcpu_id;
+ d.processor = new->vcpu->processor;
+ d.credit = new->credit;
+ trace_var(TRC_CSCHED2_TICKLE_NEW, 1,
+ sizeof(d),
+ (unsigned char *)&d);
+ }
/*
* Get a mask of idle, but not tickled, processors that new is
@@ -861,7 +866,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
cur = CSCHED2_VCPU(curr_on_cpu(i));
- BUG_ON(is_idle_vcpu(cur->vcpu));
+ ASSERT(!is_idle_vcpu(cur->vcpu));
/* Update credits for current to see if we want to preempt. */
burn_credits(rqd, cur, now);
@@ -951,8 +956,8 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
svc = list_entry(iter, struct csched2_vcpu, rqd_elem);
- BUG_ON( is_idle_vcpu(svc->vcpu) );
- BUG_ON( svc->rqd != rqd );
+ ASSERT(!is_idle_vcpu(svc->vcpu));
+ ASSERT(svc->rqd == rqd);
start_credit = svc->credit;
@@ -996,12 +1001,11 @@ void burn_credits(struct csched2_runqueue_data *rqd,
{
s_time_t delta;
- /* Assert svc is current */
- ASSERT(svc==CSCHED2_VCPU(curr_on_cpu(svc->vcpu->processor)));
+ ASSERT(svc == CSCHED2_VCPU(curr_on_cpu(svc->vcpu->processor)));
if ( unlikely(is_idle_vcpu(svc->vcpu)) )
{
- BUG_ON(svc->credit != CSCHED2_IDLE_CREDIT);
+ ASSERT(svc->credit == CSCHED2_IDLE_CREDIT);
return;
}
@@ -1012,12 +1016,10 @@ void burn_credits(struct csched2_runqueue_data *rqd,
SCHED_STAT_CRANK(burn_credits_t2c);
t2c_update(rqd, delta, svc);
svc->start_time = now;
-
- d2printk("b %pv c%d\n", svc->vcpu, svc->credit);
}
else if ( delta < 0 )
{
- d2printk("%s: Time went backwards? now %"PRI_stime" start %"PRI_stime"\n",
+ printk("WARNING: %s: Time went backwards? now %"PRI_stime" start_time %"PRI_stime"\n",
__func__, now, svc->start_time);
}
@@ -1051,7 +1053,6 @@ static void update_max_weight(struct csched2_runqueue_data *rqd, int new_weight,
if ( new_weight > rqd->max_weight )
{
rqd->max_weight = new_weight;
- d2printk("%s: Runqueue id %d max weight %d\n", __func__, rqd->id, rqd->max_weight);
SCHED_STAT_CRANK(upd_max_weight_quick);
}
else if ( old_weight == rqd->max_weight )
@@ -1068,9 +1069,20 @@ static void update_max_weight(struct csched2_runqueue_data *rqd, int new_weight,
}
rqd->max_weight = max_weight;
- d2printk("%s: Runqueue %d max weight %d\n", __func__, rqd->id, rqd->max_weight);
SCHED_STAT_CRANK(upd_max_weight_full);
}
+
+ /* TRACE */
+ {
+ struct {
+ unsigned rqi:16, max_weight:16;
+ } d;
+ d.rqi = rqd->id;
+ d.max_weight = rqd->max_weight;
+ trace_var(TRC_CSCHED2_RUNQ_MAX_WEIGHT, 1,
+ sizeof(d),
+ (unsigned char *)&d);
+ }
}
#ifndef NDEBUG
@@ -1117,8 +1129,7 @@ csched2_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
if ( ! is_idle_vcpu(vc) )
{
- BUG_ON( svc->sdom == NULL );
-
+ ASSERT(svc->sdom != NULL);
svc->credit = CSCHED2_CREDIT_INIT;
svc->weight = svc->sdom->weight;
/* Starting load of 50% */
@@ -1127,7 +1138,7 @@ csched2_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
}
else
{
- BUG_ON( svc->sdom != NULL );
+ ASSERT(svc->sdom == NULL);
svc->credit = CSCHED2_IDLE_CREDIT;
svc->weight = 0;
}
@@ -1171,7 +1182,7 @@ runq_assign(const struct scheduler *ops, struct vcpu *vc)
{
struct csched2_vcpu *svc = vc->sched_priv;
- BUG_ON(svc->rqd != NULL);
+ ASSERT(svc->rqd == NULL);
__runq_assign(svc, RQD(ops, vc->processor));
}
@@ -1179,8 +1190,8 @@ runq_assign(const struct scheduler *ops, struct vcpu *vc)
static void
__runq_deassign(struct csched2_vcpu *svc)
{
- BUG_ON(__vcpu_on_runq(svc));
- BUG_ON(svc->flags & CSFLAG_scheduled);
+ ASSERT(!__vcpu_on_runq(svc));
+ ASSERT(!(svc->flags & CSFLAG_scheduled));
list_del_init(&svc->rqd_elem);
update_max_weight(svc->rqd, 0, svc->weight);
@@ -1196,7 +1207,7 @@ runq_deassign(const struct scheduler *ops, struct vcpu *vc)
{
struct csched2_vcpu *svc = vc->sched_priv;
- BUG_ON(svc->rqd != RQD(ops, vc->processor));
+ ASSERT(svc->rqd == RQD(ops, vc->processor));
__runq_deassign(svc);
}
@@ -1208,9 +1219,8 @@ csched2_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
struct csched2_dom * const sdom = svc->sdom;
spinlock_t *lock;
- printk("%s: Inserting %pv\n", __func__, vc);
-
- BUG_ON(is_idle_vcpu(vc));
+ ASSERT(!is_idle_vcpu(vc));
+ ASSERT(list_empty(&svc->runq_elem));
/* Add vcpu to runqueue of initial processor */
lock = vcpu_schedule_lock_irq(vc);
@@ -1238,26 +1248,21 @@ static void
csched2_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
{
struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
- struct csched2_dom * const sdom = svc->sdom;
-
- BUG_ON( sdom == NULL );
- BUG_ON( !list_empty(&svc->runq_elem) );
+ spinlock_t *lock;
- if ( ! is_idle_vcpu(vc) )
- {
- spinlock_t *lock;
+ ASSERT(!is_idle_vcpu(vc));
+ ASSERT(list_empty(&svc->runq_elem));
- SCHED_STAT_CRANK(vcpu_remove);
+ SCHED_STAT_CRANK(vcpu_remove);
- /* Remove from runqueue */
- lock = vcpu_schedule_lock_irq(vc);
+ /* Remove from runqueue */
+ lock = vcpu_schedule_lock_irq(vc);
- runq_deassign(ops, vc);
+ runq_deassign(ops, vc);
- vcpu_schedule_unlock_irq(lock, vc);
+ vcpu_schedule_unlock_irq(lock, vc);
- svc->sdom->nr_vcpus--;
- }
+ svc->sdom->nr_vcpus--;
}
static void
@@ -1265,14 +1270,14 @@ csched2_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
{
struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
- BUG_ON( is_idle_vcpu(vc) );
+ ASSERT(!is_idle_vcpu(vc));
SCHED_STAT_CRANK(vcpu_sleep);
if ( curr_on_cpu(vc->processor) == vc )
cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
else if ( __vcpu_on_runq(svc) )
{
- BUG_ON(svc->rqd != RQD(ops, vc->processor));
+ ASSERT(svc->rqd == RQD(ops, vc->processor));
update_load(ops, svc->rqd, svc, -1, NOW());
__runq_remove(svc);
}
@@ -1288,9 +1293,7 @@ csched2_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
/* Schedule lock should be held at this point. */
- d2printk("w %pv\n", vc);
-
- BUG_ON( is_idle_vcpu(vc) );
+ ASSERT(!is_idle_vcpu(vc));
if ( unlikely(curr_on_cpu(vc->processor) == vc) )
{
@@ -1322,7 +1325,7 @@ csched2_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
if ( svc->rqd == NULL )
runq_assign(ops, vc);
else
- BUG_ON(RQD(ops, vc->processor) != svc->rqd );
+ ASSERT(RQD(ops, vc->processor) == svc->rqd );
now = NOW();
@@ -1333,7 +1336,6 @@ csched2_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
runq_tickle(ops, svc, now);
out:
- d2printk("w-\n");
return;
}
@@ -1345,6 +1347,7 @@ csched2_context_saved(const struct scheduler *ops, struct vcpu *vc)
s_time_t now = NOW();
BUG_ON( !is_idle_vcpu(vc) && svc->rqd != RQD(ops, vc->processor));
+ ASSERT(is_idle_vcpu(vc) || svc->rqd == RQD(ops, vc->processor));
/* This vcpu is now eligible to be put on the runqueue again */
__clear_bit(__CSFLAG_scheduled, &svc->flags);
@@ -1360,7 +1363,7 @@ csched2_context_saved(const struct scheduler *ops, struct vcpu *vc)
if ( __test_and_clear_bit(__CSFLAG_delayed_runq_add, &svc->flags)
&& likely(vcpu_runnable(vc)) )
{
- BUG_ON(__vcpu_on_runq(svc));
+ ASSERT(!__vcpu_on_runq(svc));
runq_insert(ops, svc);
runq_tickle(ops, svc, now);
@@ -1380,7 +1383,7 @@ csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
struct csched2_vcpu *svc = CSCHED2_VCPU(vc);
s_time_t min_avgload;
- BUG_ON(cpumask_empty(&prv->active_queues));
+ ASSERT(!cpumask_empty(&prv->active_queues));
/* Locking:
* - vc->processor is already locked
@@ -1399,12 +1402,8 @@ csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
if ( !spin_trylock(&prv->lock) )
{
- if ( __test_and_clear_bit(__CSFLAG_runq_migrate_request, &svc->flags) )
- {
- d2printk("%pv -\n", svc->vcpu);
- __clear_bit(__CSFLAG_runq_migrate_request, &svc->flags);
- }
-
+ /* We may be here because someone requested us to migrate */
+ __clear_bit(__CSFLAG_runq_migrate_request, &svc->flags);
return get_fallback_cpu(svc);
}
@@ -1414,7 +1413,7 @@ csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
{
if ( unlikely(svc->migrate_rqd->id < 0) )
{
- printk("%s: Runqueue migrate aborted because target runqueue disappeared!\n",
+ printk(XENLOG_WARNING "%s: target runqueue disappeared!\n",
__func__);
}
else
@@ -1423,10 +1422,7 @@ csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
&svc->migrate_rqd->active);
new_cpu = cpumask_any(cpumask_scratch);
if ( new_cpu < nr_cpu_ids )
- {
- d2printk("%pv +\n", svc->vcpu);
goto out_up;
- }
}
/* Fall-through to normal cpu pick */
}
@@ -1540,9 +1536,26 @@ static void migrate(const struct scheduler *ops,
struct csched2_runqueue_data *trqd,
s_time_t now)
{
- if ( svc->flags & CSFLAG_scheduled )
+ bool_t running = svc->flags & CSFLAG_scheduled;
+ bool_t on_runq = __vcpu_on_runq(svc);
+
+ /* TRACE */
+ {
+ struct {
+ unsigned vcpu:16, dom:16;
+ unsigned rqi:16, trqi:16;
+ } d;
+ d.dom = svc->vcpu->domain->domain_id;
+ d.vcpu = svc->vcpu->vcpu_id;
+ d.rqi = svc->rqd->id;
+ d.trqi = trqd->id;
+ trace_var(TRC_CSCHED2_MIGRATE, 1,
+ sizeof(d),
+ (unsigned char *)&d);
+ }
+
+ if ( running )
{
- d2printk("%pv %d-%d a\n", svc->vcpu, svc->rqd->id, trqd->id);
/* It's running; mark it to migrate. */
svc->migrate_rqd = trqd;
__set_bit(_VPF_migrating, &svc->vcpu->pause_flags);
@@ -1551,21 +1564,19 @@ static void migrate(const struct scheduler *ops,
}
else
{
- int on_runq=0;
/* It's not running; just move it */
- d2printk("%pv %d-%d i\n", svc->vcpu, svc->rqd->id, trqd->id);
- if ( __vcpu_on_runq(svc) )
+ if ( on_runq )
{
__runq_remove(svc);
update_load(ops, svc->rqd, NULL, -1, now);
- on_runq=1;
+ on_runq = 1;
}
__runq_deassign(svc);
cpumask_and(cpumask_scratch, svc->vcpu->cpu_hard_affinity,
&trqd->active);
svc->vcpu->processor = cpumask_any(cpumask_scratch);
- BUG_ON(svc->vcpu->processor >= nr_cpu_ids);
+ ASSERT(svc->vcpu->processor < nr_cpu_ids);
__runq_assign(svc, trqd);
if ( on_runq )
@@ -1760,7 +1771,7 @@ csched2_vcpu_migrate(
struct csched2_runqueue_data *trqd;
/* Check if new_cpu is valid */
- BUG_ON(!cpumask_test_cpu(new_cpu, &CSCHED2_PRIV(ops)->initialized));
+ ASSERT(cpumask_test_cpu(new_cpu, &CSCHED2_PRIV(ops)->initialized));
ASSERT(cpumask_test_cpu(new_cpu, vc->cpu_hard_affinity));
trqd = RQD(ops, new_cpu);
@@ -1820,7 +1831,7 @@ csched2_dom_cntl(
* been disabled. */
spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
- BUG_ON(svc->rqd != RQD(ops, svc->vcpu->processor));
+ ASSERT(svc->rqd == RQD(ops, svc->vcpu->processor));
svc->weight = sdom->weight;
update_max_weight(svc->rqd, svc->weight, old_weight);
@@ -1869,8 +1880,6 @@ csched2_dom_init(const struct scheduler *ops, struct domain *dom)
{
struct csched2_dom *sdom;
- printk("%s: Initializing domain %d\n", __func__, dom->domain_id);
-
if ( is_idle_domain(dom) )
return 0;
@@ -1901,7 +1910,7 @@ csched2_free_domdata(const struct scheduler *ops, void *data)
static void
csched2_dom_destroy(const struct scheduler *ops, struct domain *dom)
{
- BUG_ON(CSCHED2_DOM(dom)->nr_vcpus > 0);
+ ASSERT(CSCHED2_DOM(dom)->nr_vcpus == 0);
csched2_free_domdata(ops, CSCHED2_DOM(dom));
}
@@ -2042,8 +2051,6 @@ csched2_schedule(
SCHED_STAT_CRANK(schedule);
CSCHED2_VCPU_CHECK(current);
- d2printk("sc p%d c %pv now %"PRI_stime"\n", cpu, scurr->vcpu, now);
-
BUG_ON(!cpumask_test_cpu(cpu, &CSCHED2_PRIV(ops)->initialized));
rqd = RQD(ops, cpu);
@@ -2051,7 +2058,7 @@ csched2_schedule(
/* Protected by runqueue lock */
- /* DEBUG */
+#ifndef NDEBUG
if ( !is_idle_vcpu(scurr->vcpu) && scurr->rqd != rqd)
{
int other_rqi = -1, this_rqi = c2r(ops, cpu);
@@ -2069,12 +2076,13 @@ csched2_schedule(
}
}
}
- printk("%s: pcpu %d rq %d, but scurr %pv assigned to "
+ printk("DEBUG: %s: pcpu %d rq %d, but scurr %pv assigned to "
"pcpu %d rq %d!\n",
__func__,
cpu, this_rqi,
scurr->vcpu, scurr->vcpu->processor, other_rqi);
}
+#endif
BUG_ON(!is_idle_vcpu(scurr->vcpu) && scurr->rqd != rqd);
/* Clear "tickled" bit now that we've been scheduled */
@@ -2125,15 +2133,10 @@ csched2_schedule(
/* If switching, remove this from the runqueue and mark it scheduled */
if ( snext != scurr )
{
- BUG_ON(snext->rqd != rqd);
-
+ ASSERT(snext->rqd == rqd);
+ ASSERT(!snext->vcpu->is_running);
+
__runq_remove(snext);
- if ( snext->vcpu->is_running )
- {
- printk("p%d: snext %pv running on p%d! scurr %pv\n",
- cpu, snext->vcpu, snext->vcpu->processor, scurr->vcpu);
- BUG();
- }
__set_bit(__CSFLAG_scheduled, &snext->flags);
}
@@ -2439,10 +2442,10 @@ init_pdata(struct csched2_private *prv, unsigned int cpu)
rqd = prv->rqd + rqi;
- printk("Adding cpu %d to runqueue %d\n", cpu, rqi);
+ printk(XENLOG_INFO "Adding cpu %d to runqueue %d\n", cpu, rqi);
if ( ! cpumask_test_cpu(rqi, &prv->active_queues) )
{
- printk(" First cpu on runqueue, activating\n");
+ printk(XENLOG_INFO " First cpu on runqueue, activating\n");
activate_runqueue(prv, rqi);
}
@@ -2554,14 +2557,14 @@ csched2_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
/* No need to save IRQs here, they're already disabled */
spin_lock(&rqd->lock);
- printk("Removing cpu %d from runqueue %d\n", cpu, rqi);
+ printk(XENLOG_INFO "Removing cpu %d from runqueue %d\n", cpu, rqi);
__cpumask_clear_cpu(cpu, &rqd->idle);
__cpumask_clear_cpu(cpu, &rqd->active);
if ( cpumask_empty(&rqd->active) )
{
- printk(" No cpus left on runqueue, disabling\n");
+ printk(XENLOG_INFO " No cpus left on runqueue, disabling\n");
deactivate_runqueue(prv, rqi);
}
@@ -2580,15 +2583,20 @@ csched2_init(struct scheduler *ops)
int i;
struct csched2_private *prv;
- printk("Initializing Credit2 scheduler\n" \
- " WARNING: This is experimental software in development.\n" \
+ printk("Initializing Credit2 scheduler\n");
+ printk(" WARNING: This is experimental software in development.\n" \
" Use at your own risk.\n");
- printk(" load_precision_shift: %d\n", opt_load_precision_shift);
- printk(" load_window_shift: %d\n", opt_load_window_shift);
- printk(" underload_balance_tolerance: %d\n", opt_underload_balance_tolerance);
- printk(" overload_balance_tolerance: %d\n", opt_overload_balance_tolerance);
- printk(" runqueues arrangement: %s\n", opt_runqueue_str[opt_runqueue]);
+ printk(XENLOG_INFO " load_precision_shift: %d\n"
+ " load_window_shift: %d\n"
+ " underload_balance_tolerance: %d\n"
+ " overload_balance_tolerance: %d\n"
+ " runqueues arrangement: %s\n",
+ opt_load_precision_shift,
+ opt_load_window_shift,
+ opt_underload_balance_tolerance,
+ opt_overload_balance_tolerance,
+ opt_runqueue_str[opt_runqueue]);
if ( opt_load_window_shift < LOADAVG_PRECISION_SHIFT_MIN )
{
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
next prev parent reply other threads:[~2016-06-17 23:12 UTC|newest]
Thread overview: 64+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-06-17 23:11 [PATCH 00/19] xen: sched: assorted fixes and improvements to Credit2 Dario Faggioli
2016-06-17 23:11 ` [PATCH 01/19] xen: sched: leave CPUs doing tasklet work alone Dario Faggioli
2016-06-20 7:48 ` Jan Beulich
2016-07-07 10:11 ` Dario Faggioli
2016-06-21 16:17 ` anshul makkar
2016-07-06 15:41 ` George Dunlap
2016-07-07 10:25 ` Dario Faggioli
2016-06-17 23:11 ` [PATCH 02/19] xen: sched: make the 'tickled' perf counter clearer Dario Faggioli
2016-06-18 0:36 ` Meng Xu
2016-07-06 15:52 ` George Dunlap
2016-06-17 23:11 ` [PATCH 03/19] xen: credit2: insert and tickle don't need a cpu parameter Dario Faggioli
2016-06-21 16:41 ` anshul makkar
2016-07-06 15:59 ` George Dunlap
2016-06-17 23:11 ` [PATCH 04/19] xen: credit2: kill useless helper function choose_cpu Dario Faggioli
2016-07-06 16:02 ` George Dunlap
2016-07-07 10:26 ` Dario Faggioli
2016-06-17 23:11 ` [PATCH 05/19] xen: credit2: do not warn if calling burn_credits more than once Dario Faggioli
2016-07-06 16:05 ` George Dunlap
2016-06-17 23:12 ` [PATCH 06/19] xen: credit2: read NOW() with the proper runq lock held Dario Faggioli
2016-06-20 7:56 ` Jan Beulich
2016-07-06 16:10 ` George Dunlap
2016-07-07 10:28 ` Dario Faggioli
2016-06-17 23:12 ` [PATCH 07/19] xen: credit2: prevent load balancing to go mad if time goes backwards Dario Faggioli
2016-06-20 8:02 ` Jan Beulich
2016-07-06 16:21 ` George Dunlap
2016-07-07 7:29 ` Jan Beulich
2016-07-07 9:09 ` George Dunlap
2016-07-07 9:18 ` Jan Beulich
2016-07-07 10:53 ` Dario Faggioli
2016-06-17 23:12 ` [PATCH 08/19] xen: credit2: when tickling, check idle cpus first Dario Faggioli
2016-07-06 16:36 ` George Dunlap
2016-06-17 23:12 ` [PATCH 09/19] xen: credit2: avoid calling __update_svc_load() multiple times on the same vcpu Dario Faggioli
2016-07-06 16:40 ` George Dunlap
2016-06-17 23:12 ` [PATCH 10/19] xen: credit2: rework load tracking logic Dario Faggioli
2016-07-06 17:33 ` George Dunlap
2016-06-17 23:12 ` [PATCH 11/19] tools: tracing: adapt Credit2 load tracking events to new format Dario Faggioli
2016-06-21 9:27 ` Wei Liu
2016-06-17 23:12 ` [PATCH 12/19] xen: credit2: use non-atomic cpumask and bit operations Dario Faggioli
2016-07-07 9:45 ` George Dunlap
2016-06-17 23:12 ` Dario Faggioli [this message]
2016-06-20 8:13 ` [PATCH 13/19] xen: credit2: make the code less experimental Jan Beulich
2016-07-07 10:59 ` Dario Faggioli
2016-07-07 15:17 ` George Dunlap
2016-07-07 16:43 ` Dario Faggioli
2016-06-17 23:12 ` [PATCH 14/19] xen: credit2: add yet some more tracing Dario Faggioli
2016-06-20 8:15 ` Jan Beulich
2016-07-07 15:34 ` George Dunlap
2016-07-07 15:34 ` George Dunlap
2016-06-17 23:13 ` [PATCH 15/19] xen: credit2: only marshall trace point arguments if tracing enabled Dario Faggioli
2016-07-07 15:37 ` George Dunlap
2016-06-17 23:13 ` [PATCH 16/19] tools: tracing: deal with new Credit2 events Dario Faggioli
2016-07-07 15:39 ` George Dunlap
2016-06-17 23:13 ` [PATCH 17/19] xen: credit2: the private scheduler lock can be an rwlock Dario Faggioli
2016-07-07 16:00 ` George Dunlap
2016-06-17 23:13 ` [PATCH 18/19] xen: credit2: implement SMT support independent runq arrangement Dario Faggioli
2016-06-20 8:26 ` Jan Beulich
2016-06-20 10:38 ` Dario Faggioli
2016-06-27 15:20 ` anshul makkar
2016-07-12 13:40 ` George Dunlap
2016-06-17 23:13 ` [PATCH 19/19] xen: credit2: use cpumask_first instead of cpumask_any when choosing cpu Dario Faggioli
2016-06-20 8:30 ` Jan Beulich
2016-06-20 11:28 ` Dario Faggioli
2016-06-21 10:42 ` David Vrabel
2016-07-07 16:55 ` Dario Faggioli
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=146620517107.29766.17643295412098867216.stgit@Solace.fritz.box \
--to=dario.faggioli@citrix.com \
--cc=anshul.makkar@citrix.com \
--cc=david.vrabel@citrix.com \
--cc=george.dunlap@citrix.com \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).