From: Dario Faggioli <dario.faggioli@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: George Dunlap <george.dunlap@eu.citrix.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>
Subject: [PATCH v2 10/11] xen: sched: provide some scratch space for not putting cpumasks on stack
Date: Wed, 06 Apr 2016 19:24:07 +0200
Message-ID: <20160406172407.25877.89123.stgit@Solace.fritz.box>
In-Reply-To: <20160406170023.25877.15622.stgit@Solace.fritz.box>

directly, from schedule.c, for any scheduler that needs
to use it.

In fact, Credit1 and RTDS already need this. Credit2 is
also going to need it, for supporting hard affinity
(which, typically, is what requires a lot of cpumask
manipulations inside various functions).

Therefore, let's define the scratch space at a broader
scope, to limit code duplication in handling it.

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
---
Cc: George Dunlap <george.dunlap@eu.citrix.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
Changes from v1:
* scratch space for cpumasks is no longer "global", and is
  defined in schedule.c, as suggested during review.
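
For illustration, here is (roughly) how a scheduler is expected to use
the new scratch space. This is just a sketch, not part of the patch, and
vcpu_can_run_here() is a made-up name:

    /*
     * Illustrative only: compute whether vc is allowed to run on 'cpu',
     * without putting a cpumask_t on the stack. The caller must hold
     * whatever lock serializes scheduling for 'cpu' (per-pCPU or
     * per-runqueue, depending on the scheduler), so that nobody else
     * can be using cpu's scratch mask concurrently.
     */
    static bool_t vcpu_can_run_here(const struct vcpu *vc, unsigned int cpu)
    {
        cpumask_t *mask = cpumask_scratch_cpu(cpu);

        cpumask_and(mask, vc->cpu_hard_affinity,
                    cpupool_domain_cpumask(vc->domain));

        return cpumask_test_cpu(cpu, mask);
    }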
---
 xen/common/sched_credit.c  |   34 ++++++-------------------
 xen/common/sched_credit2.c |    1 +
 xen/common/sched_rt.c      |   59 +++-----------------------------------------
 xen/common/schedule.c      |    8 ++++++
 xen/include/xen/sched-if.h |    4 +++
 5 files changed, 25 insertions(+), 81 deletions(-)

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 540d515..eac3f5e 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -171,20 +171,9 @@ struct csched_pcpu {
     struct timer ticker;
     unsigned int tick;
     unsigned int idle_bias;
-    /* Store this here to avoid having too many cpumask_var_t-s on stack */
-    cpumask_var_t balance_mask;
 };
 
 /*
- * Convenience macro for accessing the per-PCPU cpumask we need for
- * implementing the two steps (soft and hard affinity) balancing logic.
- * It is stored in csched_pcpu so that serialization is not an issue,
- * as there is a csched_pcpu for each PCPU, and we always hold the
- * runqueue lock for the proper PCPU when using this.
- */
-#define csched_balance_mask(c) (CSCHED_PCPU(c)->balance_mask)
-
-/*
  * Virtual CPU
  */
 struct csched_vcpu {
@@ -416,10 +405,10 @@ static inline void __runq_tickle(struct csched_vcpu *new)
 
             /* Are there idlers suitable for new (for this balance step)? */
             csched_balance_cpumask(new->vcpu, balance_step,
-                                   csched_balance_mask(cpu));
-            cpumask_and(csched_balance_mask(cpu),
-                        csched_balance_mask(cpu), &idle_mask);
-            new_idlers_empty = cpumask_empty(csched_balance_mask(cpu));
+                                   cpumask_scratch_cpu(cpu));
+            cpumask_and(cpumask_scratch_cpu(cpu),
+                        cpumask_scratch_cpu(cpu), &idle_mask);
+            new_idlers_empty = cpumask_empty(cpumask_scratch_cpu(cpu));
 
             /*
              * Let's not be too harsh! If there aren't idlers suitable
@@ -445,8 +434,8 @@ static inline void __runq_tickle(struct csched_vcpu *new)
             if ( new_idlers_empty && new->pri > cur->pri )
             {
                 csched_balance_cpumask(cur->vcpu, balance_step,
-                                       csched_balance_mask(cpu));
-                if ( cpumask_intersects(csched_balance_mask(cpu),
+                                       cpumask_scratch_cpu(cpu));
+                if ( cpumask_intersects(cpumask_scratch_cpu(cpu),
                                         &idle_mask) )
                 {
                     SCHED_VCPU_STAT_CRANK(cur, kicked_away);
@@ -519,7 +508,6 @@ csched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 
     spin_unlock_irqrestore(&prv->lock, flags);
 
-    free_cpumask_var(spc->balance_mask);
     xfree(spc);
 }
 
@@ -533,12 +521,6 @@ csched_alloc_pdata(const struct scheduler *ops, int cpu)
     if ( spc == NULL )
         return ERR_PTR(-ENOMEM);
 
-    if ( !alloc_cpumask_var(&spc->balance_mask) )
-    {
-        xfree(spc);
-        return ERR_PTR(-ENOMEM);
-    }
-
     return spc;
 }
 
@@ -1592,9 +1574,9 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
                  && !__vcpu_has_soft_affinity(vc, vc->cpu_hard_affinity) )
                 continue;
 
-            csched_balance_cpumask(vc, balance_step, csched_balance_mask(cpu));
+            csched_balance_cpumask(vc, balance_step, cpumask_scratch_cpu(cpu));
             if ( __csched_vcpu_is_migrateable(vc, cpu,
-                                              csched_balance_mask(cpu)) )
+                                              cpumask_scratch_cpu(cpu)) )
             {
                 /* We got a candidate. Grab it! */
                 TRACE_3D(TRC_CSCHED_STOLEN_VCPU, peer_cpu,
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 3b45816..084963a 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -2252,6 +2252,7 @@ csched2_init(struct scheduler *ops)
     if ( prv == NULL )
         return -ENOMEM;
     ops->sched_data = prv;
+
     spin_lock_init(&prv->lock);
     INIT_LIST_HEAD(&prv->sdom);
 
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 3bb8c71..673fc92 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -155,24 +155,6 @@
 #define TRC_RTDS_BUDGET_REPLENISH TRC_SCHED_CLASS_EVT(RTDS, 4)
 #define TRC_RTDS_SCHED_TASKLET    TRC_SCHED_CLASS_EVT(RTDS, 5)
 
- /*
-  * Useful to avoid too many cpumask_var_t on the stack.
-  */
-static cpumask_var_t *_cpumask_scratch;
-#define cpumask_scratch _cpumask_scratch[smp_processor_id()]
-
-/*
- * We want to only allocate the _cpumask_scratch array the first time an
- * instance of this scheduler is used, and avoid reallocating and leaking
- * the old one when more instance are activated inside new cpupools. We
- * also want to get rid of it when the last instance is de-inited.
- *
- * So we (sort of) reference count the number of initialized instances. This
- * does not need to happen via atomic_t refcounters, as it only happens either
- * during boot, or under the protection of the cpupool_lock spinlock.
- */
-static unsigned int nr_rt_ops;
-
 static void repl_timer_handler(void *data);
 
 /*
@@ -301,12 +283,11 @@ rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
     /*
      * We can't just use 'cpumask_scratch' because the dumping can
      * happen from a pCPU outside of this scheduler's cpupool, and
-     * hence it's not right to use the pCPU's scratch mask (which
-     * may even not exist!). On the other hand, it is safe to use
-     * svc->vcpu->processor's own scratch space, since we hold the
-     * runqueue lock.
+     * hence it's not right to use its pCPU's scratch mask.
+     * On the other hand, it is safe to use svc->vcpu->processor's
+     * own scratch space, since we hold the runqueue lock.
      */
-    mask = _cpumask_scratch[svc->vcpu->processor];
+    mask = cpumask_scratch_cpu(svc->vcpu->processor);
 
     cpupool_mask = cpupool_domain_cpumask(svc->vcpu->domain);
     cpumask_and(mask, cpupool_mask, svc->vcpu->cpu_hard_affinity);
@@ -609,16 +590,6 @@ rt_init(struct scheduler *ops)
     if ( prv == NULL )
         return -ENOMEM;
 
-    ASSERT( _cpumask_scratch == NULL || nr_rt_ops > 0 );
-
-    if ( !_cpumask_scratch )
-    {
-        _cpumask_scratch = xmalloc_array(cpumask_var_t, nr_cpu_ids);
-        if ( !_cpumask_scratch )
-            goto no_mem;
-    }
-    nr_rt_ops++;
-
     spin_lock_init(&prv->lock);
     INIT_LIST_HEAD(&prv->sdom);
     INIT_LIST_HEAD(&prv->runq);
@@ -636,10 +607,6 @@ rt_init(struct scheduler *ops)
     prv->repl_timer = NULL;
 
     return 0;
-
- no_mem:
-    xfree(prv);
-    return -ENOMEM;
 }
 
 static void
@@ -647,14 +614,6 @@ rt_deinit(struct scheduler *ops)
 {
     struct rt_private *prv = rt_priv(ops);
 
-    ASSERT( _cpumask_scratch && nr_rt_ops > 0 );
-
-    if ( (--nr_rt_ops) == 0 )
-    {
-        xfree(_cpumask_scratch);
-        _cpumask_scratch = NULL;
-    }
-
     kill_timer(prv->repl_timer);
     xfree(prv->repl_timer);
 
@@ -718,9 +677,6 @@ rt_alloc_pdata(const struct scheduler *ops, int cpu)
 {
     struct rt_private *prv = rt_priv(ops);
 
-    if ( !alloc_cpumask_var(&_cpumask_scratch[cpu]) )
-        return ERR_PTR(-ENOMEM);
-
     if ( prv->repl_timer == NULL )
     {
         /* Allocate the timer on the first cpu of this pool. */
@@ -735,12 +691,6 @@ rt_alloc_pdata(const struct scheduler *ops, int cpu)
     return NULL;
 }
 
-static void
-rt_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
-{
-    free_cpumask_var(_cpumask_scratch[cpu]);
-}
-
 static void *
 rt_alloc_domdata(const struct scheduler *ops, struct domain *dom)
 {
@@ -1484,7 +1434,6 @@ static const struct scheduler sched_rtds_def = {
     .init           = rt_init,
     .deinit         = rt_deinit,
     .alloc_pdata    = rt_alloc_pdata,
-    .free_pdata     = rt_free_pdata,
     .init_pdata     = rt_init_pdata,
     .switch_sched   = rt_switch_sched,
     .alloc_domdata  = rt_alloc_domdata,
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 5559aa1..922b035 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -65,6 +65,14 @@ static void poll_timer_fn(void *data);
 DEFINE_PER_CPU(struct schedule_data, schedule_data);
 DEFINE_PER_CPU(struct scheduler *, scheduler);
 
+/*
+ * Scratch space, for avoiding having too many cpumask_var_t on the stack.
+ * Properly serializing access, if necessary, is the responsibility of each
+ * scheduler (typically, one can expect this to be protected by the per-pCPU
+ * or per-runqueue lock).
+ */
+DEFINE_PER_CPU(cpumask_t, cpumask_scratch);
+
 extern const struct scheduler *__start_schedulers_array[], *__end_schedulers_array[];
 #define NUM_SCHEDULERS (__end_schedulers_array - __start_schedulers_array)
 #define schedulers __start_schedulers_array
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 9cebe41..1db7c8d 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -47,6 +47,10 @@ DECLARE_PER_CPU(struct schedule_data, schedule_data);
 DECLARE_PER_CPU(struct scheduler *, scheduler);
 DECLARE_PER_CPU(struct cpupool *, cpupool);
 
+DECLARE_PER_CPU(cpumask_t, cpumask_scratch);
+#define cpumask_scratch        (&this_cpu(cpumask_scratch))
+#define cpumask_scratch_cpu(c) (&per_cpu(cpumask_scratch, c))
+
 #define sched_lock(kind, param, cpu, irq, arg...) \
 static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
 { \

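As the rt_dump_vcpu() comment in the sched_rt.c hunk above illustrates,
the scratch space of a remote pCPU can also be used, as long as the lock
serializing that pCPU's scheduling is held. A minimal sketch of the
pattern (mirroring what the patch does in sched_rt.c; illustrative only):

    /*
     * Dumping can happen from a pCPU outside this scheduler's cpupool,
     * so use svc->vcpu->processor's scratch mask instead of the local
     * one; this is safe because we hold the runqueue lock.
     */
    cpumask_t *mask = cpumask_scratch_cpu(svc->vcpu->processor);

    cpumask_and(mask, cpupool_domain_cpumask(svc->vcpu->domain),
                svc->vcpu->cpu_hard_affinity);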


