From: "Justin T. Weaver"
Subject: [PATCH v3 2/4] sched: factor out per-vcpu affinity related code to common header file
Date: Wed, 25 Mar 2015 23:48:32 -1000
Message-ID: <1427363314-25430-3-git-send-email-jtweaver@hawaii.edu>
In-Reply-To: <1427363314-25430-1-git-send-email-jtweaver@hawaii.edu>
References: <1427363314-25430-1-git-send-email-jtweaver@hawaii.edu>
To: xen-devel@lists.xen.org
Cc: george.dunlap@eu.citrix.com, dario.faggioli@citrix.com,
 "Justin T. Weaver", henric@hawaii.edu

Move the affinity balancing related functions and defines from
sched_credit.c to sched-if.h, so that other schedulers can use them.
Change the name prefixes from csched to sched, since they are no longer
specific to the credit scheduler.

Signed-off-by: Justin T. Weaver
---
Changes in v3:
First introduced in patch series version 3
---
 xen/common/sched_credit.c  |   87 ++++++--------------------------------------
 xen/include/xen/sched-if.h |   65 +++++++++++++++++++++++++++++++++
 2 files changed, 76 insertions(+), 76 deletions(-)
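As an illustration of what the move enables (a sketch for review only, not
part of the patch): any scheduler can now drive the same two-step balancing
logic. example_pick_cpu() and its scratch mask below are made up; only the
helpers being moved to sched-if.h and the standard cpumask routines are real.

static int example_pick_cpu(const struct vcpu *vc, cpumask_t *scratch)
{
    int step, cpu = -1;

    for_each_sched_balance_step( step )
    {
        /* Skip the soft step when it cannot differ from the hard one. */
        if ( step == SCHED_BALANCE_SOFT_AFFINITY
             && !__vcpu_has_soft_affinity(vc, vc->cpu_hard_affinity) )
            continue;

        /* scratch = soft && hard on the first step, hard on the second. */
        sched_balance_cpumask(vc, step, scratch);
        cpumask_and(scratch, scratch,
                    cpupool_online_cpumask(vc->domain->cpupool));

        /*
         * Take the first suitable pcpu; we only reach the hard affinity
         * step if the soft affinity step found nothing.
         */
        if ( !cpumask_empty(scratch) )
        {
            cpu = cpumask_first(scratch);
            break;
        }
    }

    return cpu;
}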
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index bec67ff..3eb9440 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -112,26 +112,6 @@
 
 
 /*
- * Hard and soft affinity load balancing.
- *
- * Idea is each vcpu has some pcpus that it prefers, some that it does not
- * prefer but is OK with, and some that it cannot run on at all. The first
- * set of pcpus are the ones that are both in the soft affinity *and* in the
- * hard affinity; the second set of pcpus are the ones that are in the hard
- * affinity but *not* in the soft affinity; the third set of pcpus are the
- * ones that are not in the hard affinity.
- *
- * We implement a two step balancing logic. Basically, every time there is
- * the need to decide where to run a vcpu, we first check the soft affinity
- * (well, actually, the && between soft and hard affinity), to see if we can
- * send it where it prefers to (and can) run on. However, if the first step
- * does not find any suitable and free pcpu, we fall back checking the hard
- * affinity.
- */
-#define CSCHED_BALANCE_SOFT_AFFINITY 0
-#define CSCHED_BALANCE_HARD_AFFINITY 1
-
-/*
  * Boot parameters
  */
 static int __read_mostly sched_credit_tslice_ms = CSCHED_DEFAULT_TSLICE_MS;
@@ -273,51 +253,6 @@ __runq_remove(struct csched_vcpu *svc)
 }
 
-#define for_each_csched_balance_step(step) \
-    for ( (step) = 0; (step) <= CSCHED_BALANCE_HARD_AFFINITY; (step)++ )
-
-
-/*
- * Hard affinity balancing is always necessary and must never be skipped.
- * But soft affinity need only be considered when it has a functionally
- * different effect than other constraints (such as hard affinity, cpus
- * online, or cpupools).
- *
- * Soft affinity only needs to be considered if:
- * * The cpus in the cpupool are not a subset of soft affinity
- * * The hard affinity is not a subset of soft affinity
- * * There is an overlap between the soft affinity and the mask which is
- *   currently being considered.
- */
-static inline int __vcpu_has_soft_affinity(const struct vcpu *vc,
-                                           const cpumask_t *mask)
-{
-    return !cpumask_subset(cpupool_online_cpumask(vc->domain->cpupool),
-                           vc->cpu_soft_affinity) &&
-           !cpumask_subset(vc->cpu_hard_affinity, vc->cpu_soft_affinity) &&
-           cpumask_intersects(vc->cpu_soft_affinity, mask);
-}
-
-/*
- * Each csched-balance step uses its own cpumask. This function determines
- * which one (given the step) and copies it in mask. For the soft affinity
- * balancing step, the pcpus that are not part of vc's hard affinity are
- * filtered out from the result, to avoid running a vcpu where it would
- * like, but is not allowed to!
- */
-static void
-csched_balance_cpumask(const struct vcpu *vc, int step, cpumask_t *mask)
-{
-    if ( step == CSCHED_BALANCE_SOFT_AFFINITY )
-    {
-        cpumask_and(mask, vc->cpu_soft_affinity, vc->cpu_hard_affinity);
-
-        if ( unlikely(cpumask_empty(mask)) )
-            cpumask_copy(mask, vc->cpu_hard_affinity);
-    }
-    else /* step == CSCHED_BALANCE_HARD_AFFINITY */
-        cpumask_copy(mask, vc->cpu_hard_affinity);
-}
 
 static void
 burn_credits(struct csched_vcpu *svc, s_time_t now)
 {
@@ -379,18 +314,18 @@ __runq_tickle(unsigned int cpu, struct csched_vcpu *new)
              * Soft and hard affinity balancing loop. For vcpus without
              * a useful soft affinity, consider hard affinity only.
              */
-            for_each_csched_balance_step( balance_step )
+            for_each_sched_balance_step( balance_step )
             {
                 int new_idlers_empty;
 
-                if ( balance_step == CSCHED_BALANCE_SOFT_AFFINITY
+                if ( balance_step == SCHED_BALANCE_SOFT_AFFINITY
                      && !__vcpu_has_soft_affinity(new->vcpu,
                                                   new->vcpu->cpu_hard_affinity) )
                     continue;
 
                 /* Are there idlers suitable for new (for this balance step)? */
-                csched_balance_cpumask(new->vcpu, balance_step,
-                                       csched_balance_mask);
+                sched_balance_cpumask(new->vcpu, balance_step,
+                                      csched_balance_mask);
                 cpumask_and(&idle_mask, prv->idlers, csched_balance_mask);
                 new_idlers_empty = cpumask_empty(&idle_mask);
 
@@ -400,7 +335,7 @@ __runq_tickle(unsigned int cpu, struct csched_vcpu *new)
                  * hard affinity as well, before taking final decisions.
                  */
                 if ( new_idlers_empty
-                     && balance_step == CSCHED_BALANCE_SOFT_AFFINITY )
+                     && balance_step == SCHED_BALANCE_SOFT_AFFINITY )
                     continue;
 
                 /*
@@ -622,7 +557,7 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
     online = cpupool_scheduler_cpumask(vc->domain->cpupool);
     cpumask_and(&cpus, vc->cpu_hard_affinity, online);
 
-    for_each_csched_balance_step( balance_step )
+    for_each_sched_balance_step( balance_step )
     {
         /*
          * We want to pick up a pcpu among the ones that are online and
@@ -642,12 +577,12 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
          * cpus and, if the result is empty, we just skip the soft affinity
          * balancing step all together.
          */
-        if ( balance_step == CSCHED_BALANCE_SOFT_AFFINITY
+        if ( balance_step == SCHED_BALANCE_SOFT_AFFINITY
              && !__vcpu_has_soft_affinity(vc, &cpus) )
             continue;
 
         /* Pick an online CPU from the proper affinity mask */
-        csched_balance_cpumask(vc, balance_step, &cpus);
+        sched_balance_cpumask(vc, balance_step, &cpus);
         cpumask_and(&cpus, &cpus, online);
 
         /* If present, prefer vc's current processor */
@@ -1465,11 +1400,11 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
              * vCPUs with useful soft affinities in some sort of bitmap
              * or counter.
             */
-            if ( balance_step == CSCHED_BALANCE_SOFT_AFFINITY
+            if ( balance_step == SCHED_BALANCE_SOFT_AFFINITY
                  && !__vcpu_has_soft_affinity(vc, vc->cpu_hard_affinity) )
                 continue;
 
-            csched_balance_cpumask(vc, balance_step, csched_balance_mask);
+            sched_balance_cpumask(vc, balance_step, csched_balance_mask);
             if ( __csched_vcpu_is_migrateable(vc, cpu, csched_balance_mask) )
             {
                 /* We got a candidate. Grab it! */
@@ -1520,7 +1455,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
      * 1. any "soft-affine work" to steal first,
      * 2. if not finding anything, any "hard-affine work" to steal.
      */
-    for_each_csched_balance_step( bstep )
+    for_each_sched_balance_step( bstep )
     {
         /*
          * We peek at the non-idling CPUs in a node-wise fashion. In fact,
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 7cc25c6..3a118da 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -188,4 +188,69 @@ struct cpupool
 #define cpupool_online_cpumask(_pool) \
     (((_pool) == NULL) ? &cpu_online_map : (_pool)->cpu_valid)
 
+/*
+ * Hard and soft affinity load balancing.
+ *
+ * Idea is each vcpu has some pcpus that it prefers, some that it does not
+ * prefer but is OK with, and some that it cannot run on at all. The first
+ * set of pcpus are the ones that are both in the soft affinity *and* in the
+ * hard affinity; the second set of pcpus are the ones that are in the hard
+ * affinity but *not* in the soft affinity; the third set of pcpus are the
+ * ones that are not in the hard affinity.
+ *
+ * We implement a two step balancing logic. Basically, every time there is
+ * the need to decide where to run a vcpu, we first check the soft affinity
+ * (well, actually, the && between soft and hard affinity), to see if we can
+ * send it where it prefers to (and can) run on. However, if the first step
+ * does not find any suitable and free pcpu, we fall back checking the hard
+ * affinity.
+ */
+#define SCHED_BALANCE_SOFT_AFFINITY 0
+#define SCHED_BALANCE_HARD_AFFINITY 1
+
+#define for_each_sched_balance_step(step) \
+    for ( (step) = 0; (step) <= SCHED_BALANCE_HARD_AFFINITY; (step)++ )
+
+/*
+ * Hard affinity balancing is always necessary and must never be skipped.
+ * But soft affinity need only be considered when it has a functionally
+ * different effect than other constraints (such as hard affinity, cpus
+ * online, or cpupools).
+ *
+ * Soft affinity only needs to be considered if:
+ * * The cpus in the cpupool are not a subset of soft affinity
+ * * The hard affinity is not a subset of soft affinity
+ * * There is an overlap between the soft affinity and the mask which is
+ *   currently being considered.
+ */
+static inline int __vcpu_has_soft_affinity(const struct vcpu *vc,
+                                           const cpumask_t *mask)
+{
+    return !cpumask_subset(cpupool_online_cpumask(vc->domain->cpupool),
+                           vc->cpu_soft_affinity) &&
+           !cpumask_subset(vc->cpu_hard_affinity, vc->cpu_soft_affinity) &&
+           cpumask_intersects(vc->cpu_soft_affinity, mask);
+}
+
+/*
+ * Each sched-balance step uses its own cpumask. This function determines
+ * which one (given the step) and copies it in mask. For the soft affinity
+ * balancing step, the pcpus that are not part of vc's hard affinity are
+ * filtered out from the result, to avoid running a vcpu where it would
+ * like, but is not allowed to!
+ */
+static inline void
+sched_balance_cpumask(const struct vcpu *vc, int step, cpumask_t *mask)
+{
+    if ( step == SCHED_BALANCE_SOFT_AFFINITY )
+    {
+        cpumask_and(mask, vc->cpu_soft_affinity, vc->cpu_hard_affinity);
+
+        if ( unlikely(cpumask_empty(mask)) )
+            cpumask_copy(mask, vc->cpu_hard_affinity);
+    }
+    else /* step == SCHED_BALANCE_HARD_AFFINITY */
+        cpumask_copy(mask, vc->cpu_hard_affinity);
+}
+
 #endif /* __XEN_SCHED_IF_H__ */
-- 
1.7.10.4
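For a concrete feel for the two steps (an illustrative example, with made-up
masks): with hard affinity {0-3} and soft affinity {2-5}, the
SCHED_BALANCE_SOFT_AFFINITY step of sched_balance_cpumask() produces {2,3}
(soft && hard) and the SCHED_BALANCE_HARD_AFFINITY step produces {0-3}; if
soft && hard were empty, the soft step falls back to the full hard mask. And
if, say, the soft affinity covered all the online cpus of the vcpu's cpupool,
__vcpu_has_soft_affinity() would return 0, letting callers skip the soft step
since its outcome could not differ from the hard step's.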