From: Peter Zijlstra <peterz@infradead.org>
To: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>,
	Srikar Dronamraju <srikar@linux.vnet.ibm.com>,
	Ingo Molnar <mingo@kernel.org>,
	Andrea Arcangeli <aarcange@redhat.com>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Linux-MM <linux-mm@kvack.org>,
	LKML <linux-kernel@vger.kernel.org>
Subject: Re: [PATCH,RFC] numa,sched: use group fault statistics in numa placement
Date: Thu, 1 Aug 2013 12:37:13 +0200
Message-ID: <20130801103713.GO3008@twins.programming.kicks-ass.net>
In-Reply-To: <20130801022319.4a6a977a@annuminas.surriel.com>

On Thu, Aug 01, 2013 at 02:23:19AM -0400, Rik van Riel wrote:
> Subject: [PATCH,RFC] numa,sched: use group fault statistics in numa placement
> 
> Here is a quick strawman on how the group fault stuff could be used
> to help pick the best node for a task. This is likely to be quite
> suboptimal and in need of tweaking. My main goal is to get this to
> Peter & Mel before it's breakfast time on their side of the Atlantic...
> 
> This goes on top of "sched, numa: Use {cpu, pid} to create task groups for shared faults"
> 
> Enjoy :)
> 
> +	/*
> +	 * Should we stay on our own, or move in with the group?
> +	 * The absolute count of faults may not be useful, but comparing
> +	 * the fraction of accesses in each top node may give us a hint
> +	 * where to start looking for a migration target.
> +	 *
> +	 *  max_group_faults     max_faults
> +	 * ------------------ > ------------
> +	 * total_group_faults   total_faults
> +	 */
> +	if (max_group_nid >= 0 && max_group_nid != max_nid) {
> +		if (max_group_faults * total_faults >
> +				max_faults * total_group_faults)
> +			max_nid = max_group_nid;
> +	}

That fraction comparison makes sense.
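
To make it concrete: cross-multiplying gives the same answer as comparing
the two fractions, without integer division. A stand-alone user-space
sketch (not kernel code), with made-up numbers -- a hypothetical group
with 60 of its 100 faults on its top node versus a task with 30 of its 80
on its own:

#include <stdio.h>
#include <stdbool.h>

/*
 * Illustration only: prefer the group's top node when the group's share
 * of its faults beats the task's share of its own faults.  The
 * cross-multiplication below gives the same answer as comparing
 * 60/100 against 30/80, without integer division.
 */
static bool group_fraction_wins(unsigned long max_group_faults,
				unsigned long total_group_faults,
				unsigned long max_faults,
				unsigned long total_faults)
{
	return max_group_faults * total_faults >
	       max_faults * total_group_faults;
}

int main(void)
{
	/* 60 * 80 = 4800 > 30 * 100 = 3000, so the group's node wins */
	printf("prefer group node: %d\n",
	       (int)group_fraction_wins(60, 100, 30, 80));
	return 0;
}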

Another part of the problem, which you might already have spotted, is
selecting a task to swap with. If you only look at per-task faults it's
often impossible to find a suitable swap task, because moving you to a
more suitable node would degrade the other task -- below is a patch
you've already seen but that I haven't yet posted, because I'm not at all
sure it's something 'sane' :-)
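
Compressing the idea from task_numa_compare() in the patch below into a
user-space sketch (faults_on() stands in for the kernel's task_faults();
the numbers in main() are made up): the improvement of a swap is scored
for both tasks together, so a swap only scores well when the pair, taken
together, ends up with more local accesses.

#include <stdio.h>

struct toy_task {
	unsigned long faults[2];	/* per-node NUMA fault counts, 2 nodes */
};

static long faults_on(const struct toy_task *t, int nid)
{
	return (long)t->faults[nid];
}

/* p would move src -> dst, cur would move dst -> src */
static long swap_improvement(const struct toy_task *p, int src_nid,
			     const struct toy_task *cur, int dst_nid)
{
	return (faults_on(p, dst_nid) - faults_on(p, src_nid)) +
	       (faults_on(cur, src_nid) - faults_on(cur, dst_nid));
}

int main(void)
{
	struct toy_task p   = { .faults = { 10, 40 } };	/* prefers node 1 */
	struct toy_task cur = { .faults = { 40, 10 } };	/* prefers node 0 */

	/* p runs on node 0, cur on node 1: swapping helps both, imp = 60 */
	printf("imp = %ld\n", swap_improvement(&p, 0, &cur, 1));
	return 0;
}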

With group information your case might be stronger because you already
have many tasks on that node.

Still, there's the tie case where two groups each have exactly half of
their tasks crossed between two nodes. I suppose we should forcefully
break the tie in that case.
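
One purely illustrative way to do that (not something the patch below
implements) would be to fall back to a stable, arbitrary ordering -- say
a hypothetical numeric group id -- so both groups agree on who gets which
node instead of ping-ponging:

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative tie-break only: when two groups have exactly equal claims
 * on a node, let the lower (hypothetical) group id win, so the decision
 * is deterministic on both sides.
 */
static bool group_wins_node(unsigned long my_faults_on_node,
			    unsigned long their_faults_on_node,
			    int my_group_id, int their_group_id)
{
	if (my_faults_on_node != their_faults_on_node)
		return my_faults_on_node > their_faults_on_node;
	return my_group_id < their_group_id;	/* arbitrary but stable */
}

int main(void)
{
	/* exact tie in faults: the lower group id claims the node */
	printf("%d\n", (int)group_wins_node(50, 50, 1, 2));
	return 0;
}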

And all this while also maintaining the invariants placed by the regular
balancer. It would be no good to move tasks about if the balancer would
then have to shuffle stuff right back (or worse) in order to maintain
fairness.

---
Subject: sched, numa: Alternative migration scheme
From: Peter Zijlstra <peterz@infradead.org>
Date: Sun Jul 21 23:12:13 CEST 2013


Signed-off-by: Peter Zijlstra <peterz@infradead.org>
---
 kernel/sched/fair.c |  260 +++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 187 insertions(+), 73 deletions(-)

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -816,6 +816,8 @@ update_stats_curr_start(struct cfs_rq *c
  * Scheduling class queueing methods:
  */
 
+static unsigned long task_h_load(struct task_struct *p);
+
 #ifdef CONFIG_NUMA_BALANCING
 /*
  * Approximate time to scan a full NUMA task in ms. The task scan period is
@@ -885,92 +887,206 @@ static inline int task_faults_idx(int ni
 	return 2 * nid + priv;
 }
 
+static inline unsigned long task_faults(struct task_struct *p, int nid)
+{
+	if (!p->numa_faults)
+		return 0;
+
+	return p->numa_faults[2*nid] + p->numa_faults[2*nid+1];
+}
+
+static unsigned long weighted_cpuload(const int cpu);
 static unsigned long source_load(int cpu, int type);
 static unsigned long target_load(int cpu, int type);
 static unsigned long power_of(int cpu);
 static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
 
-static int task_numa_find_cpu(struct task_struct *p, int nid)
+struct numa_stats {
+	unsigned long nr_running;
+	unsigned long load;
+	unsigned long power;
+	unsigned long capacity;
+	int has_capacity;
+};
+
+/*
+ * XXX borrowed from update_sg_lb_stats
+ */
+static void update_numa_stats(struct numa_stats *ns, int nid)
 {
-	int node_cpu = cpumask_first(cpumask_of_node(nid));
-	int cpu, src_cpu = task_cpu(p), dst_cpu = src_cpu;
-	unsigned long src_load, dst_load;
-	unsigned long min_load = ULONG_MAX;
-	struct task_group *tg = task_group(p);
-	s64 src_eff_load, dst_eff_load;
-	struct sched_domain *sd;
-	unsigned long weight;
-	bool balanced;
-	int imbalance_pct, idx = -1;
+	int cpu;
+
+	memset(ns, 0, sizeof(*ns));
+	for_each_cpu(cpu, cpumask_of_node(nid)) {
+		struct rq *rq = cpu_rq(cpu);
+
+		ns->nr_running += rq->nr_running;
+		ns->load += weighted_cpuload(cpu);
+		ns->power += power_of(cpu);
+	}
+
+	ns->load = (ns->load * SCHED_POWER_SCALE) / ns->power;
+	ns->capacity = DIV_ROUND_CLOSEST(ns->power, SCHED_POWER_SCALE);
+	ns->has_capacity = (ns->nr_running < ns->capacity);
+}
+
+struct task_numa_env {
+	struct task_struct *p;
+
+	int src_cpu, src_nid;
+	int dst_cpu, dst_nid;
+
+	struct numa_stats src_stats, dst_stats;
+
+	int imbalance_pct, idx;
+
+	struct task_struct *best_task;
+	long best_imp;
+	int best_cpu;
+};
+
+static void task_numa_assign(struct task_numa_env *env,
+			     struct task_struct *p, long imp)
+{
+	if (env->best_task)
+		put_task_struct(env->best_task);
+	if (p)
+		get_task_struct(p);
+
+	env->best_task = p;
+	env->best_imp = imp;
+	env->best_cpu = env->dst_cpu;
+}
+
+static void task_numa_compare(struct task_numa_env *env, long imp)
+{
+	struct rq *src_rq = cpu_rq(env->src_cpu);
+	struct rq *dst_rq = cpu_rq(env->dst_cpu);
+	struct task_struct *cur;
+	unsigned long dst_load, src_load;
+	unsigned long load;
+
+	rcu_read_lock();
+	cur = ACCESS_ONCE(dst_rq->curr);
+	if (cur->pid == 0) /* idle */
+		cur = NULL;
+
+	if (cur) {
+		imp += task_faults(cur, env->src_nid) -
+		       task_faults(cur, env->dst_nid);
+	}
+
+	if (imp < env->best_imp)
+		goto unlock;
+
+	if (!cur) {
+		/* If there's room for an extra task; go ahead */
+		if (env->dst_stats.has_capacity)
+			goto assign;
+
+		/* If we're both over-capacity; balance */
+		if (!env->src_stats.has_capacity)
+			goto balance;
+
+		goto unlock;
+	}
+
+	/* Balance doesn't matter much if we're running a task per cpu */
+	if (src_rq->nr_running == 1 && dst_rq->nr_running == 1)
+		goto assign;
+
+	/*
+	 * In the overloaded case, try and keep the load balanced.
+	 */
+balance:
+	dst_load = env->dst_stats.load;
+	src_load = env->src_stats.load;
+
+	/* XXX missing power terms */
+	load = task_h_load(env->p);
+	dst_load += load;
+	src_load -= load;
+
+	if (cur) {
+		load = task_h_load(cur);
+		dst_load -= load;
+		src_load += load;
+	}
+
+	/* make src_load the smaller */
+	if (dst_load < src_load)
+		swap(dst_load, src_load);
 
-	/* No harm being optimistic */
-	if (idle_cpu(node_cpu))
-		return node_cpu;
+	if (src_load * env->imbalance_pct < dst_load * 100)
+		goto unlock;
+
+assign:
+	task_numa_assign(env, cur, imp);
+unlock:
+	rcu_read_unlock();
+}
+
+static int task_numa_migrate(struct task_struct *p)
+{
+	struct task_numa_env env = {
+		.p = p,
+
+		.src_cpu = task_cpu(p),
+		.src_nid = cpu_to_node(task_cpu(p)),
+
+		.imbalance_pct = 112,
+
+		.best_task = NULL,
+		.best_imp = 0,
+		.best_cpu = -1
+	};
+	struct sched_domain *sd;
+	unsigned long faults;
+	int nid, cpu, ret;
 
 	/*
 	 * Find the lowest common scheduling domain covering the nodes of both
 	 * the CPU the task is currently running on and the target NUMA node.
 	 */
 	rcu_read_lock();
-	for_each_domain(src_cpu, sd) {
-		if (cpumask_test_cpu(node_cpu, sched_domain_span(sd))) {
-			/*
-			 * busy_idx is used for the load decision as it is the
-			 * same index used by the regular load balancer for an
-			 * active cpu.
-			 */
-			idx = sd->busy_idx;
-			imbalance_pct = sd->imbalance_pct;
+	for_each_domain(env.src_cpu, sd) {
+		if (cpumask_intersects(cpumask_of_node(env.src_nid), sched_domain_span(sd))) {
+			env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
 			break;
 		}
 	}
 	rcu_read_unlock();
 
-	if (WARN_ON_ONCE(idx == -1))
-		return src_cpu;
+	faults = task_faults(p, env.src_nid);
+	update_numa_stats(&env.src_stats, env.src_nid);
 
-	/*
-	 * XXX the below is mostly nicked from wake_affine(); we should
-	 * see about sharing a bit if at all possible; also it might want
-	 * some per entity weight love.
-	 */
-	weight = p->se.load.weight;
+	for_each_online_node(nid) {
+		long imp;
 
-	src_load = source_load(src_cpu, idx);
-
-	src_eff_load = 100 + (imbalance_pct - 100) / 2;
-	src_eff_load *= power_of(src_cpu);
-	src_eff_load *= src_load + effective_load(tg, src_cpu, -weight, -weight);
-
-	for_each_cpu(cpu, cpumask_of_node(nid)) {
-		dst_load = target_load(cpu, idx);
-
-		/* If the CPU is idle, use it */
-		if (!dst_load)
-			return cpu;
-
-		/* Otherwise check the target CPU load */
-		dst_eff_load = 100;
-		dst_eff_load *= power_of(cpu);
-		dst_eff_load *= dst_load + effective_load(tg, cpu, weight, weight);
+		if (nid == env.src_nid)
+			continue;
 
-		/*
-		 * Destination is considered balanced if the destination CPU is
-		 * less loaded than the source CPU. Unfortunately there is a
-		 * risk that a task running on a lightly loaded CPU will not
-		 * migrate to its preferred node due to load imbalances.
-		 */
-		balanced = (dst_eff_load <= src_eff_load);
-		if (!balanced)
+		imp = task_faults(p, nid) - faults;
+		if (imp < 0)
 			continue;
 
-		if (dst_load < min_load) {
-			min_load = dst_load;
-			dst_cpu = cpu;
+		env.dst_nid = nid;
+		update_numa_stats(&env.dst_stats, env.dst_nid);
+		for_each_cpu(cpu, cpumask_of_node(nid)) {
+			env.dst_cpu = cpu;
+			task_numa_compare(&env, imp);
 		}
 	}
 
-	return dst_cpu;
+	if (env.best_cpu == -1)
+		return -EAGAIN;
+
+	if (env.best_task == NULL)
+		return migrate_task_to(p, env.best_cpu);
+
+	ret = migrate_swap(p, env.best_task);
+	put_task_struct(env.best_task);
+	return ret;
 }
 
 /* Attempt to migrate a task to a CPU on the preferred node. */
@@ -983,10 +1099,13 @@ static void numa_migrate_preferred(struc
 	if (cpu_to_node(preferred_cpu) == p->numa_preferred_nid)
 		return;
 
-	/* Otherwise, try migrate to a CPU on the preferred node */
-	preferred_cpu = task_numa_find_cpu(p, p->numa_preferred_nid);
-	if (migrate_task_to(p, preferred_cpu) != 0)
-		p->numa_migrate_retry = jiffies + HZ*5;
+	if (!sched_feat(NUMA_BALANCE))
+		return;
+
+	task_numa_migrate(p);
+
+	/* Try again until we hit the preferred node */
+	p->numa_migrate_retry = jiffies + HZ/10;
 }
 
 static void task_numa_placement(struct task_struct *p)
@@ -1003,7 +1122,7 @@ static void task_numa_placement(struct t
 
 	/* Find the node with the highest number of faults */
 	for (nid = 0; nid < nr_node_ids; nid++) {
-		unsigned long faults;
+		unsigned long faults = 0;
 		int priv, i;
 
 		for (priv = 0; priv < 2; priv++) {
@@ -1013,19 +1132,16 @@ static void task_numa_placement(struct t
 			p->numa_faults[i] >>= 1;
 			p->numa_faults[i] += p->numa_faults_buffer[i];
 			p->numa_faults_buffer[i] = 0;
+
+			faults += p->numa_faults[i];
 		}
 
-		/* Find maximum private faults */
-		faults = p->numa_faults[task_faults_idx(nid, 1)];
 		if (faults > max_faults) {
 			max_faults = faults;
 			max_nid = nid;
 		}
 	}
 
-	if (!sched_feat(NUMA_BALANCE))
-		return;
-
 	/* Preferred node as the node with the most faults */
 	if (max_faults && max_nid != p->numa_preferred_nid) {
 		int old_migrate_seq = p->numa_migrate_seq;
@@ -3342,7 +3458,7 @@ static long effective_load(struct task_g
 {
 	struct sched_entity *se = tg->se[cpu];
 
-	if (!tg->parent)	/* the trivial, non-cgroup case */
+	if (!tg->parent || !wl)	/* the trivial / non-cgroup case */
 		return wl;
 
 	for_each_sched_entity(se) {
@@ -4347,8 +4463,6 @@ static int move_one_task(struct lb_env *
 	return 0;
 }
 
-static unsigned long task_h_load(struct task_struct *p);
-
 static const unsigned int sched_nr_migrate_break = 32;
 
 /*
