linux-kernel.vger.kernel.org archive mirror
From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
To: Ingo Molnar <mingo@kernel.org>, Peter Zijlstra <peterz@infradead.org>
Cc: LKML <linux-kernel@vger.kernel.org>,
	Mel Gorman <mgorman@techsingularity.net>,
	Rik van Riel <riel@surriel.com>,
	Srikar Dronamraju <srikar@linux.vnet.ibm.com>,
	Thomas Gleixner <tglx@linutronix.de>
Subject: [PATCH 11/19] sched/numa: Restrict migrating in parallel to the same node.
Date: Mon,  4 Jun 2018 15:30:20 +0530	[thread overview]
Message-ID: <1528106428-19992-12-git-send-email-srikar@linux.vnet.ibm.com> (raw)
In-Reply-To: <1528106428-19992-1-git-send-email-srikar@linux.vnet.ibm.com>

Since task migration under numa balancing can happen in parallel, more
than one task might choose to move to the same node at the same time.
This can cause load imbalances at the node level.

The problem is more likely when there are more cores per node or more
nodes in the system.

Use a per-node variable to indicate whether a numa-balancing task
migration to that node is currently in progress. This per-node variable
does not track task swaps, since a swap does not change the number of
tasks on either node.
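For readers unfamiliar with the pattern used below: the per-node flag is
claimed with an atomic exchange, so of several racing claimants exactly
one sees the old value 0 and wins; the rest back off without blocking.
Here is a minimal userspace sketch of the same claim/release idea,
assuming GCC's __atomic builtins in place of the kernel's
xchg()/WRITE_ONCE(); claim_node(), release_node() and the pthread driver
are illustrative names only, not part of the patch:

#include <pthread.h>
#include <stdio.h>

static int active_node_migrate;	/* stands in for pgdat->active_node_migrate */

static int claim_node(void)
{
	/* Atomically store 1 and return the previous value; only the
	 * caller that saw 0 holds the claim. */
	return __atomic_exchange_n(&active_node_migrate, 1,
				   __ATOMIC_SEQ_CST) == 0;
}

static void release_node(void)
{
	/* Plain atomic store, analogous to WRITE_ONCE(..., 0). */
	__atomic_store_n(&active_node_migrate, 0, __ATOMIC_SEQ_CST);
}

static void *worker(void *arg)
{
	long id = (long)arg;

	if (claim_node()) {
		printf("thread %ld: migrating\n", id);
		release_node();
	} else {
		printf("thread %ld: node busy, backing off\n", id);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	long i;

	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Build with "gcc -pthread". In the patch itself the claim is dropped in
two places: in task_numa_assign(), when a later, better candidate
replaces a pending move, and in task_numa_migrate(), once
migrate_task_to() has completed.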

Without this patch (times in seconds):
Testcase       Time:         Min         Max         Avg      StdDev
numa01.sh      Real:      434.84      676.90      550.53      106.24
numa01.sh       Sys:      125.98      217.34      179.41       30.35
numa01.sh      User:    38318.48    53789.56    45864.17     6620.80
numa02.sh      Real:       60.06       61.27       60.59        0.45
numa02.sh       Sys:       14.25       17.86       16.09        1.28
numa02.sh      User:     5190.13     5225.67     5209.24       13.19
numa03.sh      Real:      748.21      960.25      823.15       73.51
numa03.sh       Sys:       96.68      122.10      110.42       11.29
numa03.sh      User:    58222.16    72595.27    63552.22     5048.87
numa04.sh      Real:      433.08      630.55      499.30       68.15
numa04.sh       Sys:      245.22      386.75      306.09       63.32
numa04.sh      User:    35014.68    46151.72    38530.26     3924.65
numa05.sh      Real:      394.77      410.07      401.41        5.99
numa05.sh       Sys:      212.40      301.82      256.23       35.41
numa05.sh      User:    33224.86    34201.40    33665.61      313.40

With this patch (negative %Change means slower than the baseline above):
Testcase       Time:         Min         Max         Avg      StdDev 	 %Change
numa01.sh      Real:      674.61      997.71      785.01      115.95 	 -29.86%
numa01.sh       Sys:      180.87      318.88      270.13       51.32 	 -33.58%
numa01.sh      User:    54001.30    71936.50    60495.48     6237.55 	 -24.18%
numa02.sh      Real:       60.62       62.30       61.46        0.62 	 -1.415%
numa02.sh       Sys:       15.01       33.63       24.38        6.81 	 -34.00%
numa02.sh      User:     5234.20     5325.60     5276.23       38.85 	 -1.269%
numa03.sh      Real:      827.62      946.85      914.48       44.58 	 -9.987%
numa03.sh       Sys:      135.55      172.40      158.46       12.75 	 -30.31%
numa03.sh      User:    64839.42    73195.44    70805.96     3061.20 	 -10.24%
numa04.sh      Real:      481.01      608.76      521.14       47.28 	 -4.190%
numa04.sh       Sys:      329.59      373.15      353.20       14.20 	 -13.33%
numa04.sh      User:    37649.09    40722.94    38806.32     1072.32 	 -0.711%
numa05.sh      Real:      399.21      415.38      409.88        5.54 	 -2.066%
numa05.sh       Sys:      319.46      418.57      363.31       37.62 	 -29.47%
numa05.sh      User:    33727.77    34732.68    34127.41      447.11 	 -1.353%


This commit does cause some performance regression, but it is needed
from a fairness/correctness perspective.

Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
---
 include/linux/mmzone.h |  1 +
 kernel/sched/fair.c    | 14 ++++++++++++++
 mm/page_alloc.c        |  1 +
 3 files changed, 16 insertions(+)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 32699b2..b0767703 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -677,6 +677,7 @@ struct zonelist {
 
 	/* Number of pages migrated during the rate limiting time interval */
 	unsigned long numabalancing_migrate_nr_pages;
+	int active_node_migrate;
 #endif
 	/*
 	 * This is a per-node reserve of pages that are not available
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3e19e32..259c343 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1478,11 +1478,22 @@ struct task_numa_env {
 static void task_numa_assign(struct task_numa_env *env,
 			     struct task_struct *p, long imp)
 {
+	pg_data_t *pgdat = NODE_DATA(cpu_to_node(env->dst_cpu));
 	struct rq *rq = cpu_rq(env->dst_cpu);
 
 	if (xchg(&rq->numa_migrate_on, 1))
 		return;
 
+	if (!env->best_task && env->best_cpu != -1)
+		WRITE_ONCE(pgdat->active_node_migrate, 0);
+
+	if (!p) {
+		if (xchg(&pgdat->active_node_migrate, 1)) {
+			WRITE_ONCE(rq->numa_migrate_on, 0);
+			return;
+		}
+	}
+
 	if (env->best_cpu != -1) {
 		rq = cpu_rq(env->best_cpu);
 		WRITE_ONCE(rq->numa_migrate_on, 0);
@@ -1819,8 +1830,11 @@ static int task_numa_migrate(struct task_struct *p)
 
 	best_rq = cpu_rq(env.best_cpu);
 	if (env.best_task == NULL) {
+		pg_data_t *pgdat = NODE_DATA(cpu_to_node(env.dst_cpu));
+
 		ret = migrate_task_to(p, env.best_cpu);
 		WRITE_ONCE(best_rq->numa_migrate_on, 0);
+		WRITE_ONCE(pgdat->active_node_migrate, 0);
 		if (ret != 0)
 			trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
 		return ret;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 905db9d..4526643 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6210,6 +6210,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
 #ifdef CONFIG_NUMA_BALANCING
 	spin_lock_init(&pgdat->numabalancing_migrate_lock);
 	pgdat->numabalancing_migrate_nr_pages = 0;
+	pgdat->active_node_migrate = 0;
 	pgdat->numabalancing_migrate_next_window = jiffies;
 #endif
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-- 
1.8.3.1

Thread overview: 66+ messages
2018-06-04 10:00 [PATCH 00/19] Fixes for sched/numa_balancing Srikar Dronamraju
2018-06-04 10:00 ` [PATCH 01/19] sched/numa: Remove redundant field Srikar Dronamraju
2018-06-04 14:53   ` Rik van Riel
2018-06-05  8:41   ` Mel Gorman
2018-06-04 10:00 ` [PATCH 02/19] sched/numa: Evaluate move once per node Srikar Dronamraju
2018-06-04 14:51   ` Rik van Riel
2018-06-04 15:45     ` Srikar Dronamraju
2018-06-04 10:00 ` [PATCH 03/19] sched/numa: Simplify load_too_imbalanced Srikar Dronamraju
2018-06-04 14:57   ` Rik van Riel
2018-06-05  8:46   ` Mel Gorman
2018-06-04 10:00 ` [PATCH 04/19] sched/numa: Set preferred_node based on best_cpu Srikar Dronamraju
2018-06-04 12:18   ` Peter Zijlstra
2018-06-04 12:53     ` Srikar Dronamraju
2018-06-04 12:23   ` Peter Zijlstra
2018-06-04 12:59     ` Srikar Dronamraju
2018-06-04 13:39       ` Peter Zijlstra
2018-06-04 13:48         ` Srikar Dronamraju
2018-06-04 14:37       ` Rik van Riel
2018-06-04 15:56         ` Srikar Dronamraju
2018-06-04 10:00 ` [PATCH 05/19] sched/numa: Use task faults only if numa_group is not yet setup Srikar Dronamraju
2018-06-04 12:24   ` Peter Zijlstra
2018-06-04 13:09     ` Srikar Dronamraju
2018-06-04 10:00 ` [PATCH 06/19] sched/debug: Reverse the order of printing faults Srikar Dronamraju
2018-06-04 16:28   ` Rik van Riel
2018-06-05  8:50   ` Mel Gorman
2018-06-04 10:00 ` [PATCH 07/19] sched/numa: Skip nodes that are at hoplimit Srikar Dronamraju
2018-06-04 16:27   ` Rik van Riel
2018-06-05  8:50   ` Mel Gorman
2018-06-04 10:00 ` [PATCH 08/19] sched/numa: Remove unused task_capacity from numa_stats Srikar Dronamraju
2018-06-04 16:28   ` Rik van Riel
2018-06-05  8:57   ` Mel Gorman
2018-06-04 10:00 ` [PATCH 09/19] sched/numa: Modify migrate_swap to accept additional params Srikar Dronamraju
2018-06-04 17:00   ` Rik van Riel
2018-06-05  8:58   ` Mel Gorman
2018-06-04 10:00 ` [PATCH 10/19] sched/numa: Stop multiple tasks from moving to the cpu at the same time Srikar Dronamraju
2018-06-04 17:57   ` Rik van Riel
2018-06-05  9:51   ` Mel Gorman
2018-06-04 10:00 ` Srikar Dronamraju [this message]
2018-06-04 17:59   ` [PATCH 11/19] sched/numa: Restrict migrating in parallel to the same node Rik van Riel
2018-06-05  9:53   ` Mel Gorman
2018-06-06 12:58     ` Srikar Dronamraju
2018-06-04 10:00 ` [PATCH 12/19] sched:numa Remove numa_has_capacity Srikar Dronamraju
2018-06-04 18:07   ` Rik van Riel
2018-06-04 10:00 ` [PATCH 13/19] mm/migrate: Use xchg instead of spinlock Srikar Dronamraju
2018-06-04 18:22   ` Rik van Riel
2018-06-04 19:28   ` Peter Zijlstra
2018-06-05  7:24     ` Srikar Dronamraju
2018-06-05  8:16       ` Peter Zijlstra
2018-06-04 10:00 ` [PATCH 14/19] sched/numa: Updation of scan period need not be in lock Srikar Dronamraju
2018-06-04 18:24   ` Rik van Riel
2018-06-04 10:00 ` [PATCH 15/19] sched/numa: Use group_weights to identify if migration degrades locality Srikar Dronamraju
2018-06-04 18:56   ` Rik van Riel
2018-06-04 10:00 ` [PATCH 16/19] sched/numa: Detect if node actively handling migration Srikar Dronamraju
2018-06-04 20:05   ` Rik van Riel
2018-06-05  3:56     ` Srikar Dronamraju
2018-06-05 13:07       ` Rik van Riel
2018-06-06 12:55         ` Srikar Dronamraju
2018-06-06 13:55           ` Rik van Riel
2018-06-06 15:32             ` Srikar Dronamraju
2018-06-06 17:06               ` Rik van Riel
2018-06-04 10:00 ` [PATCH 17/19] sched/numa: Pass destination cpu as a parameter to migrate_task_rq Srikar Dronamraju
2018-06-04 10:00 ` [PATCH 18/19] sched/numa: Reset scan rate whenever task moves across nodes Srikar Dronamraju
2018-06-04 20:08   ` Rik van Riel
2018-06-05  9:58   ` Mel Gorman
2018-06-06 13:47     ` Srikar Dronamraju
2018-06-04 10:00 ` [PATCH 19/19] sched/numa: Move task_placement closer to numa_migrate_preferred Srikar Dronamraju
