From: Mel Gorman <mgorman@suse.de>
To: Peter Zijlstra <a.p.zijlstra@chello.nl>,
	Andrea Arcangeli <aarcange@redhat.com>,
	Ingo Molnar <mingo@kernel.org>
Cc: Rik van Riel <riel@redhat.com>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Hugh Dickins <hughd@google.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Paul Turner <pjt@google.com>, Hillf Danton <dhillf@gmail.com>,
	David Rientjes <rientjes@google.com>,
	Lee Schermerhorn <Lee.Schermerhorn@hp.com>,
	Alex Shi <lkml.alex@gmail.com>,
	Srikar Dronamraju <srikar@linux.vnet.ibm.com>,
	Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>,
	Linus Torvalds <torvalds@linux-foundation.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	Linux-MM <linux-mm@kvack.org>,
	LKML <linux-kernel@vger.kernel.org>, Mel Gorman <mgorman@suse.de>
Subject: [PATCH 40/49] mm: sched: Adapt the scanning rate if a NUMA hinting fault does not migrate
Date: Fri,  7 Dec 2012 10:23:43 +0000	[thread overview]
Message-ID: <1354875832-9700-41-git-send-email-mgorman@suse.de> (raw)
In-Reply-To: <1354875832-9700-1-git-send-email-mgorman@suse.de>

The PTE scanning rate and fault rates are two of the biggest sources of
system CPU overhead with automatic NUMA placement. Ideally a proper policy
would detect whether a workload was properly placed and adjust the PTE
scanning rate accordingly. We do not track the information necessary to do
that, but we do at least know whether a hinting fault resulted in a
migration.

This patch slows the scanning rate if a page was not migrated as the result
of a NUMA hinting fault, up to sysctl_balance_numa_scan_period_max, which
is now higher than the previous default. Once every minute the scan period
is reset in case the workload changes phase.

This is hilariously crude and the numbers are arbitrary. Workloads will
converge quite slowly in comparison to what a proper policy should be able
to do. On the plus side, we will chew up less CPU for workloads that have
no need for automatic balancing.
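
As an illustration, here is a minimal userspace sketch of the policy this
patch implements: the per-task scan period backs off whenever a hinting
fault finds the page already on the right node, and the back-off is
forgotten periodically. The helper names, the simulated fault loop and the
use of plain milliseconds instead of jiffies are assumptions made for the
sketch; the actual kernel changes are in the diff below.

#include <stdbool.h>
#include <stdio.h>

#define SCAN_PERIOD_MIN_MS	100U		/* sysctl_balance_numa_scan_period_min */
#define SCAN_PERIOD_MAX_MS	(100U * 50)	/* sysctl_balance_numa_scan_period_max */
#define SCAN_PERIOD_RESET_MS	(100U * 600)	/* sysctl_balance_numa_scan_period_reset */

static unsigned int scan_period_ms = SCAN_PERIOD_MIN_MS;
static unsigned long next_reset_ms;

/* Per-fault update: back off only if the page did not need to migrate. */
static void numa_fault_seen(bool migrated)
{
	if (!migrated) {
		scan_period_ms += 10;
		if (scan_period_ms > SCAN_PERIOD_MAX_MS)
			scan_period_ms = SCAN_PERIOD_MAX_MS;
	}
}

/* Scanner-side check: periodically return to the fast scan rate. */
static void maybe_reset_scan_period(unsigned long now_ms)
{
	if (now_ms >= next_reset_ms) {
		scan_period_ms = SCAN_PERIOD_MIN_MS;
		next_reset_ms = now_ms + SCAN_PERIOD_RESET_MS;
	}
}

int main(void)
{
	unsigned long now_ms = 0;
	int i;

	/* Simulate a converged workload where no fault migrates a page. */
	for (i = 0; i < 1000; i++) {
		maybe_reset_scan_period(now_ms);
		numa_fault_seen(false);
		now_ms += scan_period_ms;
	}
	printf("scan period after convergence: %ums\n", scan_period_ms);
	return 0;
}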

Signed-off-by: Mel Gorman <mgorman@suse.de>
---
 include/linux/mm_types.h |    3 +++
 include/linux/sched.h    |    5 +++--
 kernel/sched/core.c      |    1 +
 kernel/sched/fair.c      |   29 +++++++++++++++++++++--------
 kernel/sysctl.c          |    7 +++++++
 mm/huge_memory.c         |    2 +-
 mm/memory.c              |   12 ++++++++----
 7 files changed, 44 insertions(+), 15 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6b478ff..62d18a9 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -410,6 +410,9 @@ struct mm_struct {
 	 */
 	unsigned long numa_next_scan;
 
+	/* numa_next_reset is when the PTE scanner period will be reset */
+	unsigned long numa_next_reset;
+
 	/* Restart point for scanning and setting pte_numa */
 	unsigned long numa_scan_offset;
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a2b06ea..1068afd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1562,9 +1562,9 @@ struct task_struct {
 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
 #ifdef CONFIG_BALANCE_NUMA
-extern void task_numa_fault(int node, int pages);
+extern void task_numa_fault(int node, int pages, bool migrated);
 #else
-static inline void task_numa_fault(int node, int pages)
+static inline void task_numa_fault(int node, int pages, bool migrated)
 {
 }
 #endif
@@ -2009,6 +2009,7 @@ extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
 extern unsigned int sysctl_balance_numa_scan_delay;
 extern unsigned int sysctl_balance_numa_scan_period_min;
 extern unsigned int sysctl_balance_numa_scan_period_max;
+extern unsigned int sysctl_balance_numa_scan_period_reset;
 extern unsigned int sysctl_balance_numa_scan_size;
 extern unsigned int sysctl_balance_numa_settle_count;
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 047e3c7..a59d869 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1537,6 +1537,7 @@ static void __sched_fork(struct task_struct *p)
 #ifdef CONFIG_BALANCE_NUMA
 	if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
 		p->mm->numa_next_scan = jiffies;
+		p->mm->numa_next_reset = jiffies;
 		p->mm->numa_scan_seq = 0;
 	}
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3c632448..c1be907 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -784,7 +784,8 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
  * numa task sample period in ms
  */
 unsigned int sysctl_balance_numa_scan_period_min = 100;
-unsigned int sysctl_balance_numa_scan_period_max = 100*16;
+unsigned int sysctl_balance_numa_scan_period_max = 100*50;
+unsigned int sysctl_balance_numa_scan_period_reset = 100*600;
 
 /* Portion of address space to scan in MB */
 unsigned int sysctl_balance_numa_scan_size = 256;
@@ -806,20 +807,19 @@ static void task_numa_placement(struct task_struct *p)
 /*
  * Got a PROT_NONE fault for a page on @node.
  */
-void task_numa_fault(int node, int pages)
+void task_numa_fault(int node, int pages, bool migrated)
 {
 	struct task_struct *p = current;
 
 	/* FIXME: Allocate task-specific structure for placement policy here */
 
 	/*
-	 * Assume that as faults occur that pages are getting properly placed
-	 * and fewer NUMA hints are required. Note that this is a big
-	 * assumption, it assumes processes reach a steady steady with no
-	 * further phase changes.
+	 * If pages are properly placed (did not migrate) then scan slower.
+	 * This is reset periodically in case of phase changes.
 	 */
-	p->numa_scan_period = min(sysctl_balance_numa_scan_period_max,
-				p->numa_scan_period + jiffies_to_msecs(2));
+	if (!migrated)
+		p->numa_scan_period = min(sysctl_balance_numa_scan_period_max,
+			p->numa_scan_period + jiffies_to_msecs(10));
 
 	task_numa_placement(p);
 }
@@ -858,6 +858,19 @@ void task_numa_work(struct callback_head *work)
 		return;
 
 	/*
+	 * Reset the scan period if enough time has gone by. Objective is that
+	 * scanning will be reduced if pages are properly placed. As tasks
+	 * can enter different phases this needs to be re-examined. Lacking
+	 * proper tracking of reference behaviour, this blunt hammer is used.
+	 */
+	migrate = mm->numa_next_reset;
+	if (time_after(now, migrate)) {
+		p->numa_scan_period = sysctl_balance_numa_scan_period_min;
+		next_scan = now + msecs_to_jiffies(sysctl_balance_numa_scan_period_reset);
+		xchg(&mm->numa_next_reset, next_scan);
+	}
+
+	/*
 	 * Enforce maximal scan/migration frequency..
 	 */
 	migrate = mm->numa_next_scan;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 5ee587d..c335f426 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -367,6 +367,13 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= proc_dointvec,
 	},
 	{
+		.procname	= "balance_numa_scan_period_reset",
+		.data		= &sysctl_balance_numa_scan_period_reset,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
 		.procname	= "balance_numa_scan_period_max_ms",
 		.data		= &sysctl_balance_numa_scan_period_max,
 		.maxlen		= sizeof(unsigned int),
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4c6efa8..1327a03 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1067,7 +1067,7 @@ out_unlock:
 	spin_unlock(&mm->page_table_lock);
 	if (page) {
 		put_page(page);
-		task_numa_fault(numa_node_id(), HPAGE_PMD_NR);
+		task_numa_fault(numa_node_id(), HPAGE_PMD_NR, false);
 	}
 	return 0;
 }
diff --git a/mm/memory.c b/mm/memory.c
index 6a1e534..30e1335 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3468,6 +3468,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	int current_nid = -1;
 	int target_nid;
+	bool migrated = false;
 
 	/*
 	* The "pte" at this point cannot be used safely without
@@ -3509,12 +3510,13 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	/* Migrate to the requested node */
-	if (migrate_misplaced_page(page, target_nid))
+	migrated = migrate_misplaced_page(page, target_nid);
+	if (migrated)
 		current_nid = target_nid;
 
 out:
 	if (current_nid != -1)
-		task_numa_fault(current_nid, 1);
+		task_numa_fault(current_nid, 1, migrated);
 	return 0;
 }
 
@@ -3554,6 +3556,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct page *page;
 		int curr_nid = local_nid;
 		int target_nid;
+		bool migrated;
 		if (!pte_present(pteval))
 			continue;
 		if (!pte_numa(pteval))
@@ -3590,9 +3593,10 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 		/* Migrate to the requested node */
 		pte_unmap_unlock(pte, ptl);
-		if (migrate_misplaced_page(page, target_nid))
+		migrated = migrate_misplaced_page(page, target_nid);
+		if (migrated)
 			curr_nid = target_nid;
-		task_numa_fault(curr_nid, 1);
+		task_numa_fault(curr_nid, 1, migrated);
 
 		pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
 	}
-- 
1.7.9.2



Thread overview: 80+ messages
2012-12-07 10:23 [PATCH 00/49] Automatic NUMA Balancing v10 Mel Gorman
2012-12-07 10:23 ` [PATCH 01/49] x86: mm: only do a local tlb flush in ptep_set_access_flags() Mel Gorman
2012-12-07 10:23 ` [PATCH 02/49] x86: mm: drop TLB flush from ptep_set_access_flags Mel Gorman
2012-12-07 10:23 ` [PATCH 03/49] mm,generic: only flush the local TLB in ptep_set_access_flags Mel Gorman
2012-12-07 10:23 ` [PATCH 04/49] x86/mm: Introduce pte_accessible() Mel Gorman
2012-12-07 10:23 ` [PATCH 05/49] mm: Only flush the TLB when clearing an accessible pte Mel Gorman
2012-12-07 10:23 ` [PATCH 06/49] mm: Count the number of pages affected in change_protection() Mel Gorman
2012-12-07 10:23 ` [PATCH 07/49] mm: Optimize the TLB flush of sys_mprotect() and change_protection() users Mel Gorman
2012-12-07 10:23 ` [PATCH 08/49] mm: compaction: Move migration fail/success stats to migrate.c Mel Gorman
2012-12-07 10:23 ` [PATCH 09/49] mm: migrate: Add a tracepoint for migrate_pages Mel Gorman
2012-12-07 10:23 ` [PATCH 10/49] mm: compaction: Add scanned and isolated counters for compaction Mel Gorman
2012-12-07 10:23 ` [PATCH 11/49] mm: numa: define _PAGE_NUMA Mel Gorman
2012-12-07 10:23 ` [PATCH 12/49] mm: numa: pte_numa() and pmd_numa() Mel Gorman
2012-12-07 10:23 ` [PATCH 13/49] mm: numa: Support NUMA hinting page faults from gup/gup_fast Mel Gorman
2012-12-07 10:23 ` [PATCH 14/49] mm: numa: split_huge_page: transfer the NUMA type from the pmd to the pte Mel Gorman
2012-12-07 10:23 ` [PATCH 15/49] mm: numa: Create basic numa page hinting infrastructure Mel Gorman
2012-12-07 10:23 ` [PATCH 16/49] mm: mempolicy: Make MPOL_LOCAL a real policy Mel Gorman
2012-12-07 10:23 ` [PATCH 17/49] mm: mempolicy: Add MPOL_NOOP Mel Gorman
2012-12-07 10:23 ` [PATCH 18/49] mm: mempolicy: Check for misplaced page Mel Gorman
2012-12-07 10:23 ` [PATCH 19/49] mm: migrate: Introduce migrate_misplaced_page() Mel Gorman
2012-12-07 10:23 ` [PATCH 20/49] mm: migrate: Drop the misplaced pages reference count if the target node is full Mel Gorman
2012-12-07 10:23 ` [PATCH 21/49] mm: mempolicy: Use _PAGE_NUMA to migrate pages Mel Gorman
2012-12-07 10:23 ` [PATCH 22/49] mm: mempolicy: Add MPOL_MF_LAZY Mel Gorman
2013-01-05  5:18   ` Simon Jeons
2013-01-07 15:14     ` Mel Gorman
2012-12-07 10:23 ` [PATCH 23/49] mm: mempolicy: Implement change_prot_numa() in terms of change_protection() Mel Gorman
2012-12-07 10:23 ` [PATCH 24/49] mm: mempolicy: Hide MPOL_NOOP and MPOL_MF_LAZY from userspace for now Mel Gorman
2012-12-07 10:23 ` [PATCH 25/49] mm: numa: Add fault driven placement and migration Mel Gorman
2013-01-04 11:56   ` Simon Jeons
2012-12-07 10:23 ` [PATCH 26/49] mm: sched: numa: Implement constant, per task Working Set Sampling (WSS) rate Mel Gorman
2012-12-07 10:23 ` [PATCH 27/49] sched, numa, mm: Count WS scanning against present PTEs, not virtual memory ranges Mel Gorman
2012-12-07 10:23 ` [PATCH 28/49] mm: sched: numa: Implement slow start for working set sampling Mel Gorman
2012-12-07 10:23 ` [PATCH 29/49] mm: numa: Add pte updates, hinting and migration stats Mel Gorman
2013-01-04 11:42   ` Simon Jeons
2013-01-07 15:29     ` Mel Gorman
2012-12-07 10:23 ` [PATCH 30/49] mm: numa: Migrate on reference policy Mel Gorman
2012-12-07 10:23 ` [PATCH 31/49] mm: numa: Migrate pages handled during a pmd_numa hinting fault Mel Gorman
2012-12-07 10:23 ` [PATCH 32/49] mm: numa: Structures for Migrate On Fault per NUMA migration rate limiting Mel Gorman
2012-12-07 10:23 ` [PATCH 33/49] mm: numa: Rate limit the amount of memory that is migrated between nodes Mel Gorman
2012-12-07 10:23 ` [PATCH 34/49] mm: numa: Rate limit setting of pte_numa if node is saturated Mel Gorman
2012-12-07 10:23 ` [PATCH 35/49] sched: numa: Slowly increase the scanning period as NUMA faults are handled Mel Gorman
2012-12-07 10:23 ` [PATCH 36/49] mm: numa: Introduce last_nid to the page frame Mel Gorman
2012-12-07 10:23 ` [PATCH 37/49] mm: numa: split_huge_page: Transfer last_nid on tail page Mel Gorman
2012-12-07 10:23 ` [PATCH 38/49] mm: numa: migrate: Set last_nid on newly allocated page Mel Gorman
2012-12-07 10:23 ` [PATCH 39/49] mm: numa: Use a two-stage filter to restrict pages being migrated for unlikely task<->node relationships Mel Gorman
2012-12-07 10:23 ` Mel Gorman [this message]
2012-12-07 10:23 ` [PATCH 41/49] mm: sched: numa: Control enabling and disabling of NUMA balancing Mel Gorman
2012-12-07 10:23 ` [PATCH 42/49] mm: sched: numa: Control enabling and disabling of NUMA balancing if !SCHED_DEBUG Mel Gorman
2012-12-07 10:23 ` [PATCH 43/49] mm: sched: numa: Delay PTE scanning until a task is scheduled on a new node Mel Gorman
2012-12-07 10:23 ` [PATCH 44/49] mm: numa: Add THP migration for the NUMA working set scanning fault case Mel Gorman
     [not found]   ` <20130105084229.GA3208@hacker.(null)>
2013-01-07 15:37     ` Mel Gorman
2012-12-07 10:23 ` [PATCH 45/49] mm: numa: Add THP migration for the NUMA working set scanning fault case build fix Mel Gorman
2012-12-07 10:23 ` [PATCH 46/49] mm: numa: Account for failed allocations and isolations as migration failures Mel Gorman
2012-12-07 10:23 ` [PATCH 47/49] mm: migrate: Account a transhuge page properly when rate limiting Mel Gorman
2012-12-07 10:23 ` [PATCH 48/49] mm/rmap: Convert the struct anon_vma::mutex to an rwsem Mel Gorman
2012-12-07 10:23 ` [PATCH 49/49] mm/rmap, migration: Make rmap_walk_anon() and try_to_unmap_anon() more scalable Mel Gorman
2012-12-07 11:01 ` [PATCH 00/49] Automatic NUMA Balancing v10 Ingo Molnar
2012-12-09 20:36   ` Mel Gorman
2012-12-09 21:17     ` Kirill A. Shutemov
2012-12-10  8:44       ` Mel Gorman
2012-12-10  5:07     ` Srikar Dronamraju
2012-12-10  6:28       ` Srikar Dronamraju
2012-12-10 12:44         ` [PATCH] sched: Fix task_numa_fault() + KSM crash Ingo Molnar
2012-12-13 13:57           ` Srikar Dronamraju
2012-12-10  8:46       ` [PATCH 00/49] Automatic NUMA Balancing v10 Mel Gorman
2012-12-10 12:35       ` Ingo Molnar
2012-12-10 11:39     ` Ingo Molnar
2012-12-10 11:53       ` Ingo Molnar
2012-12-10 15:24       ` Mel Gorman
2012-12-11  1:02         ` Mel Gorman
2012-12-11  8:52           ` Ingo Molnar
2012-12-11  9:18             ` Ingo Molnar
2012-12-11 15:22               ` Mel Gorman
2012-12-11 16:30             ` Mel Gorman
2012-12-17 10:33               ` Ingo Molnar
2012-12-10 16:42 ` Srikar Dronamraju
2012-12-10 19:23   ` Ingo Molnar
2012-12-10 23:35     ` Srikar Dronamraju
2012-12-10 23:40   ` Srikar Dronamraju
2012-12-13 13:21 ` Srikar Dronamraju
