From: Michel Lespinasse <walken@google.com>
To: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	Andrew Morton <akpm@linux-foundation.org>,
	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
	Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>,
	Rik van Riel <riel@redhat.com>,
	Johannes Weiner <jweiner@redhat.com>,
	KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>,
	Hugh Dickins <hughd@google.com>,
	Peter Zijlstra <a.p.zijlstra@chello.nl>,
	Michael Wolf <mjwolf@us.ibm.com>
Subject: [PATCH 6/8] kstaled: rate limit pages scanned per second.
Date: Fri, 16 Sep 2011 20:39:11 -0700	[thread overview]
Message-ID: <1316230753-8693-7-git-send-email-walken@google.com> (raw)
In-Reply-To: <1316230753-8693-1-git-send-email-walken@google.com>

Scan a slice of each node (roughly 1/scan_seconds of its pages) every second,
instead of trying to scan the entire memory at once and then sitting idle for
the rest of the configured interval.


Signed-off-by: Michel Lespinasse <walken@google.com>
---
 include/linux/mmzone.h |    3 ++
 mm/memcontrol.c        |   85 +++++++++++++++++++++++++++++++++++++++---------
 2 files changed, 72 insertions(+), 16 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6657106..272fbed 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -631,6 +631,9 @@ typedef struct pglist_data {
 	unsigned long node_present_pages; /* total number of physical pages */
 	unsigned long node_spanned_pages; /* total size of physical page
 					     range, including holes */
+#ifdef CONFIG_KSTALED
+	unsigned long node_idle_scan_pfn;
+#endif
 	int node_id;
 	wait_queue_head_t kswapd_wait;
 	struct task_struct *kswapd;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0fdc278..4a76fdcf 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5617,6 +5617,7 @@ __setup("swapaccount=", enable_swap_account);
 #ifdef CONFIG_KSTALED
 
 static unsigned int kstaled_scan_seconds;
+static DEFINE_SPINLOCK(kstaled_scan_seconds_lock);
 static DECLARE_WAIT_QUEUE_HEAD(kstaled_wait);
 
 static inline void kstaled_scan_page(struct page *page)
@@ -5728,15 +5729,19 @@ static inline void kstaled_scan_page(struct page *page)
 	put_page(page);
 }
 
-static void kstaled_scan_node(pg_data_t *pgdat)
+static bool kstaled_scan_node(pg_data_t *pgdat, int scan_seconds, bool reset)
 {
 	unsigned long flags;
-	unsigned long pfn, end;
+	unsigned long pfn, end, node_end;
 
 	pgdat_resize_lock(pgdat, &flags);
 
 	pfn = pgdat->node_start_pfn;
-	end = pfn + pgdat->node_spanned_pages;
+	node_end = pfn + pgdat->node_spanned_pages;
+	if (!reset && pfn < pgdat->node_idle_scan_pfn)
+		pfn = pgdat->node_idle_scan_pfn;
+	end = min(pfn + DIV_ROUND_UP(pgdat->node_spanned_pages, scan_seconds),
+		  node_end);
 
 	while (pfn < end) {
 		unsigned long contiguous = end;
@@ -5753,8 +5758,8 @@ static void kstaled_scan_node(pg_data_t *pgdat)
 #ifdef CONFIG_MEMORY_HOTPLUG
 				/* abort if the node got resized */
 				if (pfn < pgdat->node_start_pfn ||
-				    end > (pgdat->node_start_pfn +
-					   pgdat->node_spanned_pages))
+				    node_end > (pgdat->node_start_pfn +
+						pgdat->node_spanned_pages))
 					goto abort;
 #endif
 			}
@@ -5768,14 +5773,21 @@ static void kstaled_scan_node(pg_data_t *pgdat)
 
 abort:
 	pgdat_resize_unlock(pgdat, &flags);
+
+	pgdat->node_idle_scan_pfn = pfn;
+	return pfn >= node_end;
 }
 
 static int kstaled(void *dummy)
 {
+	int delayed = 0;
+	bool reset = true;
+
 	while (1) {
 		int scan_seconds;
 		int nid;
-		struct mem_cgroup *mem;
+		long earlier, delta;
+		bool scan_done;
 
 		wait_event_interruptible(kstaled_wait,
 				 (scan_seconds = kstaled_scan_seconds) > 0);
@@ -5788,21 +5800,60 @@ static int kstaled(void *dummy)
 		 */
 		BUG_ON(scan_seconds <= 0);
 
-		for_each_mem_cgroup_all(mem)
-			memset(&mem->idle_scan_stats, 0,
-			       sizeof(mem->idle_scan_stats));
+		earlier = jiffies;
 
+		scan_done = true;
 		for_each_node_state(nid, N_HIGH_MEMORY)
-			kstaled_scan_node(NODE_DATA(nid));
+			scan_done &= kstaled_scan_node(NODE_DATA(nid),
+						       scan_seconds, reset);
+
+		if (scan_done) {
+			struct mem_cgroup *mem;
+
+			for_each_mem_cgroup_all(mem) {
+				write_seqcount_begin(&mem->idle_page_stats_lock);
+				mem->idle_page_stats = mem->idle_scan_stats;
+				mem->idle_page_scans++;
+				write_seqcount_end(&mem->idle_page_stats_lock);
+				memset(&mem->idle_scan_stats, 0,
+				       sizeof(mem->idle_scan_stats));
+			}
+		}
 
-		for_each_mem_cgroup_all(mem) {
-			write_seqcount_begin(&mem->idle_page_stats_lock);
-			mem->idle_page_stats = mem->idle_scan_stats;
-			mem->idle_page_scans++;
-			write_seqcount_end(&mem->idle_page_stats_lock);
+		delta = jiffies - earlier;
+		if (delta < HZ / 2) {
+			delayed = 0;
+			schedule_timeout_interruptible(HZ - delta);
+		} else {
+			/*
+			 * Emergency throttle if we're taking too long.
+			 * We are supposed to scan an entire slice in 1 second.
+			 * If we keep taking longer for 10 consecutive times,
+			 * scale back our scan_seconds.
+			 *
+			 * If someone changed kstaled_scan_seconds while we
+			 * were running, hope they know what they're doing and
+			 * assume they've eliminated any delays.
+			 */
+			bool updated = false;
+			spin_lock(&kstaled_scan_seconds_lock);
+			if (scan_seconds != kstaled_scan_seconds)
+				delayed = 0;
+			else if (++delayed == 10) {
+				delayed = 0;
+				scan_seconds *= 2;
+				kstaled_scan_seconds = scan_seconds;
+				updated = true;
+			}
+			spin_unlock(&kstaled_scan_seconds_lock);
+			if (updated)
+				pr_warning("kstaled taking too long, "
+					   "scan_seconds now %d\n",
+					   scan_seconds);
+			schedule_timeout_interruptible(HZ / 2);
 		}
 
-		schedule_timeout_interruptible(scan_seconds * HZ);
+		reset = scan_done;
 	}
 
 	BUG();
@@ -5826,7 +5877,9 @@ static ssize_t kstaled_scan_seconds_store(struct kobject *kobj,
 	err = strict_strtoul(buf, 10, &input);
 	if (err)
 		return -EINVAL;
+	spin_lock(&kstaled_scan_seconds_lock);
 	kstaled_scan_seconds = input;
+	spin_unlock(&kstaled_scan_seconds_lock);
 	wake_up_interruptible(&kstaled_wait);
 	return count;
 }
-- 
1.7.3.1


