From: Mel Gorman <mgorman@techsingularity.net>
To: Andrew Morton <akpm@linux-foundation.org>, Linux-MM <linux-mm@kvack.org>
Cc: Rik van Riel <riel@surriel.com>, Vlastimil Babka <vbabka@suse.cz>,
	Johannes Weiner <hannes@cmpxchg.org>,
	LKML <linux-kernel@vger.kernel.org>,
	Mel Gorman <mgorman@techsingularity.net>
Subject: [PATCH 26/31] mm, page_alloc: remove fair zone allocation policy
Date: Fri,  1 Jul 2016 21:01:34 +0100	[thread overview]
Message-ID: <1467403299-25786-27-git-send-email-mgorman@techsingularity.net> (raw)
In-Reply-To: <1467403299-25786-1-git-send-email-mgorman@techsingularity.net>

The fair zone allocation policy interleaves allocation requests between
zones to avoid an age inversion problem whereby new pages are reclaimed
to balance a zone.  Reclaim is now node-based, so this should no longer
be an issue, and the fair zone allocation policy is not free.  This
patch removes it.
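
For context, the mechanism being removed worked roughly as follows: each
zone carried an NR_ALLOC_BATCH credit sized from its watermarks,
allocations drained the credit of the zone they came from, and once every
local zone's credit was depleted the batches were reset and the zonelist
scan restarted.  The sketch below is a standalone userspace model of that
round-robin, not kernel code; the zone sizes and the divide-by-eight batch
formula are made-up assumptions for illustration only.

/*
 * Standalone model of the fair zone allocation policy removed by this
 * patch.  Illustration only: the real kernel kept the credit in the
 * NR_ALLOC_BATCH vmstat counter and sized it from the zone watermarks;
 * the zone sizes and the /8 batch formula here are made-up assumptions.
 */
#include <stdio.h>

struct fake_zone {
	const char *name;
	long managed_pages;	/* illustrative zone size */
	long alloc_batch;	/* remaining fair-allocation credit */
};

static struct fake_zone zones[] = {
	{ "DMA32",  1000, 0 },
	{ "Normal", 3000, 0 },
};
#define NR_FAKE_ZONES (sizeof(zones) / sizeof(zones[0]))

/* Refill every batch in rough proportion to zone size. */
static void reset_batches(void)
{
	for (size_t i = 0; i < NR_FAKE_ZONES; i++)
		zones[i].alloc_batch = zones[i].managed_pages / 8;
}

/*
 * Take one page from the first zone with credit left; when all local
 * batches are depleted, reset them and rescan, mirroring the old
 * reset_alloc_batches() + "goto zonelist_scan" behaviour.
 */
static struct fake_zone *fair_alloc_one_page(void)
{
	for (int attempt = 0; attempt < 2; attempt++) {
		for (size_t i = 0; i < NR_FAKE_ZONES; i++) {
			if (zones[i].alloc_batch > 0) {
				zones[i].alloc_batch--;
				return &zones[i];
			}
		}
		reset_batches();
	}
	return NULL;
}

int main(void)
{
	long got[NR_FAKE_ZONES] = { 0 };

	reset_batches();
	for (int i = 0; i < 2000; i++) {
		struct fake_zone *z = fair_alloc_one_page();

		for (size_t j = 0; j < NR_FAKE_ZONES; j++)
			if (z == &zones[j])
				got[j]++;
	}

	/* Pages end up spread across zones in proportion to their size. */
	for (size_t i = 0; i < NR_FAKE_ZONES; i++)
		printf("%-7s received %ld pages\n", zones[i].name, got[i]);

	return 0;
}

With the numbers above roughly three quarters of the pages land in the
larger "Normal" zone; that proportional aging is what the batch counters
bought, at the cost of the extra vmstat updates this patch deletes.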

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
---
 include/linux/mmzone.h |  5 ----
 mm/internal.h          |  1 -
 mm/page_alloc.c        | 75 +-------------------------------------------------
 mm/vmstat.c            |  4 +--
 4 files changed, 2 insertions(+), 83 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index bb6902b73d16..facee6b83440 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -110,7 +110,6 @@ struct zone_padding {
 enum zone_stat_item {
 	/* First 128 byte cacheline (assuming 64 bit words) */
 	NR_FREE_PAGES,
-	NR_ALLOC_BATCH,
 	NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
 	NR_ZONE_LRU_ANON = NR_ZONE_LRU_BASE,
 	NR_ZONE_LRU_FILE,
@@ -515,10 +514,6 @@ struct zone {
 	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
 } ____cacheline_internodealigned_in_smp;
 
-enum zone_flags {
-	ZONE_FAIR_DEPLETED,		/* fair zone policy batch depleted */
-};
-
 enum pgdat_flags {
 	PGDAT_CONGESTED,		/* zone has many dirty pages backed by
 					 * a congested BDI
diff --git a/mm/internal.h b/mm/internal.h
index 1e21b2d3838d..28932cd6a195 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -467,7 +467,6 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET		0x40 /* check for correct cpuset */
 #define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
-#define ALLOC_FAIR		0x100 /* fair zone allocation */
 
 enum ttu_flags;
 struct tlbflush_unmap_batch;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index eb31f114d0d8..d4815a30965b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2630,7 +2630,6 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 			else
 				page = list_first_entry(list, struct page, lru);
 
-			__dec_zone_state(zone, NR_ALLOC_BATCH);
 			list_del(&page->lru);
 			pcp->count--;
 
@@ -2656,15 +2655,10 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 		spin_unlock(&zone->lock);
 		if (!page)
 			goto failed;
-		__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
 		__mod_zone_freepage_state(zone, -(1 << order),
 					  get_pcppage_migratetype(page));
 	}
 
-	if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
-	    !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
-		set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
-
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 	zone_statistics(preferred_zone, zone, gfp_flags);
 	local_irq_restore(flags);
@@ -2875,40 +2869,18 @@ bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
 }
 
 #ifdef CONFIG_NUMA
-static bool zone_local(struct zone *local_zone, struct zone *zone)
-{
-	return local_zone->node == zone->node;
-}
-
 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 {
 	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
 				RECLAIM_DISTANCE;
 }
 #else	/* CONFIG_NUMA */
-static bool zone_local(struct zone *local_zone, struct zone *zone)
-{
-	return true;
-}
-
 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 {
 	return true;
 }
 #endif	/* CONFIG_NUMA */
 
-static void reset_alloc_batches(struct zone *preferred_zone)
-{
-	struct zone *zone = preferred_zone->zone_pgdat->node_zones;
-
-	do {
-		mod_zone_page_state(zone, NR_ALLOC_BATCH,
-			high_wmark_pages(zone) - low_wmark_pages(zone) -
-			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
-		clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
-	} while (zone++ != preferred_zone);
-}
-
 /*
  * get_page_from_freelist goes through the zonelist trying to allocate
  * a page.
@@ -2919,10 +2891,6 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 {
 	struct zoneref *z = ac->preferred_zoneref;
 	struct zone *zone;
-	bool fair_skipped = false;
-	bool apply_fair = (alloc_flags & ALLOC_FAIR);
-
-zonelist_scan:
 	/*
 	 * Scan zonelist, looking for a zone with enough free.
 	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
@@ -2937,23 +2905,6 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 			!__cpuset_zone_allowed(zone, gfp_mask))
 				continue;
 		/*
-		 * Distribute pages in proportion to the individual
-		 * zone size to ensure fair page aging.  The zone a
-		 * page was allocated in should have no effect on the
-		 * time the page has in memory before being reclaimed.
-		 */
-		if (apply_fair) {
-			if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
-				fair_skipped = true;
-				continue;
-			}
-			if (!zone_local(ac->preferred_zoneref->zone, zone)) {
-				if (fair_skipped)
-					goto reset_fair;
-				apply_fair = false;
-			}
-		}
-		/*
 		 * When allocating a page cache page for writing, we
 		 * want to get it from a node that is within its dirty
 		 * limit, such that no single node holds more than its
@@ -3024,23 +2975,6 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 		}
 	}
 
-	/*
-	 * The first pass makes sure allocations are spread fairly within the
-	 * local node.  However, the local node might have free pages left
-	 * after the fairness batches are exhausted, and remote zones haven't
-	 * even been considered yet.  Try once more without fairness, and
-	 * include remote zones now, before entering the slowpath and waking
-	 * kswapd: prefer spilling to a remote zone over swapping locally.
-	 */
-	if (fair_skipped) {
-reset_fair:
-		apply_fair = false;
-		fair_skipped = false;
-		reset_alloc_batches(ac->preferred_zoneref->zone);
-		z = ac->preferred_zoneref;
-		goto zonelist_scan;
-	}
-
 	return NULL;
 }
 
@@ -3789,7 +3723,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 {
 	struct page *page;
 	unsigned int cpuset_mems_cookie;
-	unsigned int alloc_flags = ALLOC_WMARK_LOW|ALLOC_FAIR;
+	unsigned int alloc_flags = ALLOC_WMARK_LOW;
 	gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
 	struct alloc_context ac = {
 		.high_zoneidx = gfp_zone(gfp_mask),
@@ -6001,9 +5935,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
 		zone_seqlock_init(zone);
 		zone_pcp_init(zone);
 
-		/* For bootup, initialized properly in watermark setup */
-		mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);
-
 		if (!size)
 			continue;
 
@@ -6856,10 +6787,6 @@ static void __setup_per_zone_wmarks(void)
 		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
 
-		__mod_zone_page_state(zone, NR_ALLOC_BATCH,
-			high_wmark_pages(zone) - low_wmark_pages(zone) -
-			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
-
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index e544d7e7d8f0..905ea9ae2d5a 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -908,7 +908,6 @@ int fragmentation_index(struct zone *zone, unsigned int order)
 const char * const vmstat_text[] = {
 	/* enum zone_stat_item countes */
 	"nr_free_pages",
-	"nr_alloc_batch",
 	"nr_zone_anon_lru",
 	"nr_zone_file_lru",
 	"nr_zone_write_pending",
@@ -1619,10 +1618,9 @@ int vmstat_refresh(struct ctl_table *table, int write,
 		val = atomic_long_read(&vm_zone_stat[i]);
 		if (val < 0) {
 			switch (i) {
-			case NR_ALLOC_BATCH:
 			case NR_PAGES_SCANNED:
 				/*
-				 * These are often seen to go negative in
+				 * This is often seen to go negative in
 				 * recent kernels, but not to go permanently
 				 * negative.  Whilst it would be nicer not to
 				 * have exceptions, rooting them out would be
-- 
2.6.4
