From: Mel Gorman <mgorman@techsingularity.net>
To: Linux-MM <linux-mm@kvack.org>
Cc: Rik van Riel <riel@surriel.com>, Vlastimil Babka <vbabka@suse.cz>,
Johannes Weiner <hannes@cmpxchg.org>,
LKML <linux-kernel@vger.kernel.org>
Subject: [PATCH 26/27] mm, page_alloc: Remove fair zone allocation policy
Date: Tue, 23 Feb 2016 15:21:11 +0000
Message-ID: <20160223152111.GL2854@techsingularity.net>
In-Reply-To: <1456239890-20737-1-git-send-email-mgorman@techsingularity.net>
The fair zone allocation policy interleaves allocation requests between
zones to avoid an age-inversion problem whereby newly allocated pages
are reclaimed prematurely just to balance an individual zone. Reclaim
is now node-based, so this should no longer be an issue, and the fair
zone allocation policy is not free: it costs counter updates and flag
checks in the allocator fast path. This patch removes it.
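
For context, a minimal standalone sketch of the behaviour being removed
is below. It is illustrative only: the zone names and batch sizes are
invented, whereas the kernel sized each zone's batch from the gap
between its high and low watermarks and tracked it in the per-zone
NR_ALLOC_BATCH counter.

/*
 * Standalone model of the fair zone allocation policy (illustrative
 * only; names and batch sizes are invented for the example).
 */
#include <stdio.h>
#include <stdbool.h>

struct fzone {
	const char *name;
	long batch_size;	/* models high_wmark - low_wmark */
	long alloc_batch;	/* models NR_ALLOC_BATCH */
	bool fair_depleted;	/* models ZONE_FAIR_DEPLETED */
};

static struct fzone zones[] = {
	{ "Normal", 4, 0, false },
	{ "DMA32",  2, 0, false },
};
#define NR_FZONES (sizeof(zones) / sizeof(zones[0]))

/* Models reset_alloc_batches(): refill every local zone's batch. */
static void reset_batches(void)
{
	for (unsigned int i = 0; i < NR_FZONES; i++) {
		zones[i].alloc_batch = zones[i].batch_size;
		zones[i].fair_depleted = false;
	}
}

/* Models the ALLOC_FAIR pass: skip zones whose batch is depleted. */
static struct fzone *alloc_fair(void)
{
	for (;;) {
		for (unsigned int i = 0; i < NR_FZONES; i++) {
			struct fzone *z = &zones[i];

			if (z->fair_depleted)
				continue;
			if (--z->alloc_batch <= 0)
				z->fair_depleted = true;
			return z;
		}
		/* All batches exhausted: reset and rescan the zonelist. */
		reset_batches();
	}
}

int main(void)
{
	reset_batches();
	for (int i = 0; i < 12; i++)
		printf("page %2d -> %s\n", i, alloc_fair()->name);
	return 0;
}

Even in this simplified form, the batch decrement and depletion check
sit on the allocation fast path, and draining every local batch forces
a rescan of the zonelist; that is the overhead the removal recovers.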
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
---
 include/linux/mmzone.h |  2 --
 mm/internal.h          |  1 -
 mm/page_alloc.c        | 76 +-------------------------------------------------
 mm/vmstat.c            |  1 -
 4 files changed, 1 insertion(+), 79 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7372d59c5e3b..ccba10bd3241 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -117,7 +117,6 @@ struct zone_padding {
 enum zone_stat_item {
 	/* First 128 byte cacheline (assuming 64 bit words) */
 	NR_FREE_PAGES,
-	NR_ALLOC_BATCH,
 	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
 	NR_SLAB_RECLAIMABLE,
 	NR_SLAB_UNRECLAIMABLE,
@@ -515,7 +514,6 @@ struct zone {
 
 enum zone_flags {
 	ZONE_OOM_LOCKED,	/* zone is in OOM killer zonelist */
-	ZONE_FAIR_DEPLETED,	/* fair zone policy batch depleted */
 };
 
 enum pgdat_flags {
diff --git a/mm/internal.h b/mm/internal.h
index 1498e5c850f1..ad623eb6caa3 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -469,7 +469,6 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET		0x40 /* check for correct cpuset */
 #define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
-#define ALLOC_FAIR		0x100 /* fair zone allocation */
 
 enum ttu_flags;
 struct tlbflush_unmap_batch;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e0cb0a6fe6c4..4fcd6298b9a1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2372,11 +2372,6 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 			get_pcppage_migratetype(page));
 	}
 
-	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
-	if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
-	    !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
-		set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
-
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 	zone_statistics(preferred_zone, zone, gfp_flags);
 	local_irq_restore(flags);
@@ -2561,40 +2556,18 @@ bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
 }
 
 #ifdef CONFIG_NUMA
-static bool zone_local(struct zone *local_zone, struct zone *zone)
-{
-	return local_zone->node == zone->node;
-}
-
 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 {
 	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
 				RECLAIM_DISTANCE;
 }
 #else /* CONFIG_NUMA */
-static bool zone_local(struct zone *local_zone, struct zone *zone)
-{
-	return true;
-}
-
 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 {
 	return true;
 }
 #endif /* CONFIG_NUMA */
 
-static void reset_alloc_batches(struct zone *preferred_zone)
-{
-	struct zone *zone = preferred_zone->zone_pgdat->node_zones;
-
-	do {
-		mod_zone_page_state(zone, NR_ALLOC_BATCH,
-			high_wmark_pages(zone) - low_wmark_pages(zone) -
-			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
-		clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
-	} while (zone++ != preferred_zone);
-}
-
 /*
  * get_page_from_freelist goes through the zonelist trying to allocate
  * a page.
@@ -2607,11 +2580,6 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 	struct zoneref *z;
 	struct page *page = NULL;
 	struct zone *zone;
-	int nr_fair_skipped = 0;
-	bool zonelist_rescan;
-
-zonelist_scan:
-	zonelist_rescan = false;
 
 	/*
 	 * Scan zonelist, looking for a zone with enough free.
@@ -2626,20 +2594,6 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 			!cpuset_zone_allowed(zone, gfp_mask))
 				continue;
 		/*
-		 * Distribute pages in proportion to the individual
-		 * zone size to ensure fair page aging. The zone a
-		 * page was allocated in should have no effect on the
-		 * time the page has in memory before being reclaimed.
-		 */
-		if (alloc_flags & ALLOC_FAIR) {
-			if (!zone_local(ac->preferred_zone, zone))
-				break;
-			if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
-				nr_fair_skipped++;
-				continue;
-			}
-		}
-		/*
 		 * When allocating a page cache page for writing, we
 		 * want to get it from a zone that is within its dirty
 		 * limit, such that no single zone holds more than its
@@ -2718,27 +2672,6 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 		}
 	}
 
-	/*
-	 * The first pass makes sure allocations are spread fairly within the
-	 * local node. However, the local node might have free pages left
-	 * after the fairness batches are exhausted, and remote zones haven't
-	 * even been considered yet. Try once more without fairness, and
-	 * include remote zones now, before entering the slowpath and waking
-	 * kswapd: prefer spilling to a remote zone over swapping locally.
-	 */
-	if (alloc_flags & ALLOC_FAIR) {
-		alloc_flags &= ~ALLOC_FAIR;
-		if (nr_fair_skipped) {
-			zonelist_rescan = true;
-			reset_alloc_batches(ac->preferred_zone);
-		}
-		if (nr_online_nodes > 1)
-			zonelist_rescan = true;
-	}
-
-	if (zonelist_rescan)
-		goto zonelist_scan;
-
 	return NULL;
 }
 
@@ -3404,7 +3337,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	struct zoneref *preferred_zoneref;
 	struct page *page = NULL;
 	unsigned int cpuset_mems_cookie;
-	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
+	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;
 	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
 	struct alloc_context ac = {
 		.high_zoneidx = gfp_zone(gfp_mask),
@@ -5583,9 +5516,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
 		zone_seqlock_init(zone);
 		zone_pcp_init(zone);
 
-		/* For bootup, initialized properly in watermark setup */
-		mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);
-
 		if (!size)
 			continue;
 
@@ -6425,10 +6355,6 @@ static void __setup_per_zone_wmarks(void)
 		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
 
-		__mod_zone_page_state(zone, NR_ALLOC_BATCH,
-			high_wmark_pages(zone) - low_wmark_pages(zone) -
-			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
-
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 4d8617b02032..34fe40824213 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -942,7 +942,6 @@ int fragmentation_index(struct zone *zone, unsigned int order)
 const char * const vmstat_text[] = {
 	/* enum zone_stat_item counters */
 	"nr_free_pages",
-	"nr_alloc_batch",
 	"nr_mlock",
 	"nr_slab_reclaimable",
 	"nr_slab_unreclaimable",
--
2.6.4
--
Mel Gorman
SUSE Labs