From mboxrd@z Thu Jan 1 00:00:00 1970
From: Mel Gorman
To: Andrew Morton, Linux-MM
Cc: Rik van Riel, Vlastimil Babka, Johannes Weiner, Minchan Kim,
	Joonsoo Kim, LKML, Mel Gorman
Subject: [PATCH 08/34] mm, vmscan: remove balance gap
Date: Fri, 8 Jul 2016 10:34:44 +0100
Message-Id: <1467970510-21195-9-git-send-email-mgorman@techsingularity.net>
In-Reply-To: <1467970510-21195-1-git-send-email-mgorman@techsingularity.net>
References: <1467970510-21195-1-git-send-email-mgorman@techsingularity.net>
X-Mailing-List: linux-kernel@vger.kernel.org

The balance gap was introduced to apply equal pressure to all zones when
reclaiming for a higher zone. With node-based LRU reclaim, the balance gap
is no longer needed and the code is dead, so remove it.

[vbabka@suse.cz: Also remove KSWAPD_ZONE_BALANCE_GAP_RATIO]
Signed-off-by: Mel Gorman
Acked-by: Vlastimil Babka
---
 include/linux/swap.h |  9 ---------
 mm/vmscan.c          | 19 ++++++++-----------
 2 files changed, 8 insertions(+), 20 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index c82f916008b7..916e2eddecd6 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -157,15 +157,6 @@ enum {
 #define SWAP_CLUSTER_MAX 32UL
 #define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
 
-/*
- * Ratio between zone->managed_pages and the "gap" that above the per-zone
- * "high_wmark". While balancing nodes, We allow kswapd to shrink zones that
- * do not meet the (high_wmark + gap) watermark, even which already met the
- * high_wmark, in order to provide better per-zone lru behavior. We are ok to
- * spend not more than 1% of the memory for this zone balancing "gap".
- */
-#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100
-
 #define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
 #define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
 #define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7b382b90b145..a52167eabc96 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2518,7 +2518,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc,
  */
 static inline bool compaction_ready(struct zone *zone, int order, int classzone_idx)
 {
-	unsigned long balance_gap, watermark;
+	unsigned long watermark;
 	bool watermark_ok;
 
 	/*
@@ -2527,9 +2527,7 @@ static inline bool compaction_ready(struct zone *zone, int order, int classzone_
 	 * there is a buffer of free pages available to give compaction
 	 * a reasonable chance of completing and allocating the page
 	 */
-	balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP(
-			zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO));
-	watermark = high_wmark_pages(zone) + balance_gap + (2UL << order);
+	watermark = high_wmark_pages(zone) + (2UL << order);
 	watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, classzone_idx);
 
 	/*
@@ -3000,10 +2998,9 @@ static void age_active_anon(struct pglist_data *pgdat,
 	} while (memcg);
 }
 
-static bool zone_balanced(struct zone *zone, int order,
-			unsigned long balance_gap, int classzone_idx)
+static bool zone_balanced(struct zone *zone, int order, int classzone_idx)
 {
-	unsigned long mark = high_wmark_pages(zone) + balance_gap;
+	unsigned long mark = high_wmark_pages(zone);
 
 	return zone_watermark_ok_safe(zone, order, mark, classzone_idx);
 }
@@ -3045,7 +3042,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
 		if (!populated_zone(zone))
 			continue;
 
-		if (zone_balanced(zone, order, 0, classzone_idx))
+		if (zone_balanced(zone, order, classzone_idx))
 			return true;
 	}
 
@@ -3148,7 +3145,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 				break;
 			}
 
-			if (!zone_balanced(zone, order, 0, 0)) {
+			if (!zone_balanced(zone, order, 0)) {
 				classzone_idx = i;
 				break;
 			} else {
@@ -3216,7 +3213,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 			if (!populated_zone(zone))
 				continue;
 
-			if (zone_balanced(zone, sc.order, 0, classzone_idx)) {
+			if (zone_balanced(zone, sc.order, classzone_idx)) {
 				clear_bit(PGDAT_CONGESTED, &pgdat->flags);
 				clear_bit(PGDAT_DIRTY, &pgdat->flags);
 				goto out;
@@ -3427,7 +3424,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
 	}
 	if (!waitqueue_active(&pgdat->kswapd_wait))
 		return;
-	if (zone_balanced(zone, order, 0, 0))
+	if (zone_balanced(zone, order, 0))
 		return;
 
 	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
-- 
2.6.4
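
For anyone who wants to see the arithmetic this patch deletes in isolation,
below is a minimal userspace sketch (not kernel code) of the watermark used
by compaction_ready() before and after the change. The gap computation is
copied from the removed lines above; the zone size and watermark values are
hypothetical example numbers, where the kernel would use zone->managed_pages
and the real per-zone watermarks.

#include <stdio.h>

/* Hypothetical example values, in pages (not taken from any real zone). */
#define MANAGED_PAGES	(256UL * 1024)	/* a 1GB zone with 4K pages */
#define HIGH_WMARK	1024UL
#define LOW_WMARK	768UL
#define ORDER		3		/* an order-3 allocation request */

/* Constant and helpers mirroring the removed kernel code. */
#define KSWAPD_ZONE_BALANCE_GAP_RATIO	100
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MIN(a, b)		((a) < (b) ? (a) : (b))

int main(void)
{
	/*
	 * Before the patch: high watermark plus a balance gap capped at
	 * the low watermark or 1% of the zone, whichever is smaller.
	 */
	unsigned long balance_gap = MIN(LOW_WMARK,
		DIV_ROUND_UP(MANAGED_PAGES, KSWAPD_ZONE_BALANCE_GAP_RATIO));
	unsigned long before = HIGH_WMARK + balance_gap + (2UL << ORDER);

	/*
	 * After the patch: the gap is gone; only the high watermark plus
	 * the 2 << order slack that compaction needs remains.
	 */
	unsigned long after = HIGH_WMARK + (2UL << ORDER);

	printf("balance_gap = %lu pages\n", balance_gap);
	printf("watermark before = %lu, after = %lu\n", before, after);
	return 0;
}

With these example numbers, balance_gap = min(768, ceil(262144 / 100)) = 768
pages, so the effective watermark drops from 1808 to 1040 pages. That
difference is the extra per-zone reclaim pressure that node-based LRU reclaim
makes unnecessary.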