From: Alex Shi <alex.shi@linux.alibaba.com>
To: akpm@linux-foundation.org, mgorman@techsingularity.net,
tj@kernel.org, hughd@google.com, khlebnikov@yandex-team.ru,
daniel.m.jordan@oracle.com, yang.shi@linux.alibaba.com,
willy@infradead.org, hannes@cmpxchg.org, lkp@intel.com,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
cgroups@vger.kernel.org, shakeelb@google.com,
iamjoonsoo.kim@lge.com, richard.weiyang@gmail.com
Subject: [PATCH v14 09/20] mm/swap: fold vm event PGROTATED into pagevec_move_tail_fn
Date: Fri, 3 Jul 2020 13:07:42 +0800
Message-ID: <1593752873-4493-10-git-send-email-alex.shi@linux.alibaba.com>
In-Reply-To: <1593752873-4493-1-git-send-email-alex.shi@linux.alibaba.com>
Fold the PGROTATED event accounting into the pagevec_move_tail_fn callback,
the same way the other callbacks used with pagevec_lru_move_fn already handle
their own accounting. With that, all users of pagevec_lru_move_fn look the
same and the third parameter is no longer needed, which simplifies the call
sites.
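For illustration, the shape of the signature change, distilled from the diff
below (a sketch only; all names are taken from the patch itself):

	/* Before: an opaque arg is threaded through to the callback so
	 * pagevec_move_tail() can accumulate a count and flush it to
	 * the PGROTATED vm event once per batch. */
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg);

	/* After: pagevec_move_tail_fn() counts PGROTATED itself via
	 * __count_vm_events(), so every callback shares the same
	 * two-argument signature and the pagevec_move_tail() wrapper
	 * can be dropped. */
	void (*move_fn)(struct page *page, struct lruvec *lruvec);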
[lkp@intel.com: found a build issue in the original patch, thanks]
Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-mm@kvack.org
Cc: linux-kernel@vger.kernel.org
---
mm/swap.c | 66 +++++++++++++++++++++++----------------------------------------
1 file changed, 24 insertions(+), 42 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 7701d855873d..dc8b02cdddcb 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -204,8 +204,7 @@ int get_kernel_page(unsigned long start, int write, struct page **pages)
EXPORT_SYMBOL_GPL(get_kernel_page);
static void pagevec_lru_move_fn(struct pagevec *pvec,
- void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
- void *arg)
+ void (*move_fn)(struct page *page, struct lruvec *lruvec))
{
int i;
struct pglist_data *pgdat = NULL;
@@ -224,7 +223,7 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
}
lruvec = mem_cgroup_page_lruvec(page, pgdat);
- (*move_fn)(page, lruvec, arg);
+ (*move_fn)(page, lruvec);
}
if (pgdat)
spin_unlock_irqrestore(&pgdat->lru_lock, flags);
@@ -232,35 +231,23 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
pagevec_reinit(pvec);
}
-static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
- void *arg)
+static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
{
- int *pgmoved = arg;
-
if (PageLRU(page) && !PageUnevictable(page)) {
del_page_from_lru_list(page, lruvec, page_lru(page));
ClearPageActive(page);
add_page_to_lru_list_tail(page, lruvec, page_lru(page));
- (*pgmoved) += hpage_nr_pages(page);
+ __count_vm_events(PGROTATED, hpage_nr_pages(page));
}
}
/*
- * pagevec_move_tail() must be called with IRQ disabled.
- * Otherwise this may cause nasty races.
- */
-static void pagevec_move_tail(struct pagevec *pvec)
-{
- int pgmoved = 0;
-
- pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
- __count_vm_events(PGROTATED, pgmoved);
-}
-
-/*
* Writeback is about to end against a page which has been marked for immediate
* reclaim. If it still appears to be reclaimable, move it to the tail of the
* inactive list.
+ *
+ * pagevec_move_tail_fn() must be called with IRQ disabled.
+ * Otherwise this may cause nasty races.
*/
void rotate_reclaimable_page(struct page *page)
{
@@ -273,7 +260,7 @@ void rotate_reclaimable_page(struct page *page)
local_lock_irqsave(&lru_rotate.lock, flags);
pvec = this_cpu_ptr(&lru_rotate.pvec);
if (!pagevec_add(pvec, page) || PageCompound(page))
- pagevec_move_tail(pvec);
+ pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
local_unlock_irqrestore(&lru_rotate.lock, flags);
}
}
@@ -315,8 +302,7 @@ void lru_note_cost_page(struct page *page)
page_is_file_lru(page), hpage_nr_pages(page));
}
-static void __activate_page(struct page *page, struct lruvec *lruvec,
- void *arg)
+static void __activate_page(struct page *page, struct lruvec *lruvec)
{
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
int lru = page_lru_base_type(page);
@@ -340,7 +326,7 @@ static void activate_page_drain(int cpu)
struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);
if (pagevec_count(pvec))
- pagevec_lru_move_fn(pvec, __activate_page, NULL);
+ pagevec_lru_move_fn(pvec, __activate_page);
}
static bool need_activate_page_drain(int cpu)
@@ -358,7 +344,7 @@ void activate_page(struct page *page)
pvec = this_cpu_ptr(&lru_pvecs.activate_page);
get_page(page);
if (!pagevec_add(pvec, page) || PageCompound(page))
- pagevec_lru_move_fn(pvec, __activate_page, NULL);
+ pagevec_lru_move_fn(pvec, __activate_page);
local_unlock(&lru_pvecs.lock);
}
}
@@ -374,7 +360,7 @@ void activate_page(struct page *page)
page = compound_head(page);
spin_lock_irq(&pgdat->lru_lock);
- __activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL);
+ __activate_page(page, mem_cgroup_page_lruvec(page, pgdat));
spin_unlock_irq(&pgdat->lru_lock);
}
#endif
@@ -526,8 +512,7 @@ void lru_cache_add_active_or_unevictable(struct page *page,
* be write it out by flusher threads as this is much more effective
* than the single-page writeout from reclaim.
*/
-static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
- void *arg)
+static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
{
int lru;
bool active;
@@ -574,8 +559,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
}
}
-static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
- void *arg)
+static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
{
if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
int lru = page_lru_base_type(page);
@@ -592,8 +576,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
}
}
-static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
- void *arg)
+static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
{
if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
!PageSwapCache(page) && !PageUnevictable(page)) {
@@ -636,21 +619,21 @@ void lru_add_drain_cpu(int cpu)
/* No harm done if a racing interrupt already did this */
local_lock_irqsave(&lru_rotate.lock, flags);
- pagevec_move_tail(pvec);
+ pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
local_unlock_irqrestore(&lru_rotate.lock, flags);
}
pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
if (pagevec_count(pvec))
- pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
+ pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
if (pagevec_count(pvec))
- pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+ pagevec_lru_move_fn(pvec, lru_deactivate_fn);
pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu);
if (pagevec_count(pvec))
- pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
+ pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
activate_page_drain(cpu);
}
@@ -679,7 +662,7 @@ void deactivate_file_page(struct page *page)
pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
if (!pagevec_add(pvec, page) || PageCompound(page))
- pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
+ pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
local_unlock(&lru_pvecs.lock);
}
}
@@ -701,7 +684,7 @@ void deactivate_page(struct page *page)
pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
get_page(page);
if (!pagevec_add(pvec, page) || PageCompound(page))
- pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+ pagevec_lru_move_fn(pvec, lru_deactivate_fn);
local_unlock(&lru_pvecs.lock);
}
}
@@ -723,7 +706,7 @@ void mark_page_lazyfree(struct page *page)
pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
get_page(page);
if (!pagevec_add(pvec, page) || PageCompound(page))
- pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
+ pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
local_unlock(&lru_pvecs.lock);
}
}
@@ -933,8 +916,7 @@ void __pagevec_release(struct pagevec *pvec)
}
EXPORT_SYMBOL(__pagevec_release);
-static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
- void *arg)
+static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
{
enum lru_list lru;
int was_unevictable = TestClearPageUnevictable(page);
@@ -993,7 +975,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
*/
void __pagevec_lru_add(struct pagevec *pvec)
{
- pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
+ pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn);
}
/**
--
1.8.3.1