* [PATCH resend 3/3] mm: fix LRU balancing effect of new transparent huge pages
@ 2020-05-27 18:29 ` Shakeel Butt
0 siblings, 0 replies; 8+ messages in thread
From: Shakeel Butt @ 2020-05-27 18:29 UTC (permalink / raw)
To: Mel Gorman, Johannes Weiner, Roman Gushchin, Michal Hocko
Cc: Andrew Morton, Minchan Kim, Rik van Riel, linux-mm, linux-kernel,
Shakeel Butt
From: Johannes Weiner <hannes@cmpxchg.org>
Currently, THP are counted as single pages until they are split right
before being swapped out. However, at that point the VM is already in
the middle of reclaim, and adjusting the LRU balance then is useless.
Always account THP by the number of basepages, and remove the fixup
from the splitting path.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Shakeel Butt <shakeelb@google.com>
---
mm/swap.c | 23 +++++++++--------------
1 file changed, 9 insertions(+), 14 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 4eb179ee0b72..b75c0ce90418 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -262,14 +262,14 @@ void rotate_reclaimable_page(struct page *page)
}
}
-static void update_page_reclaim_stat(struct lruvec *lruvec,
- int file, int rotated)
+static void update_page_reclaim_stat(struct lruvec *lruvec, int file,
+ int rotated, int nr_pages)
{
struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
- reclaim_stat->recent_scanned[file]++;
+ reclaim_stat->recent_scanned[file] += nr_pages;
if (rotated)
- reclaim_stat->recent_rotated[file]++;
+ reclaim_stat->recent_rotated[file] += nr_pages;
}
static void __activate_page(struct page *page, struct lruvec *lruvec,
@@ -288,7 +288,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
__count_vm_events(PGACTIVATE, nr_pages);
__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE, nr_pages);
- update_page_reclaim_stat(lruvec, file, 1);
+ update_page_reclaim_stat(lruvec, file, 1, nr_pages);
}
}
@@ -546,7 +546,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
__count_vm_events(PGDEACTIVATE, nr_pages);
__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
}
- update_page_reclaim_stat(lruvec, file, 0);
+ update_page_reclaim_stat(lruvec, file, 0, nr_pages);
}
static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
@@ -564,7 +564,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
__count_vm_events(PGDEACTIVATE, nr_pages);
__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
- update_page_reclaim_stat(lruvec, file, 0);
+ update_page_reclaim_stat(lruvec, file, 0, nr_pages);
}
}
@@ -590,7 +590,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
__count_vm_events(PGLAZYFREE, nr_pages);
__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE, nr_pages);
- update_page_reclaim_stat(lruvec, 1, 0);
+ update_page_reclaim_stat(lruvec, 1, 0, nr_pages);
}
}
@@ -899,8 +899,6 @@ EXPORT_SYMBOL(__pagevec_release);
void lru_add_page_tail(struct page *page, struct page *page_tail,
struct lruvec *lruvec, struct list_head *list)
{
- const int file = 0;
-
VM_BUG_ON_PAGE(!PageHead(page), page);
VM_BUG_ON_PAGE(PageCompound(page_tail), page);
VM_BUG_ON_PAGE(PageLRU(page_tail), page);
@@ -926,9 +924,6 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
add_page_to_lru_list_tail(page_tail, lruvec,
page_lru(page_tail));
}
-
- if (!PageUnevictable(page))
- update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -973,7 +968,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
if (page_evictable(page)) {
lru = page_lru(page);
update_page_reclaim_stat(lruvec, page_is_file_lru(page),
- PageActive(page));
+ PageActive(page), nr_pages);
if (was_unevictable)
__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
} else {
--
2.27.0.rc0.183.gde8f92d652-goog
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH resend 3/3] mm: fix LRU balancing effect of new transparent huge pages
@ 2020-05-27 18:29 ` Shakeel Butt
0 siblings, 0 replies; 8+ messages in thread
From: Shakeel Butt @ 2020-05-27 18:29 UTC (permalink / raw)
To: Mel Gorman, Johannes Weiner, Roman Gushchin, Michal Hocko
Cc: Andrew Morton, Minchan Kim, Rik van Riel, linux-mm, linux-kernel,
Shakeel Butt
From: Johannes Weiner <hannes@cmpxchg.org>
Currently, THP are counted as single pages until they are split right
before being swapped out. However, at that point the VM is already in
the middle of reclaim, and adjusting the LRU balance then is useless.
Always account THP by the number of basepages, and remove the fixup
from the splitting path.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Shakeel Butt <shakeelb@google.com>
---
mm/swap.c | 23 +++++++++--------------
1 file changed, 9 insertions(+), 14 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 4eb179ee0b72..b75c0ce90418 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -262,14 +262,14 @@ void rotate_reclaimable_page(struct page *page)
}
}
-static void update_page_reclaim_stat(struct lruvec *lruvec,
- int file, int rotated)
+static void update_page_reclaim_stat(struct lruvec *lruvec, int file,
+ int rotated, int nr_pages)
{
struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
- reclaim_stat->recent_scanned[file]++;
+ reclaim_stat->recent_scanned[file] += nr_pages;
if (rotated)
- reclaim_stat->recent_rotated[file]++;
+ reclaim_stat->recent_rotated[file] += nr_pages;
}
static void __activate_page(struct page *page, struct lruvec *lruvec,
@@ -288,7 +288,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
__count_vm_events(PGACTIVATE, nr_pages);
__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE, nr_pages);
- update_page_reclaim_stat(lruvec, file, 1);
+ update_page_reclaim_stat(lruvec, file, 1, nr_pages);
}
}
@@ -546,7 +546,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
__count_vm_events(PGDEACTIVATE, nr_pages);
__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
}
- update_page_reclaim_stat(lruvec, file, 0);
+ update_page_reclaim_stat(lruvec, file, 0, nr_pages);
}
static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
@@ -564,7 +564,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
__count_vm_events(PGDEACTIVATE, nr_pages);
__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
- update_page_reclaim_stat(lruvec, file, 0);
+ update_page_reclaim_stat(lruvec, file, 0, nr_pages);
}
}
@@ -590,7 +590,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
__count_vm_events(PGLAZYFREE, nr_pages);
__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE, nr_pages);
- update_page_reclaim_stat(lruvec, 1, 0);
+ update_page_reclaim_stat(lruvec, 1, 0, nr_pages);
}
}
@@ -899,8 +899,6 @@ EXPORT_SYMBOL(__pagevec_release);
void lru_add_page_tail(struct page *page, struct page *page_tail,
struct lruvec *lruvec, struct list_head *list)
{
- const int file = 0;
-
VM_BUG_ON_PAGE(!PageHead(page), page);
VM_BUG_ON_PAGE(PageCompound(page_tail), page);
VM_BUG_ON_PAGE(PageLRU(page_tail), page);
@@ -926,9 +924,6 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
add_page_to_lru_list_tail(page_tail, lruvec,
page_lru(page_tail));
}
-
- if (!PageUnevictable(page))
- update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -973,7 +968,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
if (page_evictable(page)) {
lru = page_lru(page);
update_page_reclaim_stat(lruvec, page_is_file_lru(page),
- PageActive(page));
+ PageActive(page), nr_pages);
if (was_unevictable)
__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
} else {
--
2.27.0.rc0.183.gde8f92d652-goog
^ permalink raw reply related [flat|nested] 8+ messages in thread
* Re: [PATCH resend 3/3] mm: fix LRU balancing effect of new transparent huge pages
2020-05-27 18:29 ` Shakeel Butt
(?)
@ 2020-05-27 19:41 ` Johannes Weiner
2020-05-27 19:46 ` Shakeel Butt
2020-05-27 20:46 ` Andrew Morton
-1 siblings, 2 replies; 8+ messages in thread
From: Johannes Weiner @ 2020-05-27 19:41 UTC (permalink / raw)
To: Shakeel Butt
Cc: Mel Gorman, Roman Gushchin, Michal Hocko, Andrew Morton,
Minchan Kim, Rik van Riel, linux-mm, linux-kernel
On Wed, May 27, 2020 at 11:29:58AM -0700, Shakeel Butt wrote:
> From: Johannes Weiner <hannes@cmpxchg.org>
>
> Currently, THP are counted as single pages until they are split right
> before being swapped out. However, at that point the VM is already in
> the middle of reclaim, and adjusting the LRU balance then is useless.
>
> Always account THP by the number of basepages, and remove the fixup
> from the splitting path.
>
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
> Signed-off-by: Shakeel Butt <shakeelb@google.com>
This is now already in mm as part of the "mm: balance LRU lists based
on relative thrashing" series that I sent out last week and where it
was originally from.
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH resend 3/3] mm: fix LRU balancing effect of new transparent huge pages
2020-05-27 19:41 ` Johannes Weiner
@ 2020-05-27 19:46 ` Shakeel Butt
2020-05-27 20:46 ` Andrew Morton
1 sibling, 0 replies; 8+ messages in thread
From: Shakeel Butt @ 2020-05-27 19:46 UTC (permalink / raw)
To: Johannes Weiner
Cc: Mel Gorman, Roman Gushchin, Michal Hocko, Andrew Morton,
Minchan Kim, Rik van Riel, Linux MM, LKML
On Wed, May 27, 2020 at 12:42 PM Johannes Weiner <hannes@cmpxchg.org> wrote:
>
> On Wed, May 27, 2020 at 11:29:58AM -0700, Shakeel Butt wrote:
> > From: Johannes Weiner <hannes@cmpxchg.org>
> >
> > Currently, THP are counted as single pages until they are split right
> > before being swapped out. However, at that point the VM is already in
> > the middle of reclaim, and adjusting the LRU balance then is useless.
> >
> > Always account THP by the number of basepages, and remove the fixup
> > from the splitting path.
> >
> > Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
> > Signed-off-by: Shakeel Butt <shakeelb@google.com>
>
> This is now already in mm as part of the "mm: balance LRU lists based
> on relative thrashing" series that I sent out last week and where it
> was originally from.
Oh sorry, I missed it.
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH resend 3/3] mm: fix LRU balancing effect of new transparent huge pages
@ 2020-05-27 19:46 ` Shakeel Butt
0 siblings, 0 replies; 8+ messages in thread
From: Shakeel Butt @ 2020-05-27 19:46 UTC (permalink / raw)
To: Johannes Weiner
Cc: Mel Gorman, Roman Gushchin, Michal Hocko, Andrew Morton,
Minchan Kim, Rik van Riel, Linux MM, LKML
On Wed, May 27, 2020 at 12:42 PM Johannes Weiner <hannes@cmpxchg.org> wrote:
>
> On Wed, May 27, 2020 at 11:29:58AM -0700, Shakeel Butt wrote:
> > From: Johannes Weiner <hannes@cmpxchg.org>
> >
> > Currently, THP are counted as single pages until they are split right
> > before being swapped out. However, at that point the VM is already in
> > the middle of reclaim, and adjusting the LRU balance then is useless.
> >
> > Always account THP by the number of basepages, and remove the fixup
> > from the splitting path.
> >
> > Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
> > Signed-off-by: Shakeel Butt <shakeelb@google.com>
>
> This is now already in mm as part of the "mm: balance LRU lists based
> on relative thrashing" series that I sent out last week and where it
> was originally from.
Oh sorry, I missed it.
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH resend 3/3] mm: fix LRU balancing effect of new transparent huge pages
2020-05-27 19:41 ` Johannes Weiner
2020-05-27 19:46 ` Shakeel Butt
@ 2020-05-27 20:46 ` Andrew Morton
2020-05-27 21:35 ` Shakeel Butt
1 sibling, 1 reply; 8+ messages in thread
From: Andrew Morton @ 2020-05-27 20:46 UTC (permalink / raw)
To: Johannes Weiner
Cc: Shakeel Butt, Mel Gorman, Roman Gushchin, Michal Hocko,
Minchan Kim, Rik van Riel, linux-mm, linux-kernel
On Wed, 27 May 2020 15:41:48 -0400 Johannes Weiner <hannes@cmpxchg.org> wrote:
> On Wed, May 27, 2020 at 11:29:58AM -0700, Shakeel Butt wrote:
> > From: Johannes Weiner <hannes@cmpxchg.org>
> >
> > Currently, THP are counted as single pages until they are split right
> > before being swapped out. However, at that point the VM is already in
> > the middle of reclaim, and adjusting the LRU balance then is useless.
> >
> > Always account THP by the number of basepages, and remove the fixup
> > from the splitting path.
> >
> > Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
> > Signed-off-by: Shakeel Butt <shakeelb@google.com>
>
> This is now already in mm as part of the "mm: balance LRU lists based
> on relative thrashing" series that I sent out last week and where it
> was originally from.
Yup. I hope [1/3] and [2/3] weren't urgent? Due to a horrid merge
mess I've staged them behind lots of other things,
notably
thp/khugepaged improvements and CoW semantics
http://lkml.kernel.org/r/20200416160026.16538-1-kirill.shutemov@linux.intel.com
and mm: memcontrol: charge swapin pages on instantiation
http://lkml.kernel.org/r/20200508183105.225460-1-hannes@cmpxchg.org
and mm: balance LRU lists based on relative thrashing
http://lkml.kernel.org/r/20200520232525.798933-1-hannes@cmpxchg.org
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH resend 3/3] mm: fix LRU balancing effect of new transparent huge pages
2020-05-27 20:46 ` Andrew Morton
@ 2020-05-27 21:35 ` Shakeel Butt
0 siblings, 0 replies; 8+ messages in thread
From: Shakeel Butt @ 2020-05-27 21:35 UTC (permalink / raw)
To: Andrew Morton
Cc: Johannes Weiner, Mel Gorman, Roman Gushchin, Michal Hocko,
Minchan Kim, Rik van Riel, Linux MM, LKML
On Wed, May 27, 2020 at 1:46 PM Andrew Morton <akpm@linux-foundation.org> wrote:
>
> On Wed, 27 May 2020 15:41:48 -0400 Johannes Weiner <hannes@cmpxchg.org> wrote:
>
> > On Wed, May 27, 2020 at 11:29:58AM -0700, Shakeel Butt wrote:
> > > From: Johannes Weiner <hannes@cmpxchg.org>
> > >
> > > Currently, THP are counted as single pages until they are split right
> > > before being swapped out. However, at that point the VM is already in
> > > the middle of reclaim, and adjusting the LRU balance then is useless.
> > >
> > > Always account THP by the number of basepages, and remove the fixup
> > > from the splitting path.
> > >
> > > Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
> > > Signed-off-by: Shakeel Butt <shakeelb@google.com>
> >
> > This is now already in mm as part of the "mm: balance LRU lists based
> > on relative thrashing" series that I sent out last week and where it
> > was originally from.
>
> Yup. I hope [1/3] and [2/3] weren't urgent?
No urgency.
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH resend 3/3] mm: fix LRU balancing effect of new transparent huge pages
@ 2020-05-27 21:35 ` Shakeel Butt
0 siblings, 0 replies; 8+ messages in thread
From: Shakeel Butt @ 2020-05-27 21:35 UTC (permalink / raw)
To: Andrew Morton
Cc: Johannes Weiner, Mel Gorman, Roman Gushchin, Michal Hocko,
Minchan Kim, Rik van Riel, Linux MM, LKML
On Wed, May 27, 2020 at 1:46 PM Andrew Morton <akpm@linux-foundation.org> wrote:
>
> On Wed, 27 May 2020 15:41:48 -0400 Johannes Weiner <hannes@cmpxchg.org> wrote:
>
> > On Wed, May 27, 2020 at 11:29:58AM -0700, Shakeel Butt wrote:
> > > From: Johannes Weiner <hannes@cmpxchg.org>
> > >
> > > Currently, THP are counted as single pages until they are split right
> > > before being swapped out. However, at that point the VM is already in
> > > the middle of reclaim, and adjusting the LRU balance then is useless.
> > >
> > > Always account THP by the number of basepages, and remove the fixup
> > > from the splitting path.
> > >
> > > Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
> > > Signed-off-by: Shakeel Butt <shakeelb@google.com>
> >
> > This is now already in mm as part of the "mm: balance LRU lists based
> > on relative thrashing" series that I sent out last week and where it
> > was originally from.
>
> Yup. I hope [1/3] and [2/3] weren't urgent?
No urgency.
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2020-05-27 21:36 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-05-27 18:29 [PATCH resend 3/3] mm: fix LRU balancing effect of new transparent huge pages Shakeel Butt
2020-05-27 18:29 ` Shakeel Butt
2020-05-27 19:41 ` Johannes Weiner
2020-05-27 19:46 ` Shakeel Butt
2020-05-27 19:46 ` Shakeel Butt
2020-05-27 20:46 ` Andrew Morton
2020-05-27 21:35 ` Shakeel Butt
2020-05-27 21:35 ` Shakeel Butt
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.