* [PATCH 1/3] mm: Remove last argument of reuse_swap_page()
@ 2021-12-20 20:59 Matthew Wilcox (Oracle)
2021-12-20 20:59 ` [PATCH 2/3] mm: Remove the total_mapcount argument from page_trans_huge_map_swapcount() Matthew Wilcox (Oracle)
` (2 more replies)
0 siblings, 3 replies; 4+ messages in thread
From: Matthew Wilcox (Oracle) @ 2021-12-20 20:59 UTC (permalink / raw)
To: Linus Torvalds
Cc: Matthew Wilcox (Oracle), David Hildenbrand, Andrew Morton, linux-mm
None of the callers care about the total_map_swapcount argument of reuse_swap_page() any more.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/linux/swap.h | 6 +++---
mm/huge_memory.c | 2 +-
mm/khugepaged.c | 2 +-
mm/memory.c | 2 +-
mm/swapfile.c | 8 +-------
5 files changed, 7 insertions(+), 13 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d1ea44b31f19..bdccbf1efa61 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -514,7 +514,7 @@ extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
-extern bool reuse_swap_page(struct page *, int *);
+extern bool reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
@@ -680,8 +680,8 @@ static inline int swp_swapcount(swp_entry_t entry)
return 0;
}
-#define reuse_swap_page(page, total_map_swapcount) \
- (page_trans_huge_mapcount(page, total_map_swapcount) == 1)
+#define reuse_swap_page(page) \
+ (page_trans_huge_mapcount(page, NULL) == 1)
static inline int try_to_free_swap(struct page *page)
{
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e5483347291c..b61fbe95c856 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1322,7 +1322,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
* We can only reuse the page if nobody else maps the huge page or it's
* part.
*/
- if (reuse_swap_page(page, NULL)) {
+ if (reuse_swap_page(page)) {
pmd_t entry;
entry = pmd_mkyoung(orig_pmd);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index e99101162f1a..11794bdf513a 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -681,7 +681,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
goto out;
}
if (!pte_write(pteval) && PageSwapCache(page) &&
- !reuse_swap_page(page, NULL)) {
+ !reuse_swap_page(page)) {
/*
* Page is in the swap cache and cannot be re-used.
* It cannot be collapsed into a THP.
diff --git a/mm/memory.c b/mm/memory.c
index 8f1de811a1dc..dd85fd07cb24 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3626,7 +3626,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
pte = mk_pte(page, vma->vm_page_prot);
- if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
+ if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
pte = maybe_mkwrite(pte_mkdirty(pte), vma);
vmf->flags &= ~FAULT_FLAG_WRITE;
ret |= VM_FAULT_WRITE;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index e59e08ef46e1..a4f48189300a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1668,12 +1668,8 @@ static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
* to it. And as a side-effect, free up its swap: because the old content
* on disk will never be read, and seeking back there to write new content
* later would only waste time away from clustering.
- *
- * NOTE: total_map_swapcount should not be relied upon by the caller if
- * reuse_swap_page() returns false, but it may be always overwritten
- * (see the other implementation for CONFIG_SWAP=n).
*/
-bool reuse_swap_page(struct page *page, int *total_map_swapcount)
+bool reuse_swap_page(struct page *page)
{
int count, total_mapcount, total_swapcount;
@@ -1682,8 +1678,6 @@ bool reuse_swap_page(struct page *page, int *total_map_swapcount)
return false;
count = page_trans_huge_map_swapcount(page, &total_mapcount,
&total_swapcount);
- if (total_map_swapcount)
- *total_map_swapcount = total_mapcount + total_swapcount;
if (count == 1 && PageSwapCache(page) &&
(likely(!PageTransCompound(page)) ||
/* The remaining swap count will be freed soon */
--
2.33.0
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [PATCH 2/3] mm: Remove the total_mapcount argument from page_trans_huge_map_swapcount()
2021-12-20 20:59 [PATCH 1/3] mm: Remove last argument of reuse_swap_page() Matthew Wilcox (Oracle)
@ 2021-12-20 20:59 ` Matthew Wilcox (Oracle)
2021-12-20 20:59 ` [PATCH 3/3] mm: Remove the total_mapcount argument from page_trans_huge_mapcount() Matthew Wilcox (Oracle)
2021-12-21 15:56 ` [PATCH 1/3] mm: Remove last argument of reuse_swap_page() William Kucharski
2 siblings, 0 replies; 4+ messages in thread
From: Matthew Wilcox (Oracle) @ 2021-12-20 20:59 UTC (permalink / raw)
To: Linus Torvalds
Cc: Matthew Wilcox (Oracle), David Hildenbrand, Andrew Morton, linux-mm
Now that we don't report it to the caller of reuse_swap_page(), we
don't need to request it from page_trans_huge_map_swapcount().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
mm/swapfile.c | 32 ++++++++++++--------------------
1 file changed, 12 insertions(+), 20 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index a4f48189300a..cb1a04135804 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1601,31 +1601,30 @@ static bool page_swapped(struct page *page)
return false;
}
-static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
+static int page_trans_huge_map_swapcount(struct page *page,
int *total_swapcount)
{
- int i, map_swapcount, _total_mapcount, _total_swapcount;
+ int i, map_swapcount, _total_swapcount;
unsigned long offset = 0;
struct swap_info_struct *si;
struct swap_cluster_info *ci = NULL;
unsigned char *map = NULL;
- int mapcount, swapcount = 0;
+ int swapcount = 0;
/* hugetlbfs shouldn't call it */
VM_BUG_ON_PAGE(PageHuge(page), page);
if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) {
- mapcount = page_trans_huge_mapcount(page, total_mapcount);
if (PageSwapCache(page))
swapcount = page_swapcount(page);
if (total_swapcount)
*total_swapcount = swapcount;
- return mapcount + swapcount;
+ return swapcount + page_trans_huge_mapcount(page, NULL);
}
page = compound_head(page);
- _total_mapcount = _total_swapcount = map_swapcount = 0;
+ _total_swapcount = map_swapcount = 0;
if (PageSwapCache(page)) {
swp_entry_t entry;
@@ -1639,8 +1638,7 @@ static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
if (map)
ci = lock_cluster(si, offset);
for (i = 0; i < HPAGE_PMD_NR; i++) {
- mapcount = atomic_read(&page[i]._mapcount) + 1;
- _total_mapcount += mapcount;
+ int mapcount = atomic_read(&page[i]._mapcount) + 1;
if (map) {
swapcount = swap_count(map[offset + i]);
_total_swapcount += swapcount;
@@ -1648,19 +1646,14 @@ static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
map_swapcount = max(map_swapcount, mapcount + swapcount);
}
unlock_cluster(ci);
- if (PageDoubleMap(page)) {
+
+ if (PageDoubleMap(page))
map_swapcount -= 1;
- _total_mapcount -= HPAGE_PMD_NR;
- }
- mapcount = compound_mapcount(page);
- map_swapcount += mapcount;
- _total_mapcount += mapcount;
- if (total_mapcount)
- *total_mapcount = _total_mapcount;
+
if (total_swapcount)
*total_swapcount = _total_swapcount;
- return map_swapcount;
+ return map_swapcount + compound_mapcount(page);
}
/*
@@ -1671,13 +1664,12 @@ static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
*/
bool reuse_swap_page(struct page *page)
{
- int count, total_mapcount, total_swapcount;
+ int count, total_swapcount;
VM_BUG_ON_PAGE(!PageLocked(page), page);
if (unlikely(PageKsm(page)))
return false;
- count = page_trans_huge_map_swapcount(page, &total_mapcount,
- &total_swapcount);
+ count = page_trans_huge_map_swapcount(page, &total_swapcount);
if (count == 1 && PageSwapCache(page) &&
(likely(!PageTransCompound(page)) ||
/* The remaining swap count will be freed soon */
--
2.33.0
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [PATCH 3/3] mm: Remove the total_mapcount argument from page_trans_huge_mapcount()
2021-12-20 20:59 [PATCH 1/3] mm: Remove last argument of reuse_swap_page() Matthew Wilcox (Oracle)
2021-12-20 20:59 ` [PATCH 2/3] mm: Remove the total_mapcount argument from page_trans_huge_map_swapcount() Matthew Wilcox (Oracle)
@ 2021-12-20 20:59 ` Matthew Wilcox (Oracle)
2021-12-21 15:56 ` [PATCH 1/3] mm: Remove last argument of reuse_swap_page() William Kucharski
2 siblings, 0 replies; 4+ messages in thread
From: Matthew Wilcox (Oracle) @ 2021-12-20 20:59 UTC (permalink / raw)
To: Linus Torvalds
Cc: Matthew Wilcox (Oracle), David Hildenbrand, Andrew Morton, linux-mm
All callers pass NULL, so we can stop calculating the value we would
store in it.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/linux/mm.h | 10 +++-------
include/linux/swap.h | 2 +-
mm/huge_memory.c | 30 ++++++++++--------------------
mm/swapfile.c | 2 +-
4 files changed, 15 insertions(+), 29 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a7e4a9e7d807..286eb4155c80 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -840,19 +840,15 @@ static inline int page_mapcount(struct page *page)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
-int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
+int page_trans_huge_mapcount(struct page *page);
#else
static inline int total_mapcount(struct page *page)
{
return page_mapcount(page);
}
-static inline int page_trans_huge_mapcount(struct page *page,
- int *total_mapcount)
+static inline int page_trans_huge_mapcount(struct page *page)
{
- int mapcount = page_mapcount(page);
- if (total_mapcount)
- *total_mapcount = mapcount;
- return mapcount;
+ return page_mapcount(page);
}
#endif
diff --git a/include/linux/swap.h b/include/linux/swap.h
index bdccbf1efa61..1d38d9475c4d 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -681,7 +681,7 @@ static inline int swp_swapcount(swp_entry_t entry)
}
#define reuse_swap_page(page) \
- (page_trans_huge_mapcount(page, NULL) == 1)
+ (page_trans_huge_mapcount(page) == 1)
static inline int try_to_free_swap(struct page *page)
{
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b61fbe95c856..6ed86a8f6a5b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2542,38 +2542,28 @@ int total_mapcount(struct page *page)
* need full accuracy to avoid breaking page pinning, because
* page_trans_huge_mapcount() is slower than page_mapcount().
*/
-int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
+int page_trans_huge_mapcount(struct page *page)
{
- int i, ret, _total_mapcount, mapcount;
+ int i, ret;
/* hugetlbfs shouldn't call it */
VM_BUG_ON_PAGE(PageHuge(page), page);
- if (likely(!PageTransCompound(page))) {
- mapcount = atomic_read(&page->_mapcount) + 1;
- if (total_mapcount)
- *total_mapcount = mapcount;
- return mapcount;
- }
+ if (likely(!PageTransCompound(page)))
+ return atomic_read(&page->_mapcount) + 1;
page = compound_head(page);
- _total_mapcount = ret = 0;
+ ret = 0;
for (i = 0; i < thp_nr_pages(page); i++) {
- mapcount = atomic_read(&page[i]._mapcount) + 1;
+ int mapcount = atomic_read(&page[i]._mapcount) + 1;
ret = max(ret, mapcount);
- _total_mapcount += mapcount;
}
- if (PageDoubleMap(page)) {
+
+ if (PageDoubleMap(page))
ret -= 1;
- _total_mapcount -= thp_nr_pages(page);
- }
- mapcount = compound_mapcount(page);
- ret += mapcount;
- _total_mapcount += mapcount;
- if (total_mapcount)
- *total_mapcount = _total_mapcount;
- return ret;
+
+ return ret + compound_mapcount(page);
}
/* Racy check whether the huge page can be split */
diff --git a/mm/swapfile.c b/mm/swapfile.c
index cb1a04135804..7d19c0facce2 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1619,7 +1619,7 @@ static int page_trans_huge_map_swapcount(struct page *page,
swapcount = page_swapcount(page);
if (total_swapcount)
*total_swapcount = swapcount;
- return swapcount + page_trans_huge_mapcount(page, NULL);
+ return swapcount + page_trans_huge_mapcount(page);
}
page = compound_head(page);
--
2.33.0
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH 1/3] mm: Remove last argument of reuse_swap_page()
2021-12-20 20:59 [PATCH 1/3] mm: Remove last argument of reuse_swap_page() Matthew Wilcox (Oracle)
2021-12-20 20:59 ` [PATCH 2/3] mm: Remove the total_mapcount argument from page_trans_huge_map_swapcount() Matthew Wilcox (Oracle)
2021-12-20 20:59 ` [PATCH 3/3] mm: Remove the total_mapcount argument from page_trans_huge_mapcount() Matthew Wilcox (Oracle)
@ 2021-12-21 15:56 ` William Kucharski
2 siblings, 0 replies; 4+ messages in thread
From: William Kucharski @ 2021-12-21 15:56 UTC (permalink / raw)
To: Matthew Wilcox (Oracle)
Cc: Linus Torvalds, David Hildenbrand, Andrew Morton, linux-mm
I really like the cleanups and the removal of code no one is using anyway.
For the series:
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
> On Dec 20, 2021, at 1:59 PM, Matthew Wilcox (Oracle) <willy@infradead.org> wrote:
>
> None of the callers care about the total_map_swapcount argument of reuse_swap_page() any more.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
> include/linux/swap.h | 6 +++---
> mm/huge_memory.c | 2 +-
> mm/khugepaged.c | 2 +-
> mm/memory.c | 2 +-
> mm/swapfile.c | 8 +-------
> 5 files changed, 7 insertions(+), 13 deletions(-)
>
> diff --git a/include/linux/swap.h b/include/linux/swap.h
> index d1ea44b31f19..bdccbf1efa61 100644
> --- a/include/linux/swap.h
> +++ b/include/linux/swap.h
> @@ -514,7 +514,7 @@ extern int __swp_swapcount(swp_entry_t entry);
> extern int swp_swapcount(swp_entry_t entry);
> extern struct swap_info_struct *page_swap_info(struct page *);
> extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
> -extern bool reuse_swap_page(struct page *, int *);
> +extern bool reuse_swap_page(struct page *);
> extern int try_to_free_swap(struct page *);
> struct backing_dev_info;
> extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
> @@ -680,8 +680,8 @@ static inline int swp_swapcount(swp_entry_t entry)
> return 0;
> }
>
> -#define reuse_swap_page(page, total_map_swapcount) \
> - (page_trans_huge_mapcount(page, total_map_swapcount) == 1)
> +#define reuse_swap_page(page) \
> + (page_trans_huge_mapcount(page, NULL) == 1)
>
> static inline int try_to_free_swap(struct page *page)
> {
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index e5483347291c..b61fbe95c856 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -1322,7 +1322,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
> * We can only reuse the page if nobody else maps the huge page or it's
> * part.
> */
> - if (reuse_swap_page(page, NULL)) {
> + if (reuse_swap_page(page)) {
> pmd_t entry;
> entry = pmd_mkyoung(orig_pmd);
> entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index e99101162f1a..11794bdf513a 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -681,7 +681,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
> goto out;
> }
> if (!pte_write(pteval) && PageSwapCache(page) &&
> - !reuse_swap_page(page, NULL)) {
> + !reuse_swap_page(page)) {
> /*
> * Page is in the swap cache and cannot be re-used.
> * It cannot be collapsed into a THP.
> diff --git a/mm/memory.c b/mm/memory.c
> index 8f1de811a1dc..dd85fd07cb24 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3626,7 +3626,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
> dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
> pte = mk_pte(page, vma->vm_page_prot);
> - if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
> + if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
> pte = maybe_mkwrite(pte_mkdirty(pte), vma);
> vmf->flags &= ~FAULT_FLAG_WRITE;
> ret |= VM_FAULT_WRITE;
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index e59e08ef46e1..a4f48189300a 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -1668,12 +1668,8 @@ static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
> * to it. And as a side-effect, free up its swap: because the old content
> * on disk will never be read, and seeking back there to write new content
> * later would only waste time away from clustering.
> - *
> - * NOTE: total_map_swapcount should not be relied upon by the caller if
> - * reuse_swap_page() returns false, but it may be always overwritten
> - * (see the other implementation for CONFIG_SWAP=n).
> */
> -bool reuse_swap_page(struct page *page, int *total_map_swapcount)
> +bool reuse_swap_page(struct page *page)
> {
> int count, total_mapcount, total_swapcount;
>
> @@ -1682,8 +1678,6 @@ bool reuse_swap_page(struct page *page, int *total_map_swapcount)
> return false;
> count = page_trans_huge_map_swapcount(page, &total_mapcount,
> &total_swapcount);
> - if (total_map_swapcount)
> - *total_map_swapcount = total_mapcount + total_swapcount;
> if (count == 1 && PageSwapCache(page) &&
> (likely(!PageTransCompound(page)) ||
> /* The remaining swap count will be freed soon */
> --
> 2.33.0
>
>
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2021-12-21 15:57 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-12-20 20:59 [PATCH 1/3] mm: Remove last argument of reuse_swap_page() Matthew Wilcox (Oracle)
2021-12-20 20:59 ` [PATCH 2/3] mm: Remove the total_mapcount argument from page_trans_huge_map_swapcount() Matthew Wilcox (Oracle)
2021-12-20 20:59 ` [PATCH 3/3] mm: Remove the total_mapcount argument from page_trans_huge_mapcount() Matthew Wilcox (Oracle)
2021-12-21 15:56 ` [PATCH 1/3] mm: Remove last argument of reuse_swap_page() William Kucharski
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.