From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
linux-kernel@vger.kernel.org
Subject: [PATCH 59/75] mm/rmap: Convert rmap_walk() to take a folio
Date: Fri, 4 Feb 2022 19:58:36 +0000
Message-ID: <20220204195852.1751729-60-willy@infradead.org>
In-Reply-To: <20220204195852.1751729-1-willy@infradead.org>

This conversion ripples all the way through to every function that
rmap_walk() calls and every function that calls it.  The
rmap_walk_control callbacks now take a folio, and the
page_lock_anon_vma_read() compatibility wrapper is removed in favour
of calling folio_lock_anon_vma_read() directly.
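
For illustration only (not part of the patch; the callback name
example_rmap_one is hypothetical), a minimal sketch of an rmap_one
callback under the new calling convention, which receives the folio
directly instead of looking it up with page_folio():

static bool example_rmap_one(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.vma = vma,
		.address = addr,
	};

	/* The walk hands us the folio directly; no page_folio() lookup. */
	pvmw_set_folio(&pvmw, folio);
	while (page_vma_mapped_walk(&pvmw)) {
		/* act on each mapping of the folio at pvmw.address */
	}

	/* Return false to stop the rmap walk early; true to keep going. */
	return true;
}

Callers likewise pass the folio itself rather than &folio->page:

	struct rmap_walk_control rwc = {
		.rmap_one = example_rmap_one,
		.anon_lock = folio_lock_anon_vma_read,
	};

	rmap_walk(folio, &rwc);
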
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/ksm.h  |   4 +-
 include/linux/rmap.h |  11 ++--
 mm/damon/paddr.c     |  17 +++---
 mm/folio-compat.c    |   5 --
 mm/huge_memory.c     |   2 +-
 mm/ksm.c             |  12 ++--
 mm/migrate.c         |  12 ++--
 mm/page_idle.c       |   9 ++-
 mm/rmap.c            | 128 ++++++++++++++++++++-----------------------
 9 files changed, 91 insertions(+), 109 deletions(-)
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index a38a5bca1ba5..0b4f17418f64 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -51,7 +51,7 @@ static inline void ksm_exit(struct mm_struct *mm)
struct page *ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address);
-void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
#else /* !CONFIG_KSM */
@@ -78,7 +78,7 @@ static inline struct page *ksm_might_need_to_copy(struct page *page,
return page;
}
-static inline void rmap_walk_ksm(struct page *page,
+static inline void rmap_walk_ksm(struct folio *folio,
struct rmap_walk_control *rwc)
{
}
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 71798112a575..4e4c4412b295 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -268,7 +268,6 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
/*
* Called by memory-failure.c to kill processes.
*/
-struct anon_vma *page_lock_anon_vma_read(struct page *page);
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
@@ -288,15 +287,15 @@ struct rmap_walk_control {
* Return false if page table scanning in rmap_walk should be stopped.
* Otherwise, return true.
*/
- bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
+ bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg);
- int (*done)(struct page *page);
- struct anon_vma *(*anon_lock)(struct page *page);
+ int (*done)(struct folio *folio);
+ struct anon_vma *(*anon_lock)(struct folio *folio);
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
-void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
-void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
+void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
#else /* !CONFIG_MMU */
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 05e85a131a49..d336eafb74f8 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -16,7 +16,7 @@
#include "../internal.h"
#include "prmtv-common.h"
-static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
+static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg)
{
struct page_vma_mapped_walk pvmw = {
@@ -24,7 +24,7 @@ static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
.address = addr,
};
- pvmw_set_page(&pvmw, page);
+ pvmw_set_folio(&pvmw, folio);
while (page_vma_mapped_walk(&pvmw)) {
addr = pvmw.address;
if (pvmw.pte)
@@ -41,7 +41,7 @@ static void damon_pa_mkold(unsigned long paddr)
struct page *page = damon_get_page(PHYS_PFN(paddr));
struct rmap_walk_control rwc = {
.rmap_one = __damon_pa_mkold,
- .anon_lock = page_lock_anon_vma_read,
+ .anon_lock = folio_lock_anon_vma_read,
};
bool need_lock;
@@ -58,7 +58,7 @@ static void damon_pa_mkold(unsigned long paddr)
if (need_lock && !folio_trylock(folio))
goto out;
- rmap_walk(&folio->page, &rwc);
+ rmap_walk(folio, &rwc);
if (need_lock)
folio_unlock(folio);
@@ -91,17 +91,16 @@ struct damon_pa_access_chk_result {
bool accessed;
};
-static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
+static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg)
{
- struct folio *folio = page_folio(page);
struct damon_pa_access_chk_result *result = arg;
struct page_vma_mapped_walk pvmw = {
.vma = vma,
.address = addr,
};
- pvmw_set_page(&pvmw, page);
+ pvmw_set_folio(&pvmw, folio);
result->accessed = false;
result->page_sz = PAGE_SIZE;
while (page_vma_mapped_walk(&pvmw)) {
@@ -141,7 +140,7 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
struct rmap_walk_control rwc = {
.arg = &result,
.rmap_one = __damon_pa_young,
- .anon_lock = page_lock_anon_vma_read,
+ .anon_lock = folio_lock_anon_vma_read,
};
bool need_lock;
@@ -164,7 +163,7 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
return NULL;
}
- rmap_walk(&folio->page, &rwc);
+ rmap_walk(folio, &rwc);
if (need_lock)
folio_unlock(folio);
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index e04fba5e45e5..3804fd8c1f20 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -185,8 +185,3 @@ void page_mlock(struct page *page)
{
folio_mlock(page_folio(page));
}
-
-struct anon_vma *page_lock_anon_vma_read(struct page *page)
-{
- return folio_lock_anon_vma_read(page_folio(page));
-}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7a0f4aaf7838..f711dabc9c62 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2601,7 +2601,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
* The caller does not necessarily hold an mmap_lock that would
* prevent the anon_vma disappearing so we first we take a
* reference to it and then lock the anon_vma for write. This
- * is similar to page_lock_anon_vma_read except the write lock
+ * is similar to folio_lock_anon_vma_read except the write lock
* is taken to serialise against parallel split or collapse
* operations.
*/
diff --git a/mm/ksm.c b/mm/ksm.c
index 212186dbc89f..0ec3d9035419 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2601,21 +2601,21 @@ struct page *ksm_might_need_to_copy(struct page *page,
return new_page;
}
-void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
+void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
{
struct stable_node *stable_node;
struct rmap_item *rmap_item;
int search_new_forks = 0;
- VM_BUG_ON_PAGE(!PageKsm(page), page);
+ VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio);
/*
* Rely on the page lock to protect against concurrent modifications
* to that page's node of the stable tree.
*/
- VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
- stable_node = page_stable_node(page);
+ stable_node = folio_stable_node(folio);
if (!stable_node)
return;
again:
@@ -2650,11 +2650,11 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
continue;
- if (!rwc->rmap_one(page, vma, addr, rwc->arg)) {
+ if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) {
anon_vma_unlock_read(anon_vma);
return;
}
- if (rwc->done && rwc->done(page)) {
+ if (rwc->done && rwc->done(folio)) {
anon_vma_unlock_read(anon_vma);
return;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 4daa8298c79a..e9f369a8ee15 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -173,18 +173,16 @@ void putback_movable_pages(struct list_head *l)
/*
* Restore a potential migration pte to a working pte entry
*/
-static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
- unsigned long addr, void *old)
+static bool remove_migration_pte(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr, void *old)
{
- struct folio *folio = page_folio(page);
struct page_vma_mapped_walk pvmw = {
.vma = vma,
.address = addr,
.flags = PVMW_SYNC | PVMW_MIGRATION,
};
- VM_BUG_ON_PAGE(PageTail(page), page);
- pvmw_set_page(&pvmw, old);
+ pvmw_set_folio(&pvmw, old);
while (page_vma_mapped_walk(&pvmw)) {
pte_t pte;
swp_entry_t entry;
@@ -278,9 +276,9 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
};
if (locked)
- rmap_walk_locked(&dst->page, &rwc);
+ rmap_walk_locked(dst, &rwc);
else
- rmap_walk(&dst->page, &rwc);
+ rmap_walk(dst, &rwc);
}
/*
diff --git a/mm/page_idle.c b/mm/page_idle.c
index 35e53db430df..3563c3850795 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -46,18 +46,17 @@ static struct page *page_idle_get_page(unsigned long pfn)
return page;
}
-static bool page_idle_clear_pte_refs_one(struct page *page,
+static bool page_idle_clear_pte_refs_one(struct folio *folio,
struct vm_area_struct *vma,
unsigned long addr, void *arg)
{
- struct folio *folio = page_folio(page);
struct page_vma_mapped_walk pvmw = {
.vma = vma,
.address = addr,
};
bool referenced = false;
- pvmw_set_page(&pvmw, page);
+ pvmw_set_folio(&pvmw, folio);
while (page_vma_mapped_walk(&pvmw)) {
addr = pvmw.address;
if (pvmw.pte) {
@@ -97,7 +96,7 @@ static void page_idle_clear_pte_refs(struct page *page)
*/
static const struct rmap_walk_control rwc = {
.rmap_one = page_idle_clear_pte_refs_one,
- .anon_lock = page_lock_anon_vma_read,
+ .anon_lock = folio_lock_anon_vma_read,
};
bool need_lock;
@@ -108,7 +107,7 @@ static void page_idle_clear_pte_refs(struct page *page)
if (need_lock && !folio_trylock(folio))
return;
- rmap_walk(&folio->page, (struct rmap_walk_control *)&rwc);
+ rmap_walk(folio, (struct rmap_walk_control *)&rwc);
if (need_lock)
folio_unlock(folio);
diff --git a/mm/rmap.c b/mm/rmap.c
index 8bbbbea483cf..1ade44970ab1 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -107,15 +107,15 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
VM_BUG_ON(atomic_read(&anon_vma->refcount));
/*
- * Synchronize against page_lock_anon_vma_read() such that
+ * Synchronize against folio_lock_anon_vma_read() such that
* we can safely hold the lock without the anon_vma getting
* freed.
*
* Relies on the full mb implied by the atomic_dec_and_test() from
* put_anon_vma() against the acquire barrier implied by
- * down_read_trylock() from page_lock_anon_vma_read(). This orders:
+ * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
*
- * page_lock_anon_vma_read() VS put_anon_vma()
+ * folio_lock_anon_vma_read() VS put_anon_vma()
* down_read_trylock() atomic_dec_and_test()
* LOCK MB
* atomic_read() rwsem_is_locked()
@@ -168,7 +168,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
* allocate a new one.
*
* Anon-vma allocations are very subtle, because we may have
- * optimistically looked up an anon_vma in page_lock_anon_vma_read()
+ * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
* and that may actually touch the rwsem even in the newly
* allocated vma (it depends on RCU to make sure that the
* anon_vma isn't actually destroyed).
@@ -799,10 +799,9 @@ struct page_referenced_arg {
/*
* arg: page_referenced_arg will be passed
*/
-static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
+static bool page_referenced_one(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
- struct folio *folio = page_folio(page);
struct page_referenced_arg *pra = arg;
struct page_vma_mapped_walk pvmw = {
.vma = vma,
@@ -810,7 +809,7 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
};
int referenced = 0;
- pvmw_set_page(&pvmw, page);
+ pvmw_set_folio(&pvmw, folio);
while (page_vma_mapped_walk(&pvmw)) {
address = pvmw.address;
@@ -895,7 +894,7 @@ int folio_referenced(struct folio *folio, int is_locked,
struct rmap_walk_control rwc = {
.rmap_one = page_referenced_one,
.arg = (void *)&pra,
- .anon_lock = page_lock_anon_vma_read,
+ .anon_lock = folio_lock_anon_vma_read,
};
*vm_flags = 0;
@@ -920,7 +919,7 @@ int folio_referenced(struct folio *folio, int is_locked,
rwc.invalid_vma = invalid_page_referenced_vma;
}
- rmap_walk(&folio->page, &rwc);
+ rmap_walk(folio, &rwc);
*vm_flags = pra.vm_flags;
if (we_locked)
@@ -929,10 +928,9 @@ int folio_referenced(struct folio *folio, int is_locked,
return pra.referenced;
}
-static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
+static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
- struct folio *folio = page_folio(page);
struct page_vma_mapped_walk pvmw = {
.vma = vma,
.address = address,
@@ -941,7 +939,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
struct mmu_notifier_range range;
int *cleaned = arg;
- pvmw_set_page(&pvmw, page);
+ pvmw_set_folio(&pvmw, folio);
/*
* We have to assume the worse case ie pmd for invalidation. Note that
* the folio can not be freed from this function.
@@ -1031,7 +1029,7 @@ int folio_mkclean(struct folio *folio)
if (!mapping)
return 0;
- rmap_walk(&folio->page, &rwc);
+ rmap_walk(folio, &rwc);
return cleaned;
}
@@ -1422,10 +1420,9 @@ void page_remove_rmap(struct page *page, bool compound)
/*
* @arg: enum ttu_flags will be passed to this argument
*/
-static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
- struct folio *folio = page_folio(page);
struct mm_struct *mm = vma->vm_mm;
struct page_vma_mapped_walk pvmw = {
.vma = vma,
@@ -1437,7 +1434,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
- pvmw_set_page(&pvmw, page);
+ pvmw_set_folio(&pvmw, folio);
/*
* When racing against e.g. zap_pte_range() on another cpu,
* in between its ptep_get_and_clear_full() and page_remove_rmap(),
@@ -1690,9 +1687,9 @@ static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
return vma_is_temporary_stack(vma);
}
-static int page_not_mapped(struct page *page)
+static int page_not_mapped(struct folio *folio)
{
- return !page_mapped(page);
+ return !folio_mapped(folio);
}
/**
@@ -1712,13 +1709,13 @@ void try_to_unmap(struct folio *folio, enum ttu_flags flags)
.rmap_one = try_to_unmap_one,
.arg = (void *)flags,
.done = page_not_mapped,
- .anon_lock = page_lock_anon_vma_read,
+ .anon_lock = folio_lock_anon_vma_read,
};
if (flags & TTU_RMAP_LOCKED)
- rmap_walk_locked(&folio->page, &rwc);
+ rmap_walk_locked(folio, &rwc);
else
- rmap_walk(&folio->page, &rwc);
+ rmap_walk(folio, &rwc);
}
/*
@@ -1727,10 +1724,9 @@ void try_to_unmap(struct folio *folio, enum ttu_flags flags)
* If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
* containing migration entries.
*/
-static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
+static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
- struct folio *folio = page_folio(page);
struct mm_struct *mm = vma->vm_mm;
struct page_vma_mapped_walk pvmw = {
.vma = vma,
@@ -1742,7 +1738,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
- pvmw_set_page(&pvmw, page);
+ pvmw_set_folio(&pvmw, folio);
/*
* When racing against e.g. zap_pte_range() on another cpu,
* in between its ptep_get_and_clear_full() and page_remove_rmap(),
@@ -1976,7 +1972,7 @@ void try_to_migrate(struct folio *folio, enum ttu_flags flags)
.rmap_one = try_to_migrate_one,
.arg = (void *)flags,
.done = page_not_mapped,
- .anon_lock = page_lock_anon_vma_read,
+ .anon_lock = folio_lock_anon_vma_read,
};
/*
@@ -2002,25 +1998,24 @@ void try_to_migrate(struct folio *folio, enum ttu_flags flags)
rwc.invalid_vma = invalid_migration_vma;
if (flags & TTU_RMAP_LOCKED)
- rmap_walk_locked(&folio->page, &rwc);
+ rmap_walk_locked(folio, &rwc);
else
- rmap_walk(&folio->page, &rwc);
+ rmap_walk(folio, &rwc);
}
/*
* Walks the vma's mapping a page and mlocks the page if any locked vma's are
* found. Once one is found the page is locked and the scan can be terminated.
*/
-static bool page_mlock_one(struct page *page, struct vm_area_struct *vma,
+static bool page_mlock_one(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, void *unused)
{
- struct folio *folio = page_folio(page);
struct page_vma_mapped_walk pvmw = {
.vma = vma,
.address = address,
};
- pvmw_set_page(&pvmw, page);
+ pvmw_set_folio(&pvmw, folio);
/* An un-locked vma doesn't have any pages to lock, continue the scan */
if (!(vma->vm_flags & VM_LOCKED))
return true;
@@ -2064,7 +2059,7 @@ void folio_mlock(struct folio *folio)
struct rmap_walk_control rwc = {
.rmap_one = page_mlock_one,
.done = page_not_mapped,
- .anon_lock = page_lock_anon_vma_read,
+ .anon_lock = folio_lock_anon_vma_read,
};
@@ -2077,7 +2072,7 @@ void folio_mlock(struct folio *folio)
if (folio_test_large(folio) && folio_test_anon(folio))
return;
- rmap_walk(&folio->page, &rwc);
+ rmap_walk(folio, &rwc);
}
#ifdef CONFIG_DEVICE_PRIVATE
@@ -2088,10 +2083,9 @@ struct make_exclusive_args {
bool valid;
};
-static bool page_make_device_exclusive_one(struct page *page,
+static bool page_make_device_exclusive_one(struct folio *folio,
struct vm_area_struct *vma, unsigned long address, void *priv)
{
- struct folio *folio = page_folio(page);
struct mm_struct *mm = vma->vm_mm;
struct page_vma_mapped_walk pvmw = {
.vma = vma,
@@ -2105,7 +2099,7 @@ static bool page_make_device_exclusive_one(struct page *page,
swp_entry_t entry;
pte_t swp_pte;
- pvmw_set_page(&pvmw, page);
+ pvmw_set_folio(&pvmw, folio);
mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
vma->vm_mm, address, min(vma->vm_end,
address + folio_size(folio)),
@@ -2199,7 +2193,7 @@ static bool folio_make_device_exclusive(struct folio *folio,
struct rmap_walk_control rwc = {
.rmap_one = page_make_device_exclusive_one,
.done = page_not_mapped,
- .anon_lock = page_lock_anon_vma_read,
+ .anon_lock = folio_lock_anon_vma_read,
.arg = &args,
};
@@ -2210,7 +2204,7 @@ static bool folio_make_device_exclusive(struct folio *folio,
if (!folio_test_anon(folio))
return false;
- rmap_walk(&folio->page, &rwc);
+ rmap_walk(folio, &rwc);
return args.valid && !folio_mapcount(folio);
}
@@ -2278,17 +2272,16 @@ void __put_anon_vma(struct anon_vma *anon_vma)
anon_vma_free(root);
}
-static struct anon_vma *rmap_walk_anon_lock(struct page *page,
+static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
struct rmap_walk_control *rwc)
{
- struct folio *folio = page_folio(page);
struct anon_vma *anon_vma;
if (rwc->anon_lock)
- return rwc->anon_lock(page);
+ return rwc->anon_lock(folio);
/*
- * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
+ * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read()
* because that depends on page_mapped(); but not all its usages
* are holding mmap_lock. Users without mmap_lock are required to
* take a reference count to prevent the anon_vma disappearing
@@ -2315,10 +2308,9 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
* vm_flags for that VMA. That should be OK, because that vma shouldn't be
* LOCKED.
*/
-static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
+static void rmap_walk_anon(struct folio *folio, struct rmap_walk_control *rwc,
bool locked)
{
- struct folio *folio = page_folio(page);
struct anon_vma *anon_vma;
pgoff_t pgoff_start, pgoff_end;
struct anon_vma_chain *avc;
@@ -2328,17 +2320,17 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
/* anon_vma disappear under us? */
VM_BUG_ON_FOLIO(!anon_vma, folio);
} else {
- anon_vma = rmap_walk_anon_lock(page, rwc);
+ anon_vma = rmap_walk_anon_lock(folio, rwc);
}
if (!anon_vma)
return;
- pgoff_start = page_to_pgoff(page);
- pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
+ pgoff_start = folio_pgoff(folio);
+ pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
pgoff_start, pgoff_end) {
struct vm_area_struct *vma = avc->vma;
- unsigned long address = vma_address(page, vma);
+ unsigned long address = vma_address(&folio->page, vma);
VM_BUG_ON_VMA(address == -EFAULT, vma);
cond_resched();
@@ -2346,9 +2338,9 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
continue;
- if (!rwc->rmap_one(page, vma, address, rwc->arg))
+ if (!rwc->rmap_one(folio, vma, address, rwc->arg))
break;
- if (rwc->done && rwc->done(page))
+ if (rwc->done && rwc->done(folio))
break;
}
@@ -2369,10 +2361,10 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
* vm_flags for that VMA. That should be OK, because that vma shouldn't be
* LOCKED.
*/
-static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
+static void rmap_walk_file(struct folio *folio, struct rmap_walk_control *rwc,
bool locked)
{
- struct address_space *mapping = page_mapping(page);
+ struct address_space *mapping = folio_mapping(folio);
pgoff_t pgoff_start, pgoff_end;
struct vm_area_struct *vma;
@@ -2382,18 +2374,18 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
* structure at mapping cannot be freed and reused yet,
* so we can safely take mapping->i_mmap_rwsem.
*/
- VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
if (!mapping)
return;
- pgoff_start = page_to_pgoff(page);
- pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
+ pgoff_start = folio_pgoff(folio);
+ pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
if (!locked)
i_mmap_lock_read(mapping);
vma_interval_tree_foreach(vma, &mapping->i_mmap,
pgoff_start, pgoff_end) {
- unsigned long address = vma_address(page, vma);
+ unsigned long address = vma_address(&folio->page, vma);
VM_BUG_ON_VMA(address == -EFAULT, vma);
cond_resched();
@@ -2401,9 +2393,9 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
continue;
- if (!rwc->rmap_one(page, vma, address, rwc->arg))
+ if (!rwc->rmap_one(folio, vma, address, rwc->arg))
goto done;
- if (rwc->done && rwc->done(page))
+ if (rwc->done && rwc->done(folio))
goto done;
}
@@ -2412,25 +2404,25 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
i_mmap_unlock_read(mapping);
}
-void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
+void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
{
- if (unlikely(PageKsm(page)))
- rmap_walk_ksm(page, rwc);
- else if (PageAnon(page))
- rmap_walk_anon(page, rwc, false);
+ if (unlikely(folio_test_ksm(folio)))
+ rmap_walk_ksm(folio, rwc);
+ else if (folio_test_anon(folio))
+ rmap_walk_anon(folio, rwc, false);
else
- rmap_walk_file(page, rwc, false);
+ rmap_walk_file(folio, rwc, false);
}
/* Like rmap_walk, but caller holds relevant rmap lock */
-void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
+void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
{
/* no ksm support for now */
- VM_BUG_ON_PAGE(PageKsm(page), page);
- if (PageAnon(page))
- rmap_walk_anon(page, rwc, true);
+ VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
+ if (folio_test_anon(folio))
+ rmap_walk_anon(folio, rwc, true);
else
- rmap_walk_file(page, rwc, true);
+ rmap_walk_file(folio, rwc, true);
}
#ifdef CONFIG_HUGETLB_PAGE
--
2.34.1