From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
linux-kernel@vger.kernel.org
Subject: [PATCH 42/75] mm: Convert page_vma_mapped_walk to work on PFNs
Date: Fri, 4 Feb 2022 19:58:19 +0000
Message-ID: <20220204195852.1751729-43-willy@infradead.org>
In-Reply-To: <20220204195852.1751729-1-willy@infradead.org>

page_mapped_in_vma() really just wants to walk one page, but as the
code stands, if passed the head page of a compound page, it will
walk every page in the compound page. Extract pfn/nr_pages/pgoff
from the struct page early, so they can be overridden by
page_mapped_in_vma().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
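A quick illustration of the idea (a hypothetical userspace sketch, not
part of the patch; struct pvmw_sketch and all values are invented):
describing the walk by a pfn/nr_pages/pgoff triple is what lets
page_mapped_in_vma() pin the walk to a single page even when it is
handed the head page of a compound page.

#include <stdio.h>

/* Simplified stand-in for struct page_vma_mapped_walk. */
struct pvmw_sketch {
	unsigned long pfn;	/* first page frame in the range */
	unsigned long nr_pages;	/* how many pages the walk covers */
	unsigned long pgoff;	/* offset of the range in its mapping */
};

int main(void)
{
	/* pvmw_set_page() on a 2MB compound page records 512 subpages... */
	struct pvmw_sketch whole = { 0x80000, 512, 0 };
	/* ...but page_mapped_in_vma() can seed the same walk with
	 * nr_pages = 1 to examine exactly one page. */
	struct pvmw_sketch one = { 0x80000, 1, 0 };

	printf("compound walk: %lu pages; single-page walk: %lu page\n",
	       whole.nr_pages, one.nr_pages);
	return 0;
}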
include/linux/hugetlb.h | 5 ++++
include/linux/rmap.h | 17 ++++++++----
mm/internal.h | 15 ++++++-----
mm/migrate.c | 2 +-
mm/page_vma_mapped.c | 58 ++++++++++++++++++-----------------------
mm/rmap.c | 8 +++---
6 files changed, 56 insertions(+), 49 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d1897a69c540..6ba2f8e74fbb 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -970,6 +970,11 @@ static inline struct hstate *page_hstate(struct page *page)
return NULL;
}
+static inline struct hstate *size_to_hstate(unsigned long size)
+{
+ return NULL;
+}
+
static inline unsigned long huge_page_size(struct hstate *h)
{
return PAGE_SIZE;
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index e076aca3a203..29ea97c5e96a 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -11,6 +11,7 @@
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
#include <linux/highmem.h>
+#include <linux/pagemap.h>
/*
* The anon_vma heads a list of private "related" vmas, to scan if
@@ -200,11 +201,13 @@ int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
/* Avoid racy checks */
#define PVMW_SYNC (1 << 0)
-/* Look for migarion entries rather than present PTEs */
+/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION (1 << 1)
struct page_vma_mapped_walk {
- struct page *page;
+ unsigned long pfn;
+ unsigned long nr_pages;
+ pgoff_t pgoff;
struct vm_area_struct *vma;
unsigned long address;
pmd_t *pmd;
@@ -216,19 +219,23 @@ struct page_vma_mapped_walk {
static inline void pvmw_set_page(struct page_vma_mapped_walk *pvmw,
struct page *page)
{
- pvmw->page = page;
+ pvmw->pfn = page_to_pfn(page);
+ pvmw->nr_pages = compound_nr(page);
+ pvmw->pgoff = page_to_pgoff(page);
}
static inline void pvmw_set_folio(struct page_vma_mapped_walk *pvmw,
struct folio *folio)
{
- pvmw->page = &folio->page;
+ pvmw->pfn = folio_pfn(folio);
+ pvmw->nr_pages = folio_nr_pages(folio);
+ pvmw->pgoff = folio_pgoff(folio);
}
static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
/* HugeTLB pte is set to the relevant page table entry without pte_mapped. */
- if (pvmw->pte && !PageHuge(pvmw->page))
+ if (pvmw->pte && !is_vm_hugetlb_page(pvmw->vma))
pte_unmap(pvmw->pte);
if (pvmw->ptl)
spin_unlock(pvmw->ptl);
diff --git a/mm/internal.h b/mm/internal.h
index b7a2195c12b1..7f1db0f1a8bc 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -10,6 +10,7 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
+#include <linux/rmap.h>
#include <linux/tracepoint-defs.h>
struct folio_batch;
@@ -459,18 +460,20 @@ vma_address(struct page *page, struct vm_area_struct *vma)
}
/*
- * Then at what user virtual address will none of the page be found in vma?
+ * Then at what user virtual address will none of the range be found in vma?
* Assumes that vma_address() already returned a good starting address.
- * If page is a compound head, the entire compound page is considered.
*/
-static inline unsigned long
-vma_address_end(struct page *page, struct vm_area_struct *vma)
+static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
+ struct vm_area_struct *vma = pvmw->vma;
pgoff_t pgoff;
unsigned long address;
- VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */
- pgoff = page_to_pgoff(page) + compound_nr(page);
+ /* Common case, plus ->pgoff is invalid for KSM */
+ if (pvmw->nr_pages == 1)
+ return pvmw->address + PAGE_SIZE;
+
+ pgoff = pvmw->pgoff + pvmw->nr_pages;
address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
/* Check for address beyond vma (or wrapped through 0?) */
if (address < vma->vm_start || address > vma->vm_end)
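The end address above is plain pgoff arithmetic. A hypothetical
userspace model with invented numbers, assuming 4KiB pages (the clamp
assignment mirrors the source line just past those quoted here):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assuming 4KiB pages */

/* Invented model of vma_address_end(); not kernel code. */
static unsigned long model_vma_address_end(unsigned long vm_start,
		unsigned long vm_end, unsigned long vm_pgoff,
		unsigned long pgoff, unsigned long nr_pages,
		unsigned long address)
{
	if (nr_pages == 1)	/* the new common-case fast path */
		return address + (1UL << PAGE_SHIFT);
	address = vm_start + ((pgoff + nr_pages - vm_pgoff) << PAGE_SHIFT);
	if (address < vm_start || address > vm_end)
		address = vm_end;	/* clamp a range hanging past the vma */
	return address;
}

int main(void)
{
	/* a 4-page range at pgoff 16 in a vma mapped from pgoff 0 */
	printf("end = %#lx\n",
	       model_vma_address_end(0x7f0000000000UL, 0x7f0000200000UL,
				     0, 16, 4, 0x7f0000010000UL));
	return 0;	/* prints end = 0x7f0000014000 */
}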
diff --git a/mm/migrate.c b/mm/migrate.c
index 07464fd45925..766dc67874a1 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -191,7 +191,7 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
if (PageKsm(page))
new = page;
else
- new = page - pvmw.page->index +
+ new = page - pvmw.pgoff +
linear_page_index(vma, pvmw.address);
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
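To see the remapping arithmetic above with invented numbers: if the
range being walked starts at pgoff 32 and linear_page_index(vma,
pvmw.address) for the current pte is 35, then new = page - 32 + 35 =
page + 3, i.e. the migration pte is restored to the fourth subpage of
the new compound page.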
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index f7b331081791..1187f9c1ec5b 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -53,18 +53,6 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
return true;
}
-static inline bool pfn_is_match(struct page *page, unsigned long pfn)
-{
- unsigned long page_pfn = page_to_pfn(page);
-
- /* normal page and hugetlbfs page */
- if (!PageTransCompound(page) || PageHuge(page))
- return page_pfn == pfn;
-
- /* THP can be referenced by any subpage */
- return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
-}
-
/**
* check_pte - check if @pvmw->page is mapped at the @pvmw->pte
* @pvmw: page_vma_mapped_walk struct, includes a pair pte and page for checking
@@ -116,7 +104,17 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
pfn = pte_pfn(*pvmw->pte);
}
- return pfn_is_match(pvmw->page, pfn);
+ return (pfn - pvmw->pfn) < pvmw->nr_pages;
+}
+
+/* Returns true if the two ranges overlap. Careful to not overflow. */
+static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
+{
+ if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
+ return false;
+ if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
+ return false;
+ return true;
}
static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
@@ -127,7 +125,7 @@ static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
}
/**
- * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
+ * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
* @pvmw->address
* @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
* must be set. pmd, pte and ptl must be NULL.
@@ -152,8 +150,8 @@ static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
*/
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
- struct mm_struct *mm = pvmw->vma->vm_mm;
- struct page *page = pvmw->page;
+ struct vm_area_struct *vma = pvmw->vma;
+ struct mm_struct *mm = vma->vm_mm;
unsigned long end;
pgd_t *pgd;
p4d_t *p4d;
@@ -164,32 +162,26 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
if (pvmw->pmd && !pvmw->pte)
return not_found(pvmw);
- if (unlikely(PageHuge(page))) {
+ if (unlikely(is_vm_hugetlb_page(vma))) {
+ unsigned long size = pvmw->nr_pages * PAGE_SIZE;
/* The only possible mapping was handled on last iteration */
if (pvmw->pte)
return not_found(pvmw);
/* when pud is not present, pte will be NULL */
- pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
+ pvmw->pte = huge_pte_offset(mm, pvmw->address, size);
if (!pvmw->pte)
return false;
- pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
+ pvmw->ptl = huge_pte_lockptr(size_to_hstate(size), mm,
+ pvmw->pte);
spin_lock(pvmw->ptl);
if (!check_pte(pvmw))
return not_found(pvmw);
return true;
}
- /*
- * Seek to next pte only makes sense for THP.
- * But more important than that optimization, is to filter out
- * any PageKsm page: whose page->index misleads vma_address()
- * and vma_address_end() to disaster.
- */
- end = PageTransCompound(page) ?
- vma_address_end(page, pvmw->vma) :
- pvmw->address + PAGE_SIZE;
+ end = vma_address_end(pvmw);
if (pvmw->pte)
goto next_pte;
restart:
@@ -224,7 +216,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
if (likely(pmd_trans_huge(pmde))) {
if (pvmw->flags & PVMW_MIGRATION)
return not_found(pvmw);
- if (pmd_page(pmde) != page)
+ if (!check_pmd(pmd_pfn(pmde), pvmw))
return not_found(pvmw);
return true;
}
@@ -236,7 +228,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
return not_found(pvmw);
entry = pmd_to_swp_entry(pmde);
if (!is_migration_entry(entry) ||
- pfn_swap_entry_to_page(entry) != page)
+ !check_pmd(swp_offset(entry), pvmw))
return not_found(pvmw);
return true;
}
@@ -250,7 +242,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
* cleared *pmd but not decremented compound_mapcount().
*/
if ((pvmw->flags & PVMW_SYNC) &&
- PageTransCompound(page)) {
+ transparent_hugepage_active(vma) &&
+ (pvmw->nr_pages >= HPAGE_PMD_NR)) {
spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
spin_unlock(ptl);
@@ -307,7 +300,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
struct page_vma_mapped_walk pvmw = {
- .page = page,
+ .pfn = page_to_pfn(page),
+ .nr_pages = 1,
.vma = vma,
.flags = PVMW_SYNC,
};
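An aside on the two range checks introduced above; the sketch below is
a hypothetical userspace model (assuming 4KiB pages, so HPAGE_PMD_NR is
512), not part of the patch. check_pte() leans on unsigned wraparound:
when pfn is below pvmw->pfn the subtraction wraps to a huge value, so a
single compare rejects both out-of-range sides. check_pmd() compares
against the last frame of each range, so neither sum can wrap at the
top of the pfn space.

#include <assert.h>

#define HPAGE_PMD_NR 512UL	/* assuming 2MB PMDs and 4KiB pages */

/* Model of check_pte()'s test: one unsigned compare covers both
 * bounds, because pfn < base wraps the difference to a huge value. */
static int pfn_in_range(unsigned long pfn, unsigned long base,
			unsigned long nr)
{
	return (pfn - base) < nr;
}

/* Model of check_pmd(): comparing last frames keeps both sums inside
 * their ranges, so they cannot overflow. */
static int pmd_range_overlaps(unsigned long pfn, unsigned long base,
			      unsigned long nr)
{
	if (pfn + HPAGE_PMD_NR - 1 < base)
		return 0;	/* PMD range ends before ours starts */
	if (pfn > base + nr - 1)
		return 0;	/* PMD range starts after ours ends */
	return 1;
}

int main(void)
{
	assert(pfn_in_range(0x1003, 0x1000, 4));
	assert(!pfn_in_range(0x0fff, 0x1000, 4));	/* wraps to huge */
	assert(pmd_range_overlaps(0x0e01, 0x1000, 4));	/* last-frame hit */
	assert(!pmd_range_overlaps(0x2000, 0x1000, 4));
	return 0;
}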
diff --git a/mm/rmap.c b/mm/rmap.c
index fa8478372e94..d62a6fcef318 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -946,7 +946,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
*/
mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
0, vma, vma->vm_mm, address,
- vma_address_end(page, vma));
+ vma_address_end(&pvmw));
mmu_notifier_invalidate_range_start(&range);
while (page_vma_mapped_walk(&pvmw)) {
@@ -1453,8 +1453,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* Note that the page can not be free in this function as call of
* try_to_unmap() must hold a reference on the page.
*/
- range.end = PageKsm(page) ?
- address + PAGE_SIZE : vma_address_end(page, vma);
+ range.end = vma_address_end(&pvmw);
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
address, range.end);
if (PageHuge(page)) {
@@ -1757,8 +1756,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
* Note that the page can not be free in this function as call of
* try_to_unmap() must hold a reference on the page.
*/
- range.end = PageKsm(page) ?
- address + PAGE_SIZE : vma_address_end(page, vma);
+ range.end = vma_address_end(&pvmw);
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
address, range.end);
if (PageHuge(page)) {
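Why the PageKsm special case can simply go: KSM pages are always single
pages, so these walks are seeded with nr_pages == 1 and the new fast
path in vma_address_end() returns address + PAGE_SIZE without ever
consulting the pgoff that KSM leaves meaningless.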
--
2.34.1