From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-fsdevel@vger.kernel.org, linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-kernel@vger.kernel.org
Subject: [PATCH v9 76/96] mm/page_alloc: Add __alloc_folio, __alloc_folio_node and alloc_folio
Date: Wed,  5 May 2021 16:06:08 +0100
Message-ID: <20210505150628.111735-77-willy@infradead.org>
In-Reply-To: <20210505150628.111735-1-willy@infradead.org>

These wrappers are mostly for type safety, but they also ensure that the
page allocator allocates a compound page and initialises the deferred
list if the page is large enough to have one.  While the new allocation
functions cost 65 bytes of text, they save dozens of bytes of text in
each of their callers, due to not having to call prep_transhuge_page().
Overall, this shrinks the kernel by 238 bytes.
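
To make the change concrete, this is the pattern the wrappers replace
in callers (a sketch modelled on the khugepaged code converted below,
not a new interface):

	page = alloc_pages(gfp, HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);

With the new functions this collapses to a single, correctly typed
call, since alloc_folio() adds __GFP_COMP and does the prep itself:

	folio = alloc_folio(gfp, HPAGE_PMD_ORDER);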

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
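
Note for reviewers: khugepaged_alloc_hugepage() and
alloc_migration_target() below return &folio->page from a folio
pointer that may be NULL.  That is only safe because struct page sits
at offset zero inside struct folio, so the expression still evaluates
to NULL.  If that dependency is felt to be too subtle, a hypothetical
helper (not part of this patch) could spell it out:

	static inline struct page *folio_page_or_null(struct folio *folio)
	{
		return folio ? &folio->page : NULL;
	}
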
 include/linux/gfp.h | 16 +++++++++++++
 mm/khugepaged.c     | 32 ++++++++++----------------
 mm/mempolicy.c      | 10 ++++++++
 mm/migrate.c        | 56 +++++++++++++++++++++------------------------
 mm/page_alloc.c     | 12 ++++++++++
 5 files changed, 76 insertions(+), 50 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index a503d928e684..76086c798cb1 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -511,6 +511,8 @@ static inline void arch_alloc_page(struct page *page, int order) { }
 
 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 		nodemask_t *nodemask);
+struct folio *__alloc_folio(gfp_t gfp, unsigned int order, int preferred_nid,
+		nodemask_t *nodemask);
 
 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 				nodemask_t *nodemask, int nr_pages,
@@ -543,6 +545,15 @@ __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
 	return __alloc_pages(gfp_mask, order, nid, NULL);
 }
 
+static inline
+struct folio *__alloc_folio_node(gfp_t gfp, unsigned int order, int nid)
+{
+	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+	VM_WARN_ON((gfp & __GFP_THISNODE) && !node_online(nid));
+
+	return __alloc_folio(gfp, order, nid, NULL);
+}
+
 /*
  * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
  * prefer the current CPU's closest node. Otherwise node must be valid and
@@ -559,6 +570,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 
 #ifdef CONFIG_NUMA
 struct page *alloc_pages(gfp_t gfp, unsigned int order);
+struct folio *alloc_folio(gfp_t gfp, unsigned order);
 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
 			struct vm_area_struct *vma, unsigned long addr,
 			int node, bool hugepage);
@@ -569,6 +581,10 @@ static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
 {
 	return alloc_pages_node(numa_node_id(), gfp_mask, order);
 }
+static inline struct folio *alloc_folio(gfp_t gfp, unsigned int order)
+{
+	return __alloc_folio_node(gfp, order, numa_node_id());
+}
 #define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
 	alloc_pages(gfp_mask, order)
 #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 6c0185fdd815..9dde71607f7c 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -877,18 +877,20 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 static struct page *
 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 {
+	struct folio *folio;
+
 	VM_BUG_ON_PAGE(*hpage, *hpage);
 
-	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
-	if (unlikely(!*hpage)) {
+	folio = __alloc_folio_node(gfp, HPAGE_PMD_ORDER, node);
+	if (unlikely(!folio)) {
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
 		return NULL;
 	}
 
-	prep_transhuge_page(*hpage);
 	count_vm_event(THP_COLLAPSE_ALLOC);
-	return *hpage;
+	*hpage = &folio->page;
+	return &folio->page;
 }
 #else
 static int khugepaged_find_target_node(void)
@@ -896,24 +898,14 @@ static int khugepaged_find_target_node(void)
 	return 0;
 }
 
-static inline struct page *alloc_khugepaged_hugepage(void)
-{
-	struct page *page;
-
-	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
-			   HPAGE_PMD_ORDER);
-	if (page)
-		prep_transhuge_page(page);
-	return page;
-}
-
 static struct page *khugepaged_alloc_hugepage(bool *wait)
 {
-	struct page *hpage;
+	struct folio *folio;
 
 	do {
-		hpage = alloc_khugepaged_hugepage();
-		if (!hpage) {
+		folio = alloc_folio(alloc_hugepage_khugepaged_gfpmask(),
+					HPAGE_PMD_ORDER);
+		if (!folio) {
 			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 			if (!*wait)
 				return NULL;
@@ -922,9 +914,9 @@ static struct page *khugepaged_alloc_hugepage(bool *wait)
 			khugepaged_alloc_sleep();
 		} else
 			count_vm_event(THP_COLLAPSE_ALLOC);
-	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
+	} while (unlikely(!folio) && likely(khugepaged_enabled()));
 
-	return hpage;
+	return &folio->page;
 }
 
 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d79fa299b70c..382fec380f28 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2277,6 +2277,16 @@ struct page *alloc_pages(gfp_t gfp, unsigned order)
 }
 EXPORT_SYMBOL(alloc_pages);
 
+struct folio *alloc_folio(gfp_t gfp, unsigned order)
+{
+	struct page *page = alloc_pages(gfp | __GFP_COMP, order);
+
+	if (page && order > 1)
+		prep_transhuge_page(page);
+	return (struct folio *)page;
+}
+EXPORT_SYMBOL(alloc_folio);
+
 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
 {
 	struct mempolicy *pol = mpol_dup(vma_policy(src));
diff --git a/mm/migrate.c b/mm/migrate.c
index b234c3f3acb7..0b9cadbad900 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1562,7 +1562,7 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
 	struct migration_target_control *mtc;
 	gfp_t gfp_mask;
 	unsigned int order = 0;
-	struct page *new_page = NULL;
+	struct folio *new_folio = NULL;
 	int nid;
 	int zidx;
 
@@ -1592,12 +1592,9 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
 		gfp_mask |= __GFP_HIGHMEM;
 
-	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
+	new_folio = __alloc_folio(gfp_mask, order, nid, mtc->nmask);
 
-	if (new_page && PageTransHuge(new_page))
-		prep_transhuge_page(new_page);
-
-	return new_page;
+	return &new_folio->page;
 }
 
 #ifdef CONFIG_NUMA
@@ -2155,35 +2152,34 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	spinlock_t *ptl;
 	pg_data_t *pgdat = NODE_DATA(node);
 	int isolated = 0;
-	struct page *new_page = NULL;
+	struct folio *new_folio = NULL;
 	int page_lru = page_is_file_lru(page);
 	unsigned long start = address & HPAGE_PMD_MASK;
 
-	new_page = alloc_pages_node(node,
-		(GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
-		HPAGE_PMD_ORDER);
-	if (!new_page)
+	new_folio = __alloc_folio_node(GFP_TRANSHUGE_LIGHT | __GFP_THISNODE,
+			HPAGE_PMD_ORDER,
+			node);
+	if (!new_folio)
 		goto out_fail;
-	prep_transhuge_page(new_page);
 
 	isolated = numamigrate_isolate_page(pgdat, page);
 	if (!isolated) {
-		put_page(new_page);
+		folio_put(new_folio);
 		goto out_fail;
 	}
 
 	/* Prepare a page as a migration target */
-	__SetPageLocked(new_page);
+	__folio_set_locked_flag(new_folio);
 	if (PageSwapBacked(page))
-		__SetPageSwapBacked(new_page);
+		__folio_set_swapbacked_flag(new_folio);
 
 	/* anon mapping, we can simply copy page->mapping to the new page: */
-	new_page->mapping = page->mapping;
-	new_page->index = page->index;
+	new_folio->mapping = page->mapping;
+	new_folio->index = page->index;
 	/* flush the cache before copying using the kernel virtual address */
 	flush_cache_range(vma, start, start + HPAGE_PMD_SIZE);
-	migrate_page_copy(new_page, page);
-	WARN_ON(PageLRU(new_page));
+	migrate_page_copy(&new_folio->page, page);
+	WARN_ON(folio_lru(new_folio));
 
 	/* Recheck the target PMD */
 	ptl = pmd_lock(mm, pmd);
@@ -2191,13 +2187,13 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 		spin_unlock(ptl);
 
 		/* Reverse changes made by migrate_page_copy() */
-		if (TestClearPageActive(new_page))
+		if (folio_test_clear_active_flag(new_folio))
 			SetPageActive(page);
-		if (TestClearPageUnevictable(new_page))
+		if (folio_test_clear_unevictable_flag(new_folio))
 			SetPageUnevictable(page);
 
-		unlock_page(new_page);
-		put_page(new_page);		/* Free it */
+		folio_unlock(new_folio);
+		folio_put(new_folio);		/* Free it */
 
 		/* Retake the callers reference and putback on LRU */
 		get_page(page);
@@ -2208,7 +2204,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 		goto out_unlock;
 	}
 
-	entry = mk_huge_pmd(new_page, vma->vm_page_prot);
+	entry = mk_huge_pmd(&new_folio->page, vma->vm_page_prot);
 	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 
 	/*
@@ -2219,7 +2215,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	 * new page and page_add_new_anon_rmap guarantee the copy is
 	 * visible before the pagetable update.
 	 */
-	page_add_anon_rmap(new_page, vma, start, true);
+	page_add_anon_rmap(&new_folio->page, vma, start, true);
 	/*
 	 * At this point the pmd is numa/protnone (i.e. non present) and the TLB
 	 * has already been flushed globally.  So no TLB can be currently
@@ -2235,17 +2231,17 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	update_mmu_cache_pmd(vma, address, &entry);
 
 	page_ref_unfreeze(page, 2);
-	mlock_migrate_page(new_page, page);
+	mlock_migrate_page(&new_folio->page, page);
 	page_remove_rmap(page, true);
-	set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
+	set_page_owner_migrate_reason(&new_folio->page, MR_NUMA_MISPLACED);
 
 	spin_unlock(ptl);
 
 	/* Take an "isolate" reference and put new page on the LRU. */
-	get_page(new_page);
-	putback_lru_page(new_page);
+	folio_get(new_folio);
+	putback_lru_page(&new_folio->page);
 
-	unlock_page(new_page);
+	folio_unlock(new_folio);
 	unlock_page(page);
 	put_page(page);			/* Drop the rmap reference */
 	put_page(page);			/* Drop the LRU isolation reference */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5a1e5b624594..6b5d3f993a41 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5225,6 +5225,18 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 }
 EXPORT_SYMBOL(__alloc_pages);
 
+struct folio *__alloc_folio(gfp_t gfp, unsigned int order, int preferred_nid,
+		nodemask_t *nodemask)
+{
+	struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
+			preferred_nid, nodemask);
+
+	if (page && order > 1)
+		prep_transhuge_page(page);
+	return (struct folio *)page;
+}
+EXPORT_SYMBOL(__alloc_folio);
+
 /*
  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
  * address cannot represent highmem pages. Use alloc_pages and then kmap if
-- 
2.30.2

