From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-fsdevel@vger.kernel.org, linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-kernel@vger.kernel.org
Subject: [PATCH v9 88/96] iomap: Use folio offsets instead of page offsets
Date: Wed,  5 May 2021 16:06:20 +0100
Message-ID: <20210505150628.111735-89-willy@infradead.org>
In-Reply-To: <20210505150628.111735-1-willy@infradead.org>

Pass a folio around instead of the page, and make sure the offset
is relative to the start of the folio rather than the start of a page.
Also use size_t for the offset and length to make it clear that these
are byte counts, and to support folios larger than 2GB in the future.
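
For illustration only (not part of this patch): a minimal userspace
sketch of the page-relative versus folio-relative distinction. The
struct and helpers below are hypothetical stand-ins for the kernel's
folio, offset_in_page() and offset_in_folio(); it assumes, as the
kernel does, that folio sizes are powers of two, so the offset can be
computed by masking.

	#include <stdio.h>
	#include <stddef.h>

	#define PAGE_SIZE 4096UL

	/* Hypothetical stand-in for a multi-page folio. */
	struct folio_sketch {
		size_t size;	/* folio_size(): PAGE_SIZE << order */
	};

	/* Page-relative byte offset, as offset_in_page() computes it. */
	static size_t sketch_offset_in_page(unsigned long pos)
	{
		return pos & (PAGE_SIZE - 1);
	}

	/* Folio-relative byte offset; masks with folio_size() - 1. */
	static size_t sketch_offset_in_folio(const struct folio_sketch *folio,
					     unsigned long pos)
	{
		return pos & (folio->size - 1);
	}

	int main(void)
	{
		/* A 4-page (16KiB) folio starting at file position 0. */
		struct folio_sketch folio = { .size = 4 * PAGE_SIZE };
		unsigned long pos = PAGE_SIZE + 0x234;	/* in the 2nd page */

		/* 0x234: loses which page of the folio we are in. */
		printf("offset_in_page:  %#zx\n", sketch_offset_in_page(pos));
		/* 0x1234: unambiguous within the folio, hence size_t. */
		printf("offset_in_folio: %#zx\n",
				sketch_offset_in_folio(&folio, pos));
		return 0;
	}

For a single-page folio the two values are equal; the distinction only
matters once a folio spans more than one page.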

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/iomap/buffered-io.c | 85 ++++++++++++++++++++++--------------------
 1 file changed, 44 insertions(+), 41 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index f40a22a696c6..b61fc7da3d65 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -75,18 +75,18 @@ static void iomap_page_release(struct folio *folio)
 }
 
 /*
- * Calculate the range inside the page that we actually need to read.
+ * Calculate the range inside the folio that we actually need to read.
  */
-static void
-iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
-		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
+static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
+		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
 {
+	struct iomap_page *iop = to_iomap_page(folio);
 	loff_t orig_pos = *pos;
 	loff_t isize = i_size_read(inode);
 	unsigned block_bits = inode->i_blkbits;
 	unsigned block_size = (1 << block_bits);
-	unsigned poff = offset_in_page(*pos);
-	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
+	size_t poff = offset_in_folio(folio, *pos);
+	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
 	unsigned first = poff >> block_bits;
 	unsigned last = (poff + plen - 1) >> block_bits;
 
@@ -124,7 +124,7 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
 	 * page cache for blocks that are entirely outside of i_size.
 	 */
 	if (orig_pos <= isize && orig_pos + length > isize) {
-		unsigned end = offset_in_page(isize - 1) >> block_bits;
+		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
 
 		if (first <= end && last > end)
 			plen -= (last - end) * block_size;
@@ -134,31 +134,31 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
 	*lenp = plen;
 }
 
-static void iomap_iop_set_range_uptodate(struct page *page,
-		struct iomap_page *iop, unsigned off, unsigned len)
+static void iomap_iop_set_range_uptodate(struct folio *folio,
+		struct iomap_page *iop, size_t off, size_t len)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	unsigned first = off >> inode->i_blkbits;
 	unsigned last = (off + len - 1) >> inode->i_blkbits;
 	unsigned long flags;
 
 	spin_lock_irqsave(&iop->uptodate_lock, flags);
 	bitmap_set(iop->uptodate, first, last - first + 1);
-	if (bitmap_full(iop->uptodate, i_blocks_per_page(inode, page)))
-		SetPageUptodate(page);
+	if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
+		folio_mark_uptodate(folio);
 	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
 }
 
-static void iomap_set_range_uptodate(struct page *page,
-		struct iomap_page *iop, unsigned off, unsigned len)
+static void iomap_set_range_uptodate(struct folio *folio,
+		struct iomap_page *iop, size_t off, size_t len)
 {
-	if (PageError(page))
+	if (folio_error(folio))
 		return;
 
 	if (iop)
-		iomap_iop_set_range_uptodate(page, iop, off, len);
+		iomap_iop_set_range_uptodate(folio, iop, off, len);
 	else
-		SetPageUptodate(page);
+		folio_mark_uptodate(folio);
 }
 
 static void
@@ -169,15 +169,17 @@ iomap_read_page_end_io(struct bio_vec *bvec, int error)
 	struct iomap_page *iop = to_iomap_page(folio);
 
 	if (unlikely(error)) {
-		ClearPageUptodate(page);
-		SetPageError(page);
+		folio_clear_uptodate_flag(folio);
+		folio_set_error_flag(folio);
 	} else {
-		iomap_set_range_uptodate(page, iop, bvec->bv_offset,
-						bvec->bv_len);
+		size_t off = (page - &folio->page) * PAGE_SIZE +
+				bvec->bv_offset;
+
+		iomap_set_range_uptodate(folio, iop, off, bvec->bv_len);
 	}
 
 	if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
-		unlock_page(page);
+		folio_unlock(folio);
 }
 
 static void
@@ -237,7 +239,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 	struct iomap_page *iop = iomap_page_create(inode, folio);
 	bool same_page = false, is_contig = false;
 	loff_t orig_pos = pos;
-	unsigned poff, plen;
+	size_t poff, plen;
 	sector_t sector;
 
 	if (iomap->type == IOMAP_INLINE) {
@@ -246,14 +248,14 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 		return PAGE_SIZE;
 	}
 
-	/* zero post-eof blocks as the page may be mapped */
-	iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
+	/* zero post-eof blocks as the folio may be mapped */
+	iomap_adjust_read_range(inode, folio, &pos, length, &poff, &plen);
 	if (plen == 0)
 		goto done;
 
 	if (iomap_block_needs_zeroing(inode, iomap, pos)) {
-		zero_user(page, poff, plen);
-		iomap_set_range_uptodate(page, iop, poff, plen);
+		zero_user(&folio->page, poff, plen);
+		iomap_set_range_uptodate(folio, iop, poff, plen);
 		goto done;
 	}
 
@@ -264,7 +266,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 	/* Try to merge into a previous segment if we can */
 	sector = iomap_sector(iomap, pos);
 	if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
-		if (__bio_try_merge_page(ctx->bio, page, plen, poff,
+		if (__bio_try_merge_page(ctx->bio, &folio->page, plen, poff,
 				&same_page))
 			goto done;
 		is_contig = true;
@@ -296,7 +298,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 		ctx->bio->bi_end_io = iomap_read_end_io;
 	}
 
-	bio_add_page(ctx->bio, page, plen, poff);
+	bio_add_folio(ctx->bio, folio, plen, poff);
 done:
 	/*
 	 * Move the caller beyond our range so that it keeps making progress.
@@ -531,9 +533,8 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
 		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
 }
 
-static int
-iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
-		unsigned plen, struct iomap *iomap)
+static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
+		size_t poff, size_t plen, struct iomap *iomap)
 {
 	struct bio_vec bvec;
 	struct bio bio;
@@ -542,7 +543,7 @@ iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
 	bio.bi_opf = REQ_OP_READ;
 	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
 	bio_set_dev(&bio, iomap->bdev);
-	__bio_add_page(&bio, page, plen, poff);
+	bio_add_folio(&bio, folio, plen, poff);
 	return submit_bio_wait(&bio);
 }
 
@@ -555,14 +556,15 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
 	loff_t block_size = i_blocksize(inode);
 	loff_t block_start = round_down(pos, block_size);
 	loff_t block_end = round_up(pos + len, block_size);
-	unsigned from = offset_in_page(pos), to = from + len, poff, plen;
+	size_t from = offset_in_folio(folio, pos), to = from + len;
+	size_t poff, plen;
 
-	if (PageUptodate(page))
+	if (folio_uptodate(folio))
 		return 0;
-	ClearPageError(page);
+	folio_clear_error_flag(folio);
 
 	do {
-		iomap_adjust_read_range(inode, iop, &block_start,
+		iomap_adjust_read_range(inode, folio, &block_start,
 				block_end - block_start, &poff, &plen);
 		if (plen == 0)
 			break;
@@ -575,14 +577,15 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
 		if (iomap_block_needs_zeroing(inode, srcmap, block_start)) {
 			if (WARN_ON_ONCE(flags & IOMAP_WRITE_F_UNSHARE))
 				return -EIO;
-			zero_user_segments(page, poff, from, to, poff + plen);
+			zero_user_segments(&folio->page, poff, from, to,
+						poff + plen);
 		} else {
-			int status = iomap_read_page_sync(block_start, page,
+			int status = iomap_read_folio_sync(block_start, folio,
 					poff, plen, srcmap);
 			if (status)
 				return status;
 		}
-		iomap_set_range_uptodate(page, iop, poff, plen);
+		iomap_set_range_uptodate(folio, iop, poff, plen);
 	} while ((block_start += plen) < block_end);
 
 	return 0;
@@ -686,7 +689,7 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 	 */
 	if (unlikely(copied < len && !PageUptodate(page)))
 		return 0;
-	iomap_set_range_uptodate(page, iop, offset_in_page(pos), len);
+	iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
 	iomap_set_page_dirty(page);
 	return copied;
 }
-- 
2.30.2

