From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: "Darrick J. Wong" <djwong@kernel.org>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-xfs@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-block@vger.kernel.org,
	Jens Axboe <axboe@kernel.dk>,
	Christoph Hellwig <hch@infradead.org>
Subject: [PATCH 11/21] iomap: Use folio offsets instead of page offsets
Date: Mon,  1 Nov 2021 20:39:19 +0000
Message-ID: <20211101203929.954622-12-willy@infradead.org>
In-Reply-To: <20211101203929.954622-1-willy@infradead.org>

Pass a folio around instead of a page, and make sure the offset is
relative to the start of the folio instead of the start of the page.
Also use size_t for offset and length to make it clear that these are
byte counts, and to support >2GB folios in the future.
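
As a quick illustration (not part of this patch; a simplified sketch of
the kernel's helper definitions), the two offset helpers differ only in
the size of the unit they wrap within:

	/* Byte offset within a fixed PAGE_SIZE page. */
	#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

	/*
	 * Byte offset within the folio, which may span multiple pages;
	 * folio_size() is PAGE_SIZE << folio_order(folio).
	 */
	#define offset_in_folio(folio, p) \
		((unsigned long)(p) & (folio_size(folio) - 1))

For an order-2 folio (16KiB with 4KiB pages) starting at file offset 0,
pos == 5000 gives offset_in_page(pos) == 904 but
offset_in_folio(folio, pos) == 5000; every value that indexes into the
per-folio uptodate bitmap must use the folio-relative form.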

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
---
 fs/iomap/buffered-io.c | 79 ++++++++++++++++++++++--------------------
 1 file changed, 41 insertions(+), 38 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index d519972a11f1..dea577380215 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -75,18 +75,18 @@ static void iomap_page_release(struct folio *folio)
 }
 
 /*
- * Calculate the range inside the page that we actually need to read.
+ * Calculate the range inside the folio that we actually need to read.
  */
-static void
-iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
-		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
+static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
+		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
 {
+	struct iomap_page *iop = to_iomap_page(folio);
 	loff_t orig_pos = *pos;
 	loff_t isize = i_size_read(inode);
 	unsigned block_bits = inode->i_blkbits;
 	unsigned block_size = (1 << block_bits);
-	unsigned poff = offset_in_page(*pos);
-	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
+	size_t poff = offset_in_folio(folio, *pos);
+	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
 	unsigned first = poff >> block_bits;
 	unsigned last = (poff + plen - 1) >> block_bits;
 
@@ -124,7 +124,7 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
 	 * page cache for blocks that are entirely outside of i_size.
 	 */
 	if (orig_pos <= isize && orig_pos + length > isize) {
-		unsigned end = offset_in_page(isize - 1) >> block_bits;
+		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
 
 		if (first <= end && last > end)
 			plen -= (last - end) * block_size;
@@ -134,31 +134,31 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
 	*lenp = plen;
 }
 
-static void iomap_iop_set_range_uptodate(struct page *page,
-		struct iomap_page *iop, unsigned off, unsigned len)
+static void iomap_iop_set_range_uptodate(struct folio *folio,
+		struct iomap_page *iop, size_t off, size_t len)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	unsigned first = off >> inode->i_blkbits;
 	unsigned last = (off + len - 1) >> inode->i_blkbits;
 	unsigned long flags;
 
 	spin_lock_irqsave(&iop->uptodate_lock, flags);
 	bitmap_set(iop->uptodate, first, last - first + 1);
-	if (bitmap_full(iop->uptodate, i_blocks_per_page(inode, page)))
-		SetPageUptodate(page);
+	if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
+		folio_mark_uptodate(folio);
 	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
 }
 
-static void iomap_set_range_uptodate(struct page *page,
-		struct iomap_page *iop, unsigned off, unsigned len)
+static void iomap_set_range_uptodate(struct folio *folio,
+		struct iomap_page *iop, size_t off, size_t len)
 {
-	if (PageError(page))
+	if (folio_test_error(folio))
 		return;
 
 	if (iop)
-		iomap_iop_set_range_uptodate(page, iop, off, len);
+		iomap_iop_set_range_uptodate(folio, iop, off, len);
 	else
-		SetPageUptodate(page);
+		folio_mark_uptodate(folio);
 }
 
 static void iomap_finish_folio_read(struct folio *folio, size_t offset,
@@ -170,7 +170,7 @@ static void iomap_finish_folio_read(struct folio *folio, size_t offset,
 		folio_clear_uptodate(folio);
 		folio_set_error(folio);
 	} else {
-		iomap_set_range_uptodate(&folio->page, iop, offset, len);
+		iomap_set_range_uptodate(folio, iop, offset, len);
 	}
 
 	if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
@@ -202,6 +202,7 @@ static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
 	const struct iomap *iomap = iomap_iter_srcmap(iter);
 	size_t size = i_size_read(iter->inode) - iomap->offset;
 	size_t poff = offset_in_page(iomap->offset);
+	size_t offset = offset_in_folio(folio, iomap->offset);
 	void *addr;
 
 	if (PageUptodate(page))
@@ -214,7 +215,7 @@ static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
 		return -EIO;
 	if (WARN_ON_ONCE(size > iomap->length))
 		return -EIO;
-	if (poff > 0)
+	if (offset > 0)
 		iop = iomap_page_create(iter->inode, folio);
 	else
 		iop = to_iomap_page(folio);
@@ -223,7 +224,7 @@ static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
 	memcpy(addr, iomap->inline_data, size);
 	memset(addr + size, 0, PAGE_SIZE - poff - size);
 	kunmap_local(addr);
-	iomap_set_range_uptodate(page, iop, poff, PAGE_SIZE - poff);
+	iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff);
 	return PAGE_SIZE - poff;
 }
 
@@ -247,7 +248,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
 	struct folio *folio = page_folio(page);
 	struct iomap_page *iop;
 	loff_t orig_pos = pos;
-	unsigned poff, plen;
+	size_t poff, plen;
 	sector_t sector;
 
 	if (iomap->type == IOMAP_INLINE)
@@ -255,13 +256,13 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
 
 	/* zero post-eof blocks as the page may be mapped */
 	iop = iomap_page_create(iter->inode, folio);
-	iomap_adjust_read_range(iter->inode, iop, &pos, length, &poff, &plen);
+	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
 	if (plen == 0)
 		goto done;
 
 	if (iomap_block_needs_zeroing(iter, pos)) {
-		zero_user(page, poff, plen);
-		iomap_set_range_uptodate(page, iop, poff, plen);
+		zero_user(&folio->page, poff, plen);
+		iomap_set_range_uptodate(folio, iop, poff, plen);
 		goto done;
 	}
 
@@ -272,7 +273,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
 	sector = iomap_sector(iomap, pos);
 	if (!ctx->bio ||
 	    bio_end_sector(ctx->bio) != sector ||
-	    bio_add_page(ctx->bio, page, plen, poff) != plen) {
+	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
 		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
 		gfp_t orig_gfp = gfp;
 		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
@@ -296,8 +297,9 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
 		ctx->bio->bi_iter.bi_sector = sector;
 		bio_set_dev(ctx->bio, iomap->bdev);
 		ctx->bio->bi_end_io = iomap_read_end_io;
-		__bio_add_page(ctx->bio, page, plen, poff);
+		bio_add_folio(ctx->bio, folio, plen, poff);
 	}
+
 done:
 	/*
 	 * Move the caller beyond our range so that it keeps making progress.
@@ -524,9 +526,8 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
 		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
 }
 
-static int
-iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
-		unsigned plen, const struct iomap *iomap)
+static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
+		size_t poff, size_t plen, const struct iomap *iomap)
 {
 	struct bio_vec bvec;
 	struct bio bio;
@@ -535,7 +536,7 @@ iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
 	bio.bi_opf = REQ_OP_READ;
 	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
 	bio_set_dev(&bio, iomap->bdev);
-	__bio_add_page(&bio, page, plen, poff);
+	bio_add_folio(&bio, folio, plen, poff);
 	return submit_bio_wait(&bio);
 }
 
@@ -548,14 +549,15 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 	loff_t block_size = i_blocksize(iter->inode);
 	loff_t block_start = round_down(pos, block_size);
 	loff_t block_end = round_up(pos + len, block_size);
-	unsigned from = offset_in_page(pos), to = from + len, poff, plen;
+	size_t from = offset_in_folio(folio, pos), to = from + len;
+	size_t poff, plen;
 
-	if (PageUptodate(page))
+	if (folio_test_uptodate(folio))
 		return 0;
-	ClearPageError(page);
+	folio_clear_error(folio);
 
 	do {
-		iomap_adjust_read_range(iter->inode, iop, &block_start,
+		iomap_adjust_read_range(iter->inode, folio, &block_start,
 				block_end - block_start, &poff, &plen);
 		if (plen == 0)
 			break;
@@ -568,14 +570,15 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 		if (iomap_block_needs_zeroing(iter, block_start)) {
 			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
 				return -EIO;
-			zero_user_segments(page, poff, from, to, poff + plen);
+			zero_user_segments(&folio->page, poff, from, to,
+						poff + plen);
 		} else {
-			int status = iomap_read_page_sync(block_start, page,
+			int status = iomap_read_folio_sync(block_start, folio,
 					poff, plen, srcmap);
 			if (status)
 				return status;
 		}
-		iomap_set_range_uptodate(page, iop, poff, plen);
+		iomap_set_range_uptodate(folio, iop, poff, plen);
 	} while ((block_start += plen) < block_end);
 
 	return 0;
@@ -667,7 +670,7 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 	 */
 	if (unlikely(copied < len && !PageUptodate(page)))
 		return 0;
-	iomap_set_range_uptodate(page, iop, offset_in_page(pos), len);
+	iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
 	__set_page_dirty_nobuffers(page);
 	return copied;
 }
-- 
2.33.0
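
The bio_add_folio() calls in this patch depend on the helper added by
patch 02/21 of this series. A minimal sketch of that helper, assuming
it is a thin wrapper around bio_add_page():

	bool bio_add_folio(struct bio *bio, struct folio *folio,
			   size_t len, size_t off)
	{
		/* bio_vec stores len/offset as unsigned int. */
		if (len > UINT_MAX || off > UINT_MAX)
			return false;
		return bio_add_page(bio, &folio->page, len, off) > 0;
	}

Unlike __bio_add_page(), the boolean return must be checked, which is
why iomap_readpage_iter() above treats a false return as "allocate a
new bio".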



Thread overview: 71+ messages
2021-11-01 20:39 [PATCH 00/21] iomap/xfs folio patches Matthew Wilcox (Oracle)
2021-11-01 20:39 ` [PATCH 01/21] fs: Remove FS_THP_SUPPORT Matthew Wilcox (Oracle)
2021-11-02  7:12   ` Christoph Hellwig
2021-11-01 20:39 ` [PATCH 02/21] block: Add bio_add_folio() Matthew Wilcox (Oracle)
2021-11-01 20:51   ` Jens Axboe
2021-11-02 14:15     ` Matthew Wilcox
2021-11-02  7:12   ` Christoph Hellwig
2021-11-03  1:25   ` wangjianjian (C)
2021-11-03  2:22     ` Matthew Wilcox
2021-11-01 20:39 ` [PATCH 03/21] block: Add bio_for_each_folio_all() Matthew Wilcox (Oracle)
2021-11-01 20:51   ` Jens Axboe
2021-11-02  7:13   ` Christoph Hellwig
2021-11-02 20:24     ` Matthew Wilcox
2021-11-02 22:24       ` Darrick J. Wong
2021-11-02 22:33         ` Jens Axboe
2021-11-02 22:37           ` Darrick J. Wong
2021-11-01 20:39 ` [PATCH 04/21] iomap: Convert to_iomap_page to take a folio Matthew Wilcox (Oracle)
2021-11-02  7:13   ` Christoph Hellwig
2021-11-01 20:39 ` [PATCH 05/21] iomap: Convert iomap_page_create " Matthew Wilcox (Oracle)
2021-11-02  7:14   ` Christoph Hellwig
2021-11-01 20:39 ` [PATCH 06/21] iomap: Convert iomap_page_release " Matthew Wilcox (Oracle)
2021-11-01 20:39 ` [PATCH 07/21] iomap: Convert iomap_releasepage to use " Matthew Wilcox (Oracle)
2021-11-02  7:14   ` Christoph Hellwig
2021-11-02 22:39     ` Darrick J. Wong
2021-11-01 20:39 ` [PATCH 08/21] iomap: Add iomap_invalidate_folio Matthew Wilcox (Oracle)
2021-11-02  7:15   ` Christoph Hellwig
2021-11-02 22:36   ` Darrick J. Wong
2021-11-01 20:39 ` [PATCH 09/21] iomap: Pass the iomap_page into iomap_set_range_uptodate Matthew Wilcox (Oracle)
2021-11-02  7:16   ` Christoph Hellwig
2021-11-01 20:39 ` [PATCH 10/21] iomap: Convert bio completions to use folios Matthew Wilcox (Oracle)
2021-11-02  7:17   ` Christoph Hellwig
2021-11-01 20:39 ` Matthew Wilcox (Oracle) [this message]
2021-11-02  7:18   ` [PATCH 11/21] iomap: Use folio offsets instead of page offsets Christoph Hellwig
2021-11-01 20:39 ` [PATCH 12/21] iomap: Convert iomap_read_inline_data to take a folio Matthew Wilcox (Oracle)
2021-11-02  7:18   ` Christoph Hellwig
2021-11-01 20:39 ` [PATCH 13/21] iomap: Convert readahead and readpage to use " Matthew Wilcox (Oracle)
2021-11-02  7:20   ` Christoph Hellwig
2021-11-02 12:28     ` Matthew Wilcox
2021-11-01 20:39 ` [PATCH 14/21] iomap: Convert iomap_page_mkwrite " Matthew Wilcox (Oracle)
2021-11-02  7:21   ` Christoph Hellwig
2021-11-01 20:39 ` [PATCH 15/21] iomap: Convert iomap_write_begin and iomap_write_end to folios Matthew Wilcox (Oracle)
2021-11-02  0:25   ` kernel test robot
2021-11-02  7:22   ` Christoph Hellwig
2021-11-02 23:22   ` Darrick J. Wong
2021-11-03  3:15     ` Matthew Wilcox
2021-11-03 12:47       ` Matthew Wilcox
2021-11-01 20:39 ` [PATCH 16/21] iomap: Convert iomap_write_end_inline to take a folio Matthew Wilcox (Oracle)
2021-11-02  7:22   ` Christoph Hellwig
2021-11-01 20:39 ` [PATCH 17/21] iomap,xfs: Convert ->discard_page to ->discard_folio Matthew Wilcox (Oracle)
2021-11-02  7:23   ` Christoph Hellwig
2021-11-03 15:43   ` Darrick J. Wong
2021-11-01 20:39 ` [PATCH 18/21] iomap: Convert iomap_add_to_ioend to take a folio Matthew Wilcox (Oracle)
2021-11-02  7:26   ` Christoph Hellwig
2021-11-02 20:28     ` Matthew Wilcox
2021-11-03 15:54       ` Christoph Hellwig
2021-11-04  3:33         ` Matthew Wilcox
2021-11-04  8:38           ` Christoph Hellwig
2021-11-04  8:40             ` Christoph Hellwig
2021-11-03 16:00       ` Darrick J. Wong
2021-11-04  3:42         ` Matthew Wilcox
2021-11-01 20:39 ` [PATCH 19/21] iomap: Convert iomap_migrate_page to use folios Matthew Wilcox (Oracle)
2021-11-02  7:27   ` Christoph Hellwig
2021-11-03 16:02   ` Darrick J. Wong
2021-11-06  3:44   ` Matthew Wilcox
2021-11-01 20:39 ` [PATCH 20/21] iomap: Support multi-page folios in invalidatepage Matthew Wilcox (Oracle)
2021-11-02  7:27   ` Christoph Hellwig
2021-11-03 16:03   ` Darrick J. Wong
2021-11-01 20:39 ` [PATCH 21/21] xfs: Support multi-page folios Matthew Wilcox (Oracle)
2021-11-02  7:27   ` Christoph Hellwig
2021-11-03 16:07   ` Darrick J. Wong
