From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: "Darrick J. Wong" <djwong@kernel.org>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-xfs@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	linux-kernel@vger.kernel.org, Christoph Hellwig <hch@lst.de>
Subject: [PATCH v3 22/25] iomap: Convert iomap_add_to_ioend() to take a folio
Date: Thu, 16 Dec 2021 21:07:12 +0000
Message-ID: <20211216210715.3801857-23-willy@infradead.org>
In-Reply-To: <20211216210715.3801857-1-willy@infradead.org>

We still iterate one block at a time, but now we call compound_head()
less often.
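
As an aside for reviewers, here is a minimal sketch (illustrative only,
not part of the diff below) of where the saving comes from: each legacy
page-flag macro re-resolves the head page internally, while page_folio()
does that lookup once and the folio_test_*() helpers then operate on the
folio directly.

	/* Before: every Page* macro hides a compound_head(page) lookup. */
	WARN_ON_ONCE(!PageLocked(page));
	WARN_ON_ONCE(PageWriteback(page));
	WARN_ON_ONCE(PageDirty(page));

	/* After: resolve the head page once, then test the folio. */
	struct folio *folio = page_folio(page);	/* one lookup */
	WARN_ON_ONCE(!folio_test_locked(folio));
	WARN_ON_ONCE(folio_test_writeback(folio));
	WARN_ON_ONCE(folio_test_dirty(folio));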

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
---
 fs/iomap/buffered-io.c | 70 ++++++++++++++++++++----------------------
 1 file changed, 34 insertions(+), 36 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 8bfdda8e5f1c..a36695db6f9d 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1263,29 +1263,29 @@ iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
  * first; otherwise finish off the current ioend and start another.
  */
 static void
-iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
+iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
 		struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
 		struct writeback_control *wbc, struct list_head *iolist)
 {
-	sector_t sector = iomap_sector(&wpc->iomap, offset);
+	sector_t sector = iomap_sector(&wpc->iomap, pos);
 	unsigned len = i_blocksize(inode);
-	unsigned poff = offset & (PAGE_SIZE - 1);
+	size_t poff = offset_in_folio(folio, pos);
 
-	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
+	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
 		if (wpc->ioend)
 			list_add(&wpc->ioend->io_list, iolist);
-		wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc);
+		wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
 	}
 
-	if (bio_add_page(wpc->ioend->io_bio, page, len, poff) != len) {
+	if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
 		wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
-		__bio_add_page(wpc->ioend->io_bio, page, len, poff);
+		bio_add_folio(wpc->ioend->io_bio, folio, len, poff);
 	}
 
 	if (iop)
 		atomic_add(len, &iop->write_bytes_pending);
 	wpc->ioend->io_size += len;
-	wbc_account_cgroup_owner(wbc, page, len);
+	wbc_account_cgroup_owner(wbc, &folio->page, len);
 }
 
 /*
@@ -1307,9 +1307,8 @@ iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
 static int
 iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 		struct writeback_control *wbc, struct inode *inode,
-		struct page *page, u64 end_pos)
+		struct folio *folio, u64 end_pos)
 {
-	struct folio *folio = page_folio(page);
 	struct iomap_page *iop = iomap_page_create(inode, folio);
 	struct iomap_ioend *ioend, *next;
 	unsigned len = i_blocksize(inode);
@@ -1336,15 +1335,15 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 			continue;
 		if (wpc->iomap.type == IOMAP_HOLE)
 			continue;
-		iomap_add_to_ioend(inode, pos, page, iop, wpc, wbc,
+		iomap_add_to_ioend(inode, pos, folio, iop, wpc, wbc,
 				 &submit_list);
 		count++;
 	}
 
 	WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
-	WARN_ON_ONCE(!PageLocked(page));
-	WARN_ON_ONCE(PageWriteback(page));
-	WARN_ON_ONCE(PageDirty(page));
+	WARN_ON_ONCE(!folio_test_locked(folio));
+	WARN_ON_ONCE(folio_test_writeback(folio));
+	WARN_ON_ONCE(folio_test_dirty(folio));
 
 	/*
 	 * We cannot cancel the ioend directly here on error.  We may have
@@ -1362,14 +1361,14 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 		if (wpc->ops->discard_folio)
 			wpc->ops->discard_folio(folio, pos);
 		if (!count) {
-			ClearPageUptodate(page);
-			unlock_page(page);
+			folio_clear_uptodate(folio);
+			folio_unlock(folio);
 			goto done;
 		}
 	}
 
-	set_page_writeback(page);
-	unlock_page(page);
+	folio_start_writeback(folio);
+	folio_unlock(folio);
 
 	/*
 	 * Preserve the original error if there was one; catch
@@ -1390,9 +1389,9 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	 * with a partial page truncate on a sub-page block sized filesystem.
 	 */
 	if (!count)
-		end_page_writeback(page);
+		folio_end_writeback(folio);
 done:
-	mapping_set_error(page->mapping, error);
+	mapping_set_error(folio->mapping, error);
 	return error;
 }
 
@@ -1406,14 +1405,15 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 static int
 iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
 {
+	struct folio *folio = page_folio(page);
 	struct iomap_writepage_ctx *wpc = data;
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	u64 end_pos, isize;
 
-	trace_iomap_writepage(inode, page_offset(page), PAGE_SIZE);
+	trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));
 
 	/*
-	 * Refuse to write the page out if we're called from reclaim context.
+	 * Refuse to write the folio out if we're called from reclaim context.
 	 *
 	 * This avoids stack overflows when called from deeply used stacks in
 	 * random callers for direct reclaim or memcg reclaim.  We explicitly
@@ -1427,10 +1427,10 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
 		goto redirty;
 
 	/*
-	 * Is this page beyond the end of the file?
+	 * Is this folio beyond the end of the file?
 	 *
-	 * The page index is less than the end_index, adjust the end_offset
-	 * to the highest offset that this page should represent.
+	 * The folio index is less than the end_index, adjust the end_pos
+	 * to the highest offset that this folio should represent.
 	 * -----------------------------------------------------
 	 * |			file mapping	       | <EOF> |
 	 * -----------------------------------------------------
@@ -1440,7 +1440,7 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
 	 * ---------------------------------^------------------|
 	 */
 	isize = i_size_read(inode);
-	end_pos = page_offset(page) + PAGE_SIZE;
+	end_pos = folio_pos(folio) + folio_size(folio);
 	if (end_pos > isize) {
 		/*
 		 * Check whether the page to write out is beyond or straddles
@@ -1453,7 +1453,7 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
 		 * |				    |      Straddles     |
 		 * ---------------------------------^-----------|--------|
 		 */
-		size_t poff = offset_in_page(isize);
+		size_t poff = offset_in_folio(folio, isize);
 		pgoff_t end_index = isize >> PAGE_SHIFT;
 
 		/*
@@ -1473,8 +1473,8 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
 		 * checking if the page is totally beyond i_size or if its
 		 * offset is just equal to the EOF.
 		 */
-		if (page->index > end_index ||
-		    (page->index == end_index && poff == 0))
+		if (folio->index > end_index ||
+		    (folio->index == end_index && poff == 0))
 			goto redirty;
 
 		/*
@@ -1485,17 +1485,15 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
 		 * memory is zeroed when mapped, and writes to that region are
 		 * not written out to the file."
 		 */
-		zero_user_segment(page, poff, PAGE_SIZE);
-
-		/* Adjust the end_offset to the end of file */
+		folio_zero_segment(folio, poff, folio_size(folio));
 		end_pos = isize;
 	}
 
-	return iomap_writepage_map(wpc, wbc, inode, page, end_pos);
+	return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);
 
 redirty:
-	redirty_page_for_writepage(wbc, page);
-	unlock_page(page);
+	folio_redirty_for_writepage(wbc, folio);
+	folio_unlock(folio);
 	return 0;
 }
 
-- 
2.33.0
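
A note on the bio_add_folio() calls above (an illustrative sketch, not
part of the patch): unlike bio_add_page(), which returns the number of
bytes added, bio_add_folio() returns a bool, hence the simpler test in
iomap_add_to_ioend().  If the add fails the current bio is full, so a
fresh bio is chained and the add is retried; the retry cannot fail
because the new bio is empty.

	if (!bio_add_folio(bio, folio, len, poff)) {
		bio = iomap_chain_bio(bio);	/* current bio is full */
		bio_add_folio(bio, folio, len, poff);	/* empty bio: cannot fail */
	}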

