All of lore.kernel.org
 help / color / mirror / Atom feed
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: "Darrick J. Wong" <djwong@kernel.org>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-xfs@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-block@vger.kernel.org,
	Jens Axboe <axboe@kernel.dk>,
	Christoph Hellwig <hch@infradead.org>,
	Christoph Hellwig <hch@lst.de>
Subject: [PATCH v2 20/28] iomap: Convert iomap_write_begin() and iomap_write_end() to folios
Date: Mon,  8 Nov 2021 04:05:43 +0000	[thread overview]
Message-ID: <20211108040551.1942823-21-willy@infradead.org> (raw)
In-Reply-To: <20211108040551.1942823-1-willy@infradead.org>

These functions still only work in PAGE_SIZE chunks, but there are
fewer conversions from tail to head pages as a result of this patch.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
---
 fs/iomap/buffered-io.c | 66 ++++++++++++++++++++----------------------
 1 file changed, 31 insertions(+), 35 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 9c61d12028ca..f4ae200adc4c 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -539,9 +539,8 @@ static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
 }
 
 static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
-		unsigned len, struct page *page)
+		size_t len, struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
 	struct iomap_page *iop = iomap_page_create(iter->inode, folio);
 	loff_t block_size = i_blocksize(iter->inode);
@@ -582,9 +581,8 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 }
 
 static int iomap_write_begin_inline(const struct iomap_iter *iter,
-		struct page *page)
+		struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
 	int ret;
 
 	/* needs more work for the tailpacking case; disable for now */
@@ -597,12 +595,12 @@ static int iomap_write_begin_inline(const struct iomap_iter *iter,
 }
 
 static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
-		unsigned len, struct page **pagep)
+		size_t len, struct folio **foliop)
 {
 	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
-	struct page *page;
 	struct folio *folio;
+	unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
 	int status = 0;
 
 	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
@@ -618,30 +616,29 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 			return status;
 	}
 
-	page = grab_cache_page_write_begin(iter->inode->i_mapping,
-				pos >> PAGE_SHIFT, AOP_FLAG_NOFS);
-	if (!page) {
+	folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
+			fgp, mapping_gfp_mask(iter->inode->i_mapping));
+	if (!folio) {
 		status = -ENOMEM;
 		goto out_no_page;
 	}
-	folio = page_folio(page);
 
 	if (srcmap->type == IOMAP_INLINE)
-		status = iomap_write_begin_inline(iter, page);
+		status = iomap_write_begin_inline(iter, folio);
 	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
 		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
 	else
-		status = __iomap_write_begin(iter, pos, len, page);
+		status = __iomap_write_begin(iter, pos, len, folio);
 
 	if (unlikely(status))
 		goto out_unlock;
 
-	*pagep = page;
+	*foliop = folio;
 	return 0;
 
 out_unlock:
-	unlock_page(page);
-	put_page(page);
+	folio_unlock(folio);
+	folio_put(folio);
 	iomap_write_failed(iter->inode, pos, len);
 
 out_no_page:
@@ -651,11 +648,10 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 }
 
 static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
-		size_t copied, struct page *page)
+		size_t copied, struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
 	struct iomap_page *iop = to_iomap_page(folio);
-	flush_dcache_page(page);
+	flush_dcache_folio(folio);
 
 	/*
 	 * The blocks that were entirely written will now be uptodate, so we
@@ -668,10 +664,10 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 	 * non-uptodate page as a zero-length write, and force the caller to
 	 * redo the whole thing.
 	 */
-	if (unlikely(copied < len && !PageUptodate(page)))
+	if (unlikely(copied < len && !folio_test_uptodate(folio)))
 		return 0;
 	iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
-	__set_page_dirty_nobuffers(page);
+	filemap_dirty_folio(inode->i_mapping, folio);
 	return copied;
 }
 
@@ -695,7 +691,7 @@ static size_t iomap_write_end_inline(const struct iomap_iter *iter,
 
 /* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
 static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
-		size_t copied, struct page *page)
+		size_t copied, struct folio *folio)
 {
 	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
@@ -706,9 +702,9 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
 		ret = iomap_write_end_inline(iter, page, pos, copied);
 	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
 		ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
-				copied, page, NULL);
+				copied, &folio->page, NULL);
 	} else {
-		ret = __iomap_write_end(iter->inode, pos, len, copied, page);
+		ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
 	}
 
 	/*
@@ -720,13 +716,13 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
 		i_size_write(iter->inode, pos + ret);
 		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
 	}
-	unlock_page(page);
+	folio_unlock(folio);
 
 	if (old_size < pos)
 		pagecache_isize_extended(iter->inode, old_size, pos);
 	if (page_ops && page_ops->page_done)
-		page_ops->page_done(iter->inode, pos, ret, page);
-	put_page(page);
+		page_ops->page_done(iter->inode, pos, ret, &folio->page);
+	folio_put(folio);
 
 	if (ret < len)
 		iomap_write_failed(iter->inode, pos, len);
@@ -741,6 +737,7 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
 	long status = 0;
 
 	do {
+		struct folio *folio;
 		struct page *page;
 		unsigned long offset;	/* Offset into pagecache page */
 		unsigned long bytes;	/* Bytes to write to page */
@@ -764,16 +761,17 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
 			break;
 		}
 
-		status = iomap_write_begin(iter, pos, bytes, &page);
+		status = iomap_write_begin(iter, pos, bytes, &folio);
 		if (unlikely(status))
 			break;
 
+		page = folio_file_page(folio, pos >> PAGE_SHIFT);
 		if (mapping_writably_mapped(iter->inode->i_mapping))
 			flush_dcache_page(page);
 
 		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 
-		status = iomap_write_end(iter, pos, bytes, copied, page);
+		status = iomap_write_end(iter, pos, bytes, copied, folio);
 
 		if (unlikely(copied != status))
 			iov_iter_revert(i, copied - status);
@@ -839,13 +837,13 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
 	do {
 		unsigned long offset = offset_in_page(pos);
 		unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
-		struct page *page;
+		struct folio *folio;
 
-		status = iomap_write_begin(iter, pos, bytes, &page);
+		status = iomap_write_begin(iter, pos, bytes, &folio);
 		if (unlikely(status))
 			return status;
 
-		status = iomap_write_end(iter, pos, bytes, bytes, page);
+		status = iomap_write_end(iter, pos, bytes, bytes, folio);
 		if (WARN_ON_ONCE(status == 0))
 			return -EIO;
 
@@ -882,21 +880,19 @@ EXPORT_SYMBOL_GPL(iomap_file_unshare);
 static s64 __iomap_zero_iter(struct iomap_iter *iter, loff_t pos, u64 length)
 {
 	struct folio *folio;
-	struct page *page;
 	int status;
 	size_t offset, bytes;
 
-	status = iomap_write_begin(iter, pos, length, &page);
+	status = iomap_write_begin(iter, pos, length, &folio);
 	if (status)
 		return status;
-	folio = page_folio(page);
 
 	offset = offset_in_folio(folio, pos);
 	bytes = min_t(u64, folio_size(folio) - offset, length);
 	folio_zero_range(folio, offset, bytes);
 	folio_mark_accessed(folio);
 
-	return iomap_write_end(iter, pos, bytes, bytes, page);
+	return iomap_write_end(iter, pos, bytes, bytes, folio);
 }
 
 static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
-- 
2.33.0


  parent reply	other threads:[~2021-11-08  4:57 UTC|newest]

Thread overview: 64+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-11-08  4:05 [PATCH v2 00/28] iomap/xfs folio patches Matthew Wilcox (Oracle)
2021-11-08  4:05 ` [PATCH v2 01/28] csky,sparc: Declare flush_dcache_folio() Matthew Wilcox (Oracle)
2021-11-09  8:36   ` Christoph Hellwig
2021-11-15 15:54     ` Matthew Wilcox
2021-11-16  6:33       ` Christoph Hellwig
2021-11-16 21:49         ` Matthew Wilcox
2021-11-17  9:52           ` Geert Uytterhoeven
2021-11-08  4:05 ` [PATCH v2 02/28] mm: Add functions to zero portions of a folio Matthew Wilcox (Oracle)
2021-11-09  8:40   ` Christoph Hellwig
2021-11-17  4:45   ` Darrick J. Wong
2021-11-17 14:07     ` Matthew Wilcox
2021-11-17 17:07       ` Darrick J. Wong
2021-11-18 15:55         ` Matthew Wilcox
2021-11-18 17:26           ` Darrick J. Wong
2021-11-18 20:08             ` Matthew Wilcox
2021-11-08  4:05 ` [PATCH v2 03/28] fs: Remove FS_THP_SUPPORT Matthew Wilcox (Oracle)
2021-11-17  4:36   ` Darrick J. Wong
2021-11-08  4:05 ` [PATCH v2 04/28] fs: Rename AS_THP_SUPPORT and mapping_thp_support Matthew Wilcox (Oracle)
2021-11-09  8:41   ` Christoph Hellwig
2021-11-15 16:03     ` Matthew Wilcox
2021-11-16  6:33       ` Christoph Hellwig
2021-11-08  4:05 ` [PATCH v2 05/28] block: Add bio_add_folio() Matthew Wilcox (Oracle)
2021-11-17  4:48   ` Darrick J. Wong
2021-11-08  4:05 ` [PATCH v2 06/28] block: Add bio_for_each_folio_all() Matthew Wilcox (Oracle)
2021-11-17  4:48   ` Darrick J. Wong
2021-11-08  4:05 ` [PATCH v2 07/28] fs/buffer: Convert __block_write_begin_int() to take a folio Matthew Wilcox (Oracle)
2021-11-09  8:42   ` Christoph Hellwig
2021-11-17  4:35   ` Darrick J. Wong
2021-11-08  4:05 ` [PATCH v2 08/28] iomap: Convert to_iomap_page " Matthew Wilcox (Oracle)
2021-11-08  4:05 ` [PATCH v2 09/28] iomap: Convert iomap_page_create " Matthew Wilcox (Oracle)
2021-11-08  4:05 ` [PATCH v2 10/28] iomap: Convert iomap_page_release " Matthew Wilcox (Oracle)
2021-11-08  4:05 ` [PATCH v2 11/28] iomap: Convert iomap_releasepage to use " Matthew Wilcox (Oracle)
2021-11-08  4:05 ` [PATCH v2 12/28] iomap: Add iomap_invalidate_folio Matthew Wilcox (Oracle)
2021-11-17  2:20   ` Darrick J. Wong
2021-11-08  4:05 ` [PATCH v2 13/28] iomap: Pass the iomap_page into iomap_set_range_uptodate Matthew Wilcox (Oracle)
2021-11-08  4:05 ` [PATCH v2 14/28] iomap: Convert bio completions to use folios Matthew Wilcox (Oracle)
2021-11-08  4:05 ` [PATCH v2 15/28] iomap: Use folio offsets instead of page offsets Matthew Wilcox (Oracle)
2021-11-08  4:05 ` [PATCH v2 16/28] iomap: Convert iomap_read_inline_data to take a folio Matthew Wilcox (Oracle)
2021-11-08  4:05 ` [PATCH v2 17/28] iomap: Convert readahead and readpage to use " Matthew Wilcox (Oracle)
2021-11-09  8:43   ` Christoph Hellwig
2021-11-08  4:05 ` [PATCH v2 18/28] iomap: Convert iomap_page_mkwrite " Matthew Wilcox (Oracle)
2021-11-08  4:05 ` [PATCH v2 19/28] iomap: Convert __iomap_zero_iter " Matthew Wilcox (Oracle)
2021-11-09  8:47   ` Christoph Hellwig
2021-11-17  2:24   ` Darrick J. Wong
2021-11-17 14:20     ` Matthew Wilcox
2021-12-09 21:38   ` Matthew Wilcox
2021-12-10 16:19     ` Matthew Wilcox
2021-12-13  7:34       ` Christoph Hellwig
2021-12-13 18:08         ` Matthew Wilcox
2021-12-16 19:36     ` Darrick J. Wong
2021-12-16 20:43       ` Matthew Wilcox
2021-11-08  4:05 ` Matthew Wilcox (Oracle) [this message]
2021-11-17  4:31   ` [PATCH v2 20/28] iomap: Convert iomap_write_begin() and iomap_write_end() to folios Darrick J. Wong
2021-11-17 14:31     ` Matthew Wilcox
2021-11-17 17:10       ` Darrick J. Wong
2021-11-08  4:05 ` [PATCH v2 21/28] iomap: Convert iomap_write_end_inline to take a folio Matthew Wilcox (Oracle)
2021-11-08  4:05 ` [PATCH v2 22/28] iomap,xfs: Convert ->discard_page to ->discard_folio Matthew Wilcox (Oracle)
2021-11-08  4:05 ` [PATCH v2 23/28] iomap: Simplify iomap_writepage_map() Matthew Wilcox (Oracle)
2021-11-08  4:05 ` [PATCH v2 24/28] iomap: Simplify iomap_do_writepage() Matthew Wilcox (Oracle)
2021-11-08  4:05 ` [PATCH v2 25/28] iomap: Convert iomap_add_to_ioend() to take a folio Matthew Wilcox (Oracle)
2021-11-17  4:34   ` Darrick J. Wong
2021-11-08  4:05 ` [PATCH v2 26/28] iomap: Convert iomap_migrate_page() to use folios Matthew Wilcox (Oracle)
2021-11-08  4:05 ` [PATCH v2 27/28] iomap: Support multi-page folios in invalidatepage Matthew Wilcox (Oracle)
2021-11-08  4:05 ` [PATCH v2 28/28] xfs: Support multi-page folios Matthew Wilcox (Oracle)

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20211108040551.1942823-21-willy@infradead.org \
    --to=willy@infradead.org \
    --cc=axboe@kernel.dk \
    --cc=djwong@kernel.org \
    --cc=hch@infradead.org \
    --cc=hch@lst.de \
    --cc=linux-block@vger.kernel.org \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-xfs@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.