From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-fsdevel@vger.kernel.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-mm@kvack.org, linux-block@vger.kernel.org
Subject: [PATCH v15 13/17] iomap: Convert iomap_write_begin and iomap_write_end to folios
Date: Mon, 19 Jul 2021 19:39:57 +0100
Message-ID: <20210719184001.1750630-14-willy@infradead.org>
In-Reply-To: <20210719184001.1750630-1-willy@infradead.org>

These functions still work only in PAGE_SIZE chunks, but this patch
removes a number of the conversions from head pages to tail pages.
A sketch of the new page cache lookup pattern follows the diffstat below.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/iomap/buffered-io.c | 68 ++++++++++++++++++++++--------------------
 1 file changed, 36 insertions(+), 32 deletions(-)
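
For reference while reviewing, here is a sketch of the lookup pattern
this patch adopts (illustrative only, not part of the diff; the wrapper
function and its parameters are hypothetical, but every call in it
appears in the hunks below):

	/*
	 * Illustrative sketch: the folio-based page cache lookup that
	 * iomap_write_begin() switches to in this patch.
	 */
	static int example_write_begin(struct address_space *mapping,
			loff_t pos)
	{
		unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT |
				FGP_STABLE | FGP_NOFS;
		struct folio *folio;
		struct page *page;

		/*
		 * One call replaces grab_cache_page_write_begin(); the
		 * FGP flags spell out the lock/create/stable-write
		 * behaviour explicitly.
		 */
		folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp,
				mapping_gfp_mask(mapping));
		if (!folio)
			return -ENOMEM;

		/*
		 * A folio may span several pages; interfaces that still
		 * operate in PAGE_SIZE chunks need the precise page
		 * containing @pos.
		 */
		page = folio_file_page(folio, pos >> PAGE_SHIFT);
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/* ... operate on @page in PAGE_SIZE chunks ... */

		folio_unlock(folio);	/* was unlock_page(page) */
		folio_put(folio);	/* was put_page(page) */
		return 0;
	}

The remaining page helpers in the diff convert one-to-one:
flush_dcache_page() becomes flush_dcache_folio(), PageUptodate()
becomes folio_test_uptodate(), __set_page_dirty_nobuffers() becomes
filemap_dirty_folio(), and mark_page_accessed() becomes
folio_mark_accessed().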

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index dd05db36e135..4b02337009bc 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -543,9 +543,8 @@ static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
 
 static int
 __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
-		struct page *page, struct iomap *srcmap)
+		struct folio *folio, struct iomap *srcmap)
 {
-	struct folio *folio = page_folio(page);
 	struct iomap_page *iop = iomap_page_create(inode, folio);
 	loff_t block_size = i_blocksize(inode);
 	loff_t block_start = round_down(pos, block_size);
@@ -585,12 +584,14 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
 	return 0;
 }
 
-static int
-iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
-		struct page **pagep, struct iomap *iomap, struct iomap *srcmap)
+static int iomap_write_begin(struct inode *inode, loff_t pos, size_t len,
+		unsigned flags, struct folio **foliop, struct iomap *iomap,
+		struct iomap *srcmap)
 {
 	const struct iomap_page_ops *page_ops = iomap->page_ops;
+	struct folio *folio;
 	struct page *page;
+	unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
 	int status = 0;
 
 	BUG_ON(pos + len > iomap->offset + iomap->length);
@@ -606,30 +607,31 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
 			return status;
 	}
 
-	page = grab_cache_page_write_begin(inode->i_mapping, pos >> PAGE_SHIFT,
-			AOP_FLAG_NOFS);
-	if (!page) {
+	folio = __filemap_get_folio(inode->i_mapping, pos >> PAGE_SHIFT, fgp,
+			mapping_gfp_mask(inode->i_mapping));
+	if (!folio) {
 		status = -ENOMEM;
 		goto out_no_page;
 	}
 
+	page = folio_file_page(folio, pos >> PAGE_SHIFT);
 	if (srcmap->type == IOMAP_INLINE)
 		iomap_read_inline_data(inode, page, srcmap);
 	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
 		status = __block_write_begin_int(page, pos, len, NULL, srcmap);
 	else
-		status = __iomap_write_begin(inode, pos, len, flags, page,
+		status = __iomap_write_begin(inode, pos, len, flags, folio,
 				srcmap);
 
 	if (unlikely(status))
 		goto out_unlock;
 
-	*pagep = page;
+	*foliop = folio;
 	return 0;
 
 out_unlock:
-	unlock_page(page);
-	put_page(page);
+	folio_unlock(folio);
+	folio_put(folio);
 	iomap_write_failed(inode, pos, len);
 
 out_no_page:
@@ -639,11 +641,10 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
 }
 
 static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
-		size_t copied, struct page *page)
+		size_t copied, struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
 	struct iomap_page *iop = to_iomap_page(folio);
-	flush_dcache_page(page);
+	flush_dcache_folio(folio);
 
 	/*
 	 * The blocks that were entirely written will now be uptodate, so we
@@ -656,10 +657,10 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 	 * uptodate page as a zero-length write, and force the caller to redo
 	 * the whole thing.
 	 */
-	if (unlikely(copied < len && !PageUptodate(page)))
+	if (unlikely(copied < len && !folio_test_uptodate(folio)))
 		return 0;
 	iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
-	__set_page_dirty_nobuffers(page);
+	filemap_dirty_folio(inode->i_mapping, folio);
 	return copied;
 }
 
@@ -682,9 +683,10 @@ static size_t iomap_write_end_inline(struct inode *inode, struct page *page,
 
 /* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
 static size_t iomap_write_end(struct inode *inode, loff_t pos, size_t len,
-		size_t copied, struct page *page, struct iomap *iomap,
+		size_t copied, struct folio *folio, struct iomap *iomap,
 		struct iomap *srcmap)
 {
+	struct page *page = folio_file_page(folio, pos >> PAGE_SHIFT);
 	const struct iomap_page_ops *page_ops = iomap->page_ops;
 	loff_t old_size = inode->i_size;
 	size_t ret;
@@ -695,7 +697,7 @@ static size_t iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 		ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
 				page, NULL);
 	} else {
-		ret = __iomap_write_end(inode, pos, len, copied, page);
+		ret = __iomap_write_end(inode, pos, len, copied, folio);
 	}
 
 	/*
@@ -707,13 +709,13 @@ static size_t iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 		i_size_write(inode, pos + ret);
 		iomap->flags |= IOMAP_F_SIZE_CHANGED;
 	}
-	unlock_page(page);
+	folio_unlock(folio);
 
 	if (old_size < pos)
 		pagecache_isize_extended(inode, old_size, pos);
 	if (page_ops && page_ops->page_done)
 		page_ops->page_done(inode, pos, ret, page, iomap);
-	put_page(page);
+	folio_put(folio);
 
 	if (ret < len)
 		iomap_write_failed(inode, pos, len);
@@ -729,6 +731,7 @@ iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 	ssize_t written = 0;
 
 	do {
+		struct folio *folio;
 		struct page *page;
 		unsigned long offset;	/* Offset into pagecache page */
 		unsigned long bytes;	/* Bytes to write to page */
@@ -752,18 +755,19 @@ iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 			break;
 		}
 
-		status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap,
+		status = iomap_write_begin(inode, pos, bytes, 0, &folio, iomap,
 				srcmap);
 		if (unlikely(status))
 			break;
 
+		page = folio_file_page(folio, pos >> PAGE_SHIFT);
 		if (mapping_writably_mapped(inode->i_mapping))
 			flush_dcache_page(page);
 
 		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 
-		status = iomap_write_end(inode, pos, bytes, copied, page, iomap,
-				srcmap);
+		status = iomap_write_end(inode, pos, bytes, copied, folio,
+				iomap, srcmap);
 
 		if (unlikely(copied != status))
 			iov_iter_revert(i, copied - status);
@@ -827,14 +831,14 @@ iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 	do {
 		unsigned long offset = offset_in_page(pos);
 		unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
-		struct page *page;
+		struct folio *folio;
 
 		status = iomap_write_begin(inode, pos, bytes,
-				IOMAP_WRITE_F_UNSHARE, &page, iomap, srcmap);
+				IOMAP_WRITE_F_UNSHARE, &folio, iomap, srcmap);
 		if (unlikely(status))
 			return status;
 
-		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap,
+		status = iomap_write_end(inode, pos, bytes, bytes, folio, iomap,
 				srcmap);
 		if (WARN_ON_ONCE(status == 0))
 			return -EIO;
@@ -873,19 +877,19 @@ EXPORT_SYMBOL_GPL(iomap_file_unshare);
 static s64 iomap_zero(struct inode *inode, loff_t pos, u64 length,
 		struct iomap *iomap, struct iomap *srcmap)
 {
-	struct page *page;
+	struct folio *folio;
 	int status;
 	unsigned offset = offset_in_page(pos);
 	unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);
 
-	status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap);
+	status = iomap_write_begin(inode, pos, bytes, 0, &folio, iomap, srcmap);
 	if (status)
 		return status;
 
-	zero_user(page, offset, bytes);
-	mark_page_accessed(page);
+	zero_user(folio_file_page(folio, pos >> PAGE_SHIFT), offset, bytes);
+	folio_mark_accessed(folio);
 
-	return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap);
+	return iomap_write_end(inode, pos, bytes, bytes, folio, iomap, srcmap);
 }
 
 static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos,
-- 
2.30.2

