From: Matthew Wilcox <willy@infradead.org>
To: "Darrick J. Wong" <djwong@kernel.org>
Cc: linux-xfs@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	linux-kernel@vger.kernel.org, Christoph Hellwig <hch@lst.de>,
	Dan Williams <dan.j.williams@intel.com>,
	Stephen Rothwell <sfr@canb.auug.org.au>
Subject: iomap-folio & nvdimm merge
Date: Tue, 21 Dec 2021 17:01:34 +0000
Message-ID: <YcIIbtKhOulAL4s4@casper.infradead.org>
In-Reply-To: <20211216210715.3801857-17-willy@infradead.org>

On Thu, Dec 16, 2021 at 09:07:06PM +0000, Matthew Wilcox (Oracle) wrote:
> The zero iterator can work in folio-sized chunks instead of page-sized
> chunks.  This will save a lot of page cache lookups if the file is cached
> in large folios.

This patch (and a few others) ends up conflicting with Christoph's changes
that are now in the nvdimm tree.  In an effort to make the merge cleaner,
I took the next-20211220 tag and did the following:

Revert de291b590286
Apply: https://lore.kernel.org/linux-xfs/20211221044450.517558-1-willy@infradead.org/
(these two things are likely to happen in the nvdimm tree imminently)

I then checked out iomap-folio-5.17e and added this patch:

    iomap: Inline __iomap_zero_iter into its caller

    To make the merge easier, replicate the inlining of __iomap_zero_iter()
    into iomap_zero_iter() that is currently in the nvdimm tree.

    Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index ba80bedd9590..c6b3a148e898 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -895,27 +895,6 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
 }
 EXPORT_SYMBOL_GPL(iomap_file_unshare);
 
-static s64 __iomap_zero_iter(struct iomap_iter *iter, loff_t pos, u64 length)
-{
-       struct folio *folio;
-       int status;
-       size_t offset;
-       size_t bytes = min_t(u64, SIZE_MAX, length);
-
-       status = iomap_write_begin(iter, pos, bytes, &folio);
-       if (status)
-               return status;
-
-       offset = offset_in_folio(folio, pos);
-       if (bytes > folio_size(folio) - offset)
-               bytes = folio_size(folio) - offset;
-
-       folio_zero_range(folio, offset, bytes);
-       folio_mark_accessed(folio);
-
-       return iomap_write_end(iter, pos, bytes, bytes, folio);
-}
-
 static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
 {
        struct iomap *iomap = &iter->iomap;
@@ -929,14 +908,34 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
                return length;
 
        do {
-               s64 bytes;
+               struct folio *folio;
+               int status;
+               size_t offset;
+               size_t bytes = min_t(u64, SIZE_MAX, length);
+
+               if (IS_DAX(iter->inode)) {
+                       s64 tmp = dax_iomap_zero(pos, bytes, iomap);
+                       if (tmp < 0)
+                               return tmp;
+                       bytes = tmp;
+                       goto good;
+               }
 
-               if (IS_DAX(iter->inode))
-                       bytes = dax_iomap_zero(pos, length, iomap);
-               else
-                       bytes = __iomap_zero_iter(iter, pos, length);
-               if (bytes < 0)
-                       return bytes;
+               status = iomap_write_begin(iter, pos, bytes, &folio);
+               if (status)
+                       return status;
+
+               offset = offset_in_folio(folio, pos);
+               if (bytes > folio_size(folio) - offset)
+                       bytes = folio_size(folio) - offset;
+
+               folio_zero_range(folio, offset, bytes);
+               folio_mark_accessed(folio);
+
+               bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
+good:
+               if (WARN_ON_ONCE(bytes == 0))
+                       return -EIO;
 
                pos += bytes;
                length -= bytes;



Then I did the merge, and the resulting merge commit looks pretty
sensible:

    Merge branch 'iomap-folio-5.17f' into fixup

diff --cc fs/iomap/buffered-io.c
index 955f51f94b3f,c6b3a148e898..c938bbad075e
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@@ -888,19 -908,32 +907,23 @@@ static loff_t iomap_zero_iter(struct io
                return length;

        do {
-               unsigned offset = offset_in_page(pos);
-               size_t bytes = min_t(u64, PAGE_SIZE - offset, length);
-               struct page *page;
+               struct folio *folio;
                int status;
+               size_t offset;
+               size_t bytes = min_t(u64, SIZE_MAX, length);

-               status = iomap_write_begin(iter, pos, bytes, &page);
 -              if (IS_DAX(iter->inode)) {
 -                      s64 tmp = dax_iomap_zero(pos, bytes, iomap);
 -                      if (tmp < 0)
 -                              return tmp;
 -                      bytes = tmp;
 -                      goto good;
 -              }
 -
+               status = iomap_write_begin(iter, pos, bytes, &folio);
                if (status)
                        return status;

-               zero_user(page, offset, bytes);
-               mark_page_accessed(page);
+               offset = offset_in_folio(folio, pos);
+               if (bytes > folio_size(folio) - offset)
+                       bytes = folio_size(folio) - offset;
+
+               folio_zero_range(folio, offset, bytes);
+               folio_mark_accessed(folio);

-               bytes = iomap_write_end(iter, pos, bytes, bytes, page);
+               bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
 -good:
                if (WARN_ON_ONCE(bytes == 0))
                        return -EIO;
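
For reference, here is roughly how the do-loop in iomap_zero_iter() should
read after the merge.  This is a sketch reconstructed from the combined diff
above rather than copied from the actual merge commit, and the did_zero
bookkeeping at the end of the loop is elided:

	/*
	 * DAX zeroing no longer goes through this path in the nvdimm
	 * tree, so the IS_DAX() special case and its "good:" label
	 * drop out entirely.
	 */
	do {
		struct folio *folio;
		int status;
		size_t offset;
		size_t bytes = min_t(u64, SIZE_MAX, length);

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (status)
			return status;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		folio_zero_range(folio, offset, bytes);
		folio_mark_accessed(folio);

		bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(bytes == 0))
			return -EIO;

		pos += bytes;
		length -= bytes;
	} while (length > 0);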



Shall I push out a version of this patch series which includes the
"iomap: Inline __iomap_zero_iter into its caller" patch I pasted above?

Thread overview: 57+ messages
2021-12-16 21:06 [PATCH v3 00/25] iomap/xfs folio patches Matthew Wilcox (Oracle)
2021-12-16 21:06 ` [PATCH v3 01/25] block: Add bio_add_folio() Matthew Wilcox (Oracle)
2021-12-16 21:06 ` [PATCH v3 02/25] block: Add bio_for_each_folio_all() Matthew Wilcox (Oracle)
2021-12-16 21:06 ` [PATCH v3 03/25] fs/buffer: Convert __block_write_begin_int() to take a folio Matthew Wilcox (Oracle)
2021-12-16 21:06 ` [PATCH v3 04/25] iomap: Convert to_iomap_page " Matthew Wilcox (Oracle)
2021-12-16 21:06 ` [PATCH v3 05/25] iomap: Convert iomap_page_create " Matthew Wilcox (Oracle)
2021-12-16 21:06 ` [PATCH v3 06/25] iomap: Convert iomap_page_release " Matthew Wilcox (Oracle)
2021-12-16 21:06 ` [PATCH v3 07/25] iomap: Convert iomap_releasepage to use " Matthew Wilcox (Oracle)
2021-12-16 21:06 ` [PATCH v3 08/25] iomap: Add iomap_invalidate_folio Matthew Wilcox (Oracle)
2021-12-16 21:06 ` [PATCH v3 09/25] iomap: Pass the iomap_page into iomap_set_range_uptodate Matthew Wilcox (Oracle)
2021-12-16 21:07 ` [PATCH v3 10/25] iomap: Convert bio completions to use folios Matthew Wilcox (Oracle)
2021-12-16 21:07 ` [PATCH v3 11/25] iomap: Use folio offsets instead of page offsets Matthew Wilcox (Oracle)
2021-12-16 21:07 ` [PATCH v3 12/25] iomap: Convert iomap_read_inline_data to take a folio Matthew Wilcox (Oracle)
2021-12-16 21:07 ` [PATCH v3 13/25] iomap: Convert readahead and readpage to use " Matthew Wilcox (Oracle)
2021-12-16 21:07 ` [PATCH v3 14/25] iomap: Convert iomap_page_mkwrite " Matthew Wilcox (Oracle)
2021-12-16 21:07 ` [PATCH v3 15/25] iomap: Allow iomap_write_begin() to be called with the full length Matthew Wilcox (Oracle)
2021-12-16 21:43   ` Darrick J. Wong
2021-12-16 21:07 ` [PATCH v3 16/25] iomap: Convert __iomap_zero_iter to use a folio Matthew Wilcox (Oracle)
2021-12-21 17:01   ` Matthew Wilcox [this message]
2021-12-21 18:41     ` iomap-folio & nvdimm merge Darrick J. Wong
2021-12-21 18:53       ` Matthew Wilcox
2021-12-21 22:46         ` Stephen Rothwell
2021-12-16 21:07 ` [PATCH v3 17/25] iomap: Convert iomap_write_begin() and iomap_write_end() to folios Matthew Wilcox (Oracle)
2021-12-17  5:25   ` kernel test robot
2021-12-17  6:07   ` kernel test robot
2021-12-17  6:07     ` kernel test robot
2021-12-17  6:07   ` kernel test robot
2021-12-17  6:07     ` kernel test robot
2021-12-16 21:07 ` [PATCH v3 18/25] iomap: Convert iomap_write_end_inline to take a folio Matthew Wilcox (Oracle)
2021-12-16 21:07 ` [PATCH v3 19/25] iomap,xfs: Convert ->discard_page to ->discard_folio Matthew Wilcox (Oracle)
2021-12-16 21:07 ` [PATCH v3 20/25] iomap: Simplify iomap_writepage_map() Matthew Wilcox (Oracle)
2021-12-16 21:07 ` [PATCH v3 21/25] iomap: Simplify iomap_do_writepage() Matthew Wilcox (Oracle)
2021-12-16 21:07 ` [PATCH v3 22/25] iomap: Convert iomap_add_to_ioend() to take a folio Matthew Wilcox (Oracle)
2021-12-16 21:07 ` [PATCH v3 23/25] iomap: Convert iomap_migrate_page() to use folios Matthew Wilcox (Oracle)
2021-12-16 21:07 ` [PATCH v3 24/25] iomap: Support large folios in invalidatepage Matthew Wilcox (Oracle)
2021-12-16 21:07 ` [PATCH v3 25/25] xfs: Support large folios Matthew Wilcox (Oracle)
2022-06-22 23:27   ` Darrick J. Wong
2022-06-23  0:42   ` Darrick J. Wong
2022-06-27  4:15     ` Darrick J. Wong
2022-06-27 14:10       ` Matthew Wilcox
2022-06-27 22:16         ` Darrick J. Wong
2022-06-27 23:35           ` Dave Chinner
2022-06-28  7:31           ` Multi-page folio issues in 5.19-rc4 (was [PATCH v3 25/25] xfs: Support large folios) Dave Chinner
2022-06-28 11:27             ` Matthew Wilcox
2022-06-28 11:31               ` Matthew Wilcox
2022-06-28 13:18                 ` Matthew Wilcox
2022-06-28 20:57                   ` Darrick J. Wong
2022-06-28 22:17                   ` Dave Chinner
2022-06-28 23:21                     ` Darrick J. Wong
2022-06-29 12:57                       ` Brian Foster
2022-06-29 20:22                         ` Darrick J. Wong
2022-07-01 16:03                           ` Brian Foster
2022-07-01 18:03                             ` Darrick J. Wong
2022-08-17  9:36                         ` Dave Chinner
2022-08-17 23:53                           ` Darrick J. Wong
2022-08-18 21:58                             ` Dave Chinner
2022-06-27 22:07       ` [PATCH v3 25/25] xfs: Support large folios Dave Chinner
