From: Matthew Wilcox <willy@infradead.org>
To: linux-fsdevel@vger.kernel.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: [PATCH v5 17/39] iomap: Support large pages in write paths
Date: Thu, 28 May 2020 19:58:02 -0700
Message-ID: <20200529025824.32296-18-willy@infradead.org>
In-Reply-To: <20200529025824.32296-1-willy@infradead.org>

From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

Use thp_size() instead of PAGE_SIZE and offset_in_thp() instead of
offset_in_page().  Also simplify the logic in iomap_do_writepage()
for determining end of file.
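
For reference, the THP helpers this patch relies on come from earlier in
this series (04/39 "mm: Introduce thp_size" and 06/39 "mm: Introduce
offset_in_thp"); for an order-0 page they collapse back to PAGE_SIZE and
offset_in_page().  A rough, illustrative sketch of their semantics under
that assumption -- not the exact mm definitions:

	/*
	 * Illustrative only; the real helpers live in the mm patches of
	 * this series.  Assumes <linux/mm.h> for page_size() and
	 * compound_head().
	 */
	static inline unsigned long sketch_thp_size(struct page *page)
	{
		/* Size of the whole (possibly compound) page in bytes. */
		return page_size(compound_head(page));
	}

	static inline unsigned long sketch_offset_in_thp(struct page *page,
			loff_t pos)
	{
		/* Byte offset within the whole THP, not just one subpage. */
		return pos & (sketch_thp_size(page) - 1);
	}

With thp_size() available, the end-of-file check in iomap_do_writepage()
reduces to comparing page_offset(page) + thp_size(page) against i_size,
as the final hunks below show.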

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/iomap/buffered-io.c | 50 +++++++++++++++++++++++-------------------
 1 file changed, 28 insertions(+), 22 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index bd70b7c1efd0..8767241ae535 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -466,7 +466,7 @@ iomap_is_partially_uptodate(struct page *page, unsigned long from,
 	unsigned i;
 
 	/* Limit range to one page */
-	len = min_t(unsigned, PAGE_SIZE - from, count);
+	len = min_t(unsigned, thp_size(page) - from, count);
 
 	/* First and last blocks in range within page */
 	first = from >> inode->i_blkbits;
@@ -590,7 +590,9 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
 	loff_t block_size = i_blocksize(inode);
 	loff_t block_start = pos & ~(block_size - 1);
 	loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
-	unsigned from = offset_in_page(pos), to = from + len, poff, plen;
+	unsigned from = offset_in_thp(page, pos);
+	unsigned to = from + len;
+	unsigned poff, plen;
 	int status;
 
 	if (PageUptodate(page))
@@ -658,8 +660,8 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
 	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
 		status = __block_write_begin_int(page, pos, len, NULL, srcmap);
 	else
-		status = __iomap_write_begin(inode, pos, len, flags, page,
-				srcmap);
+		status = __iomap_write_begin(inode, pos, len, flags,
+				compound_head(page), srcmap);
 
 	if (unlikely(status))
 		goto out_unlock;
@@ -722,7 +724,7 @@ __iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
 	 */
 	if (unlikely(copied < len && !PageUptodate(page)))
 		return 0;
-	iomap_set_range_uptodate(page, offset_in_page(pos), len);
+	iomap_set_range_uptodate(page, offset_in_thp(page, pos), len);
 	iomap_set_page_dirty(page);
 	return copied;
 }
@@ -758,7 +760,8 @@ iomap_write_end(struct inode *inode, loff_t pos, unsigned len, unsigned copied,
 		ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
 				page, NULL);
 	} else {
-		ret = __iomap_write_end(inode, pos, len, copied, page);
+		ret = __iomap_write_end(inode, pos, len, copied,
+				compound_head(page));
 	}
 
 	/*
@@ -797,6 +800,10 @@ iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 		unsigned long bytes;	/* Bytes to write to page */
 		size_t copied;		/* Bytes copied from user */
 
+		/*
+		 * XXX: We don't know what size page we'll find in the
+		 * page cache, so only copy up to a regular page boundary.
+		 */
 		offset = offset_in_page(pos);
 		bytes = min_t(unsigned long, PAGE_SIZE - offset,
 						iov_iter_count(i));
@@ -1133,7 +1140,7 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
 			next = bio->bi_private;
 
 		/* walk each page on bio, ending page IO on them */
-		bio_for_each_segment_all(bv, bio, iter_all)
+		bio_for_each_thp_segment_all(bv, bio, iter_all)
 			iomap_finish_page_writeback(inode, bv->bv_page, error);
 		bio_put(bio);
 	}
@@ -1339,7 +1346,7 @@ iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
 {
 	sector_t sector = iomap_sector(&wpc->iomap, offset);
 	unsigned len = i_blocksize(inode);
-	unsigned poff = offset & (PAGE_SIZE - 1);
+	unsigned poff = offset & (thp_size(page) - 1);
 	bool merged, same_page = false;
 
 	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
@@ -1389,11 +1396,12 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	struct iomap_page *iop = to_iomap_page(page);
 	struct iomap_ioend *ioend, *next;
 	unsigned len = i_blocksize(inode);
-	u64 file_offset; /* file offset of page */
+	loff_t pos;
 	int error = 0, count = 0, i;
+	int nr_blocks = i_blocks_per_page(inode, page);
 	LIST_HEAD(submit_list);
 
-	WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
+	WARN_ON_ONCE(nr_blocks > 1 && !iop);
 	WARN_ON_ONCE(iop && atomic_read(&iop->write_count) != 0);
 
 	/*
@@ -1401,20 +1409,20 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	 * end of the current map or find the current map invalid, grab a new
 	 * one.
 	 */
-	for (i = 0, file_offset = page_offset(page);
-	     i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
-	     i++, file_offset += len) {
+	for (i = 0, pos = page_offset(page);
+	     i < nr_blocks && pos < end_offset;
+	     i++, pos += len) {
 		if (iop && !test_bit(i, iop->uptodate))
 			continue;
 
-		error = wpc->ops->map_blocks(wpc, inode, file_offset);
+		error = wpc->ops->map_blocks(wpc, inode, pos);
 		if (error)
 			break;
 		if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
 			continue;
 		if (wpc->iomap.type == IOMAP_HOLE)
 			continue;
-		iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
+		iomap_add_to_ioend(inode, pos, page, iop, wpc, wbc,
 				 &submit_list);
 		count++;
 	}
@@ -1496,7 +1504,6 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
 {
 	struct iomap_writepage_ctx *wpc = data;
 	struct inode *inode = page->mapping->host;
-	pgoff_t end_index;
 	u64 end_offset;
 	loff_t offset;
 
@@ -1537,10 +1544,8 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
 	 * ---------------------------------^------------------|
 	 */
 	offset = i_size_read(inode);
-	end_index = offset >> PAGE_SHIFT;
-	if (page->index < end_index)
-		end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT;
-	else {
+	end_offset = page_offset(page) + thp_size(page);
+	if (end_offset > offset) {
 		/*
 		 * Check whether the page to write out is beyond or straddles
 		 * i_size or not.
@@ -1552,7 +1557,8 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
 		 * |				    |      Straddles     |
 		 * ---------------------------------^-----------|--------|
 		 */
-		unsigned offset_into_page = offset & (PAGE_SIZE - 1);
+		unsigned offset_into_page = offset_in_thp(page, offset);
+		pgoff_t end_index = offset >> PAGE_SHIFT;
 
 		/*
 		 * Skip the page if it is fully outside i_size, e.g. due to a
@@ -1583,7 +1589,7 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
 		 * memory is zeroed when mapped, and writes to that region are
 		 * not written out to the file."
 		 */
-		zero_user_segment(page, offset_into_page, PAGE_SIZE);
+		zero_user_segment(page, offset_into_page, thp_size(page));
 
 		/* Adjust the end_offset to the end of file */
 		end_offset = offset;
-- 
2.26.2


Thread overview: 40+ messages
2020-05-29  2:57 [PATCH v5 00/39] Large pages in the page cache Matthew Wilcox
2020-05-29  2:57 ` [PATCH v5 01/39] mm: Move PageDoubleMap bit Matthew Wilcox
2020-05-29  2:57 ` [PATCH v5 02/39] mm: Simplify PageDoubleMap with PF_SECOND policy Matthew Wilcox
2020-05-29  2:57 ` [PATCH v5 03/39] mm: Allow hpages to be arbitrary order Matthew Wilcox
2020-05-29  2:57 ` [PATCH v5 04/39] mm: Introduce thp_size Matthew Wilcox
2020-05-29  2:57 ` [PATCH v5 05/39] mm: Introduce thp_order Matthew Wilcox
2020-05-29  2:57 ` [PATCH v5 06/39] mm: Introduce offset_in_thp Matthew Wilcox
2020-05-29  2:57 ` [PATCH v5 07/39] fs: Add a filesystem flag for large pages Matthew Wilcox
2020-05-29  2:57 ` [PATCH v5 08/39] fs: Do not update nr_thps for large page mappings Matthew Wilcox
2020-05-29  2:57 ` [PATCH v5 09/39] fs: Introduce i_blocks_per_page Matthew Wilcox
2020-05-29  2:57 ` [PATCH v5 10/39] fs: Make page_mkwrite_check_truncate thp-aware Matthew Wilcox
2020-05-29  2:57 ` [PATCH v5 11/39] fs: Support THPs in zero_user_segments Matthew Wilcox
2020-05-29  2:57 ` [PATCH v5 12/39] bio: Add bio_for_each_thp_segment_all Matthew Wilcox
2020-05-29  2:57 ` [PATCH v5 13/39] iomap: Support arbitrarily many blocks per page Matthew Wilcox
2020-05-29  2:57 ` [PATCH v5 14/39] iomap: Support large pages in iomap_adjust_read_range Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 15/39] iomap: Support large pages in invalidatepage Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 16/39] iomap: Support large pages in read paths Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 17/39] iomap: Support large pages in write paths Matthew Wilcox [this message]
2020-05-29  2:58 ` [PATCH v5 18/39] iomap: Inline data shouldn't see large pages Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 19/39] iomap: Handle tail pages in iomap_page_mkwrite Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 20/39] xfs: Support large pages Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 21/39] mm: Make prep_transhuge_page return its argument Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 22/39] mm: Add __page_cache_alloc_order Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 23/39] mm: Allow large pages to be added to the page cache Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 24/39] mm: Allow large pages to be removed from the page cache Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 25/39] mm: Remove page fault assumption of compound page size Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 26/39] mm: Fix total_mapcount assumption of " Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 27/39] mm: Remove assumptions of THP size Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 28/39] mm: Avoid splitting large pages Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 29/39] mm: Fix truncation for pages of arbitrary size Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 30/39] mm: Handle truncates that split large pages Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 31/39] mm: Support storing shadow entries for " Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 32/39] mm: Support retrieving tail pages from the page cache Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 33/39] mm: Support tail pages in wait_for_stable_page Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 34/39] mm: Add DEFINE_READAHEAD Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 35/39] mm: Make page_cache_readahead_unbounded take a readahead_control Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 36/39] mm: Make __do_page_cache_readahead take a readahead_control Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 37/39] mm: Allow PageReadahead to be set on head pages Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 38/39] mm: Add large page readahead Matthew Wilcox
2020-05-29  2:58 ` [PATCH v5 39/39] mm: Align THP mappings for non-DAX Matthew Wilcox
