From: Matthew Wilcox <willy@infradead.org>
To: linux-fsdevel@vger.kernel.org, linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-kernel@vger.kernel.org
Subject: [PATCH v2 17/25] iomap: Support large pages in write paths
Date: Tue, 11 Feb 2020 20:18:37 -0800
Message-ID: <20200212041845.25879-18-willy@infradead.org>
In-Reply-To: <20200212041845.25879-1-willy@infradead.org>

From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

Use thp_size() instead of PAGE_SIZE, offset_in_this_page() instead of
offset_in_page(), and file_offset_of_page()/file_offset_of_next_page()
instead of open-coded page index arithmetic.  Also simplify the logic in
iomap_do_writepage() for determining the end of file.
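
The helpers used below are introduced earlier in this series (thp_size()
in [PATCH v2 07/25], the file_offset_of_ helpers in [PATCH v2 12/25]).
For readers coming to this patch cold, they behave roughly like the
following sketch; this is an approximation for reference, not the exact
definitions from those patches:

	static inline unsigned long thp_size(struct page *page)
	{
		/* Bytes covered by this page: PAGE_SIZE for order-0,
		 * larger for a THP */
		return PAGE_SIZE << thp_order(page);
	}

	/* Offset of @p within @page, not within a PAGE_SIZE unit */
	#define offset_in_this_page(page, p) \
		((unsigned long)(p) & (thp_size(page) - 1))

	static inline loff_t file_offset_of_page(struct page *page)
	{
		/* Byte offset of this page within the file */
		return (loff_t)page->index << PAGE_SHIFT;
	}

	static inline loff_t file_offset_of_next_page(struct page *page)
	{
		return file_offset_of_page(page) + thp_size(page);
	}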

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/iomap/buffered-io.c | 36 +++++++++++++++++++++---------------
 1 file changed, 21 insertions(+), 15 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 68f8903ecd6d..af1f56408fcd 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -445,7 +445,7 @@ iomap_is_partially_uptodate(struct page *page, unsigned long from,
 	unsigned i;
 
 	/* Limit range to one page */
-	len = min_t(unsigned, PAGE_SIZE - from, count);
+	len = min_t(unsigned, thp_size(page) - from, count);
 
 	/* First and last blocks in range within page */
 	first = from >> inode->i_blkbits;
@@ -488,7 +488,7 @@ iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
 	 * If we are invalidating the entire page, clear the dirty state from it
 	 * and release it to avoid unnecessary buildup of the LRU.
 	 */
-	if (offset == 0 && len == PAGE_SIZE) {
+	if (offset == 0 && len == thp_size(page)) {
 		WARN_ON_ONCE(PageWriteback(page));
 		cancel_dirty_page(page);
 		iomap_page_release(page);
@@ -564,7 +564,9 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
 	loff_t block_size = i_blocksize(inode);
 	loff_t block_start = pos & ~(block_size - 1);
 	loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
-	unsigned from = offset_in_page(pos), to = from + len, poff, plen;
+	unsigned from = offset_in_this_page(page, pos);
+	unsigned to = from + len;
+	unsigned poff, plen;
 	int status;
 
 	if (PageUptodate(page))
@@ -696,7 +698,7 @@ __iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
 	 */
 	if (unlikely(copied < len && !PageUptodate(page)))
 		return 0;
-	iomap_set_range_uptodate(page, offset_in_page(pos), len);
+	iomap_set_range_uptodate(page, offset_in_this_page(page, pos), len);
 	iomap_set_page_dirty(page);
 	return copied;
 }
@@ -771,6 +773,10 @@ iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 		unsigned long bytes;	/* Bytes to write to page */
 		size_t copied;		/* Bytes copied from user */
 
+		/*
+		 * XXX: We don't know what size page we'll find in the
+		 * page cache, so only copy up to a regular page boundary.
+		 */
 		offset = offset_in_page(pos);
 		bytes = min_t(unsigned long, PAGE_SIZE - offset,
 						iov_iter_count(i));
@@ -1320,7 +1326,7 @@ iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
 {
 	sector_t sector = iomap_sector(&wpc->iomap, offset);
 	unsigned len = i_blocksize(inode);
-	unsigned poff = offset & (PAGE_SIZE - 1);
+	unsigned poff = offset & (thp_size(page) - 1);
 	bool merged, same_page = false;
 
 	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
@@ -1372,9 +1378,10 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	unsigned len = i_blocksize(inode);
 	u64 file_offset; /* file offset of page */
 	int error = 0, count = 0, i;
+	int nr_blocks = i_blocks_per_page(inode, page);
 	LIST_HEAD(submit_list);
 
-	WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
+	WARN_ON_ONCE(nr_blocks > 1 && !iop);
 	WARN_ON_ONCE(iop && atomic_read(&iop->write_count) != 0);
 
 	/*
@@ -1382,8 +1389,8 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	 * end of the current map or find the current map invalid, grab a new
 	 * one.
 	 */
-	for (i = 0, file_offset = page_offset(page);
-	     i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
+	for (i = 0, file_offset = file_offset_of_page(page);
+	     i < nr_blocks && file_offset < end_offset;
 	     i++, file_offset += len) {
 		if (iop && !test_bit(i, iop->uptodate))
 			continue;
@@ -1477,7 +1484,6 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
 {
 	struct iomap_writepage_ctx *wpc = data;
 	struct inode *inode = page->mapping->host;
-	pgoff_t end_index;
 	u64 end_offset;
 	loff_t offset;
 
@@ -1518,10 +1524,9 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
 	 * ---------------------------------^------------------|
 	 */
 	offset = i_size_read(inode);
-	end_index = offset >> PAGE_SHIFT;
-	if (page->index < end_index)
-		end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT;
-	else {
+	end_offset = file_offset_of_next_page(page);
+
+	if (end_offset > offset) {
 		/*
 		 * Check whether the page to write out is beyond or straddles
 		 * i_size or not.
@@ -1533,7 +1538,8 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
 		 * |				    |      Straddles     |
 		 * ---------------------------------^-----------|--------|
 		 */
-		unsigned offset_into_page = offset & (PAGE_SIZE - 1);
+		unsigned offset_into_page = offset_in_this_page(page, offset);
+		pgoff_t end_index = offset >> PAGE_SHIFT;
 
 		/*
 		 * Skip the page if it is fully outside i_size, e.g. due to a
@@ -1564,7 +1570,7 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
 		 * memory is zeroed when mapped, and writes to that region are
 		 * not written out to the file."
 		 */
-		zero_user_segment(page, offset_into_page, PAGE_SIZE);
+		zero_user_segment(page, offset_into_page, thp_size(page));
 
 		/* Adjust the end_offset to the end of file */
 		end_offset = offset;
-- 
2.25.0
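
To make the end-of-file simplification in iomap_do_writepage() concrete,
here is a worked example assuming 4KiB order-0 pages, i_size = 5000, and
the helper sketch given above the diff:

	offset = i_size_read(inode);			/* 5000 */

	/* page->index == 0 */
	end_offset = file_offset_of_next_page(page);	/* 4096 */
	/* 4096 <= 5000: page lies fully inside i_size, write it all */

	/* page->index == 1 */
	end_offset = file_offset_of_next_page(page);	/* 8192 */
	/* 8192 > 5000: page straddles i_size */
	offset_into_page = offset_in_this_page(page, offset);	/* 904 */
	/* bytes 904..thp_size(page) are zeroed before writeback and
	 * end_offset is clamped to i_size (5000) */

For an order-0 page this matches the old end_index comparison exactly:
page->index < (i_size >> PAGE_SHIFT) holds precisely when
file_offset_of_next_page(page) <= i_size.  The byte-based form has the
advantage that a THP's larger thp_size() is handled with no extra code.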


Thread overview: 68+ messages
2020-02-12  4:18 [PATCH v2 00/25] Large pages in the page cache Matthew Wilcox
2020-02-12  4:18 ` [PATCH v2 01/25] mm: Use vm_fault error code directly Matthew Wilcox
2020-02-12  7:34   ` Christoph Hellwig
2020-02-12  4:18 ` [PATCH v2 02/25] mm: Optimise find_subpage for !THP Matthew Wilcox
2020-02-12  7:41   ` Christoph Hellwig
2020-02-12 13:02     ` Matthew Wilcox
2020-02-12 17:52       ` Christoph Hellwig
2020-02-13 13:50       ` Kirill A. Shutemov
2020-02-12  4:18 ` [PATCH v2 03/25] mm: Use VM_BUG_ON_PAGE in clear_page_dirty_for_io Matthew Wilcox
2020-02-12  7:38   ` Christoph Hellwig
2020-02-13 13:50   ` Kirill A. Shutemov
2020-02-12  4:18 ` [PATCH v2 04/25] mm: Unexport find_get_entry Matthew Wilcox
2020-02-12  7:37   ` Christoph Hellwig
2020-02-13 13:51   ` Kirill A. Shutemov
2020-02-12  4:18 ` [PATCH v2 05/25] mm: Fix documentation of FGP flags Matthew Wilcox
2020-02-12  7:42   ` Christoph Hellwig
2020-02-12 19:11     ` Matthew Wilcox
2020-02-13 14:00       ` Kirill A. Shutemov
2020-02-13 13:59   ` Kirill A. Shutemov
2020-02-13 14:34     ` Matthew Wilcox
2020-02-12  4:18 ` [PATCH v2 06/25] mm: Allow hpages to be arbitrary order Matthew Wilcox
2020-02-13 14:11   ` Kirill A. Shutemov
2020-02-13 14:30     ` Matthew Wilcox
2020-02-12  4:18 ` [PATCH v2 07/25] mm: Introduce thp_size Matthew Wilcox
2020-02-13 14:19   ` Kirill A. Shutemov
2020-02-12  4:18 ` [PATCH v2 08/25] mm: Introduce thp_order Matthew Wilcox
2020-02-13 14:20   ` Kirill A. Shutemov
2020-02-12  4:18 ` [PATCH v2 09/25] fs: Add a filesystem flag for large pages Matthew Wilcox
2020-02-12  7:43   ` Christoph Hellwig
2020-02-12 14:59     ` Matthew Wilcox
2020-02-12  4:18 ` [PATCH v2 10/25] fs: Introduce i_blocks_per_page Matthew Wilcox
2020-02-12  7:44   ` Christoph Hellwig
2020-02-12 15:05     ` Matthew Wilcox
2020-02-12 17:54       ` Christoph Hellwig
2020-02-13 15:40   ` Kirill A. Shutemov
2020-02-13 16:07     ` Matthew Wilcox
2020-02-12  4:18 ` [PATCH v2 11/25] fs: Make page_mkwrite_check_truncate thp-aware Matthew Wilcox
2020-02-13 15:44   ` Kirill A. Shutemov
2020-02-13 16:26     ` Matthew Wilcox
2020-02-12  4:18 ` [PATCH v2 12/25] mm: Add file_offset_of_ helpers Matthew Wilcox
2020-02-12  7:46   ` Christoph Hellwig
2020-02-12  4:18 ` [PATCH v2 13/25] fs: Add zero_user_large Matthew Wilcox
2020-02-14 13:52   ` Kirill A. Shutemov
2020-02-14 16:03     ` Matthew Wilcox
2020-02-18 14:16       ` Kirill A. Shutemov
2020-02-18 16:13         ` Matthew Wilcox
2020-02-18 17:10           ` Kirill A. Shutemov
2020-02-18 18:07             ` Matthew Wilcox
2020-02-21 12:42               ` Kirill A. Shutemov
2020-02-12  4:18 ` [PATCH v2 14/25] iomap: Support arbitrarily many blocks per page Matthew Wilcox
2020-02-12  8:05   ` Christoph Hellwig
2020-02-12  4:18 ` [PATCH v2 15/25] iomap: Support large pages in iomap_adjust_read_range Matthew Wilcox
2020-02-12  8:11   ` Christoph Hellwig
2020-02-12  4:18 ` [PATCH v2 16/25] iomap: Support large pages in read paths Matthew Wilcox
2020-02-12  8:13   ` Christoph Hellwig
2020-02-12 17:45     ` Matthew Wilcox
2020-02-12  4:18 ` Matthew Wilcox [this message]
2020-02-12  8:17   ` [PATCH v2 17/25] iomap: Support large pages in write paths Christoph Hellwig
2020-02-12  4:18 ` [PATCH v2 18/25] iomap: Inline data shouldn't see large pages Matthew Wilcox
2020-02-12  8:05   ` Christoph Hellwig
2020-02-12  4:18 ` [PATCH v2 19/25] xfs: Support large pages Matthew Wilcox
2020-02-12  4:18 ` [PATCH v2 20/25] mm: Make prep_transhuge_page return its argument Matthew Wilcox
2020-02-12  4:18 ` [PATCH v2 21/25] mm: Add __page_cache_alloc_order Matthew Wilcox
2020-02-12  4:18 ` [PATCH v2 22/25] mm: Allow large pages to be added to the page cache Matthew Wilcox
2020-02-12  4:18 ` [PATCH v2 23/25] mm: Allow large pages to be removed from the page cache Matthew Wilcox
2020-02-12  4:18 ` [PATCH v2 24/25] mm: Add large page readahead Matthew Wilcox
2020-02-12  4:18 ` [PATCH v2 25/25] mm: Align THP mappings for non-DAX Matthew Wilcox
2020-02-12  7:50   ` Christoph Hellwig
