From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-mm@kvack.org, linux-fsdevel@vger.kernel.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Subject: [PATCH 15/19] mm/readahead: Add THP readahead
Date: Thu, 29 Oct 2020 19:34:01 +0000	[thread overview]
Message-ID: <20201029193405.29125-16-willy@infradead.org> (raw)
In-Reply-To: <20201029193405.29125-1-willy@infradead.org>

If the filesystem supports THPs, allocate larger pages in the
readahead code when it seems worth doing.  The heuristic for choosing
larger page sizes will surely need some tuning, but this aggressive
ramp-up seems good for testing.
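
For illustration only (not part of the patch): a minimal userspace sketch of
the ramp-up heuristic that page_cache_ra_order() below implements.  It assumes
HPAGE_PMD_ORDER is 9 (x86-64 with 4KiB base pages) and simply doubles the
readahead window each round to show how the order climbs.

#include <stdio.h>

#define HPAGE_PMD_ORDER	9	/* assumption: x86-64 with 4KiB base pages */

/*
 * Grow the allocation order by two each readahead round, capped at
 * PMD size and at the current readahead window (ra_size, in pages).
 * Mirrors the "Grow page size up to PMD size" step in the patch.
 */
static unsigned int ramp_order(unsigned int new_order, unsigned long ra_size)
{
	if (new_order < HPAGE_PMD_ORDER) {
		new_order += 2;
		if (new_order > HPAGE_PMD_ORDER)
			new_order = HPAGE_PMD_ORDER;
		while ((1UL << new_order) > ra_size)
			new_order--;
	}
	return new_order;
}

int main(void)
{
	unsigned int order = 0;
	unsigned long ra_size = 4;	/* patch skips windows smaller than 4 pages */

	/*
	 * Simulated successive readaheads: the window doubles each time
	 * (a simplification) and the order ramps up until it is limited
	 * by either the window size or HPAGE_PMD_ORDER.
	 */
	for (int i = 0; i < 6; i++) {
		order = ramp_order(order, ra_size);
		printf("ra_size=%3lu pages -> order %u\n", ra_size, order);
		ra_size *= 2;
	}
	return 0;
}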

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/readahead.c | 100 ++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 94 insertions(+), 6 deletions(-)

diff --git a/mm/readahead.c b/mm/readahead.c
index c5b0457415be..dc9876104ee8 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -149,7 +149,7 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages,
 
 	blk_finish_plug(&plug);
 
-	BUG_ON(!list_empty(pages));
+	BUG_ON(pages && !list_empty(pages));
 	BUG_ON(readahead_count(rac));
 
 out:
@@ -429,11 +429,99 @@ static int try_context_readahead(struct address_space *mapping,
 	return 1;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int ra_alloc_page(struct readahead_control *ractl, pgoff_t index,
+		pgoff_t mark, unsigned int order, gfp_t gfp)
+{
+	int err;
+	struct page *page = __page_cache_alloc_order(gfp, order);
+
+	if (!page)
+		return -ENOMEM;
+	if (mark - index < (1UL << order))
+		SetPageReadahead(page);
+	err = add_to_page_cache_lru(page, ractl->mapping, index, gfp);
+	if (err)
+		put_page(page);
+	else
+		ractl->_nr_pages += 1UL << order;
+	return err;
+}
+
+static void page_cache_ra_order(struct readahead_control *ractl,
+		struct file_ra_state *ra, unsigned int new_order)
+{
+	struct address_space *mapping = ractl->mapping;
+	pgoff_t index = readahead_index(ractl);
+	pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
+	pgoff_t mark = index + ra->size - ra->async_size;
+	int err = 0;
+	gfp_t gfp = readahead_gfp_mask(mapping);
+
+	if (!mapping_thp_support(mapping) || ra->size < 4)
+		goto fallback;
+
+	limit = min(limit, index + ra->size - 1);
+
+	/* Grow page size up to PMD size */
+	if (new_order < HPAGE_PMD_ORDER) {
+		new_order += 2;
+		if (new_order > HPAGE_PMD_ORDER)
+			new_order = HPAGE_PMD_ORDER;
+		while ((1 << new_order) > ra->size)
+			new_order--;
+	}
+
+	while (index <= limit) {
+		unsigned int order = new_order;
+
+		/* Align with smaller pages if needed */
+		if (index & ((1UL << order) - 1)) {
+			order = __ffs(index);
+			if (order == 1)
+				order = 0;
+		}
+		/* Don't allocate pages past EOF */
+		while (index + (1UL << order) - 1 > limit) {
+			if (--order == 1)
+				order = 0;
+		}
+		err = ra_alloc_page(ractl, index, mark, order, gfp);
+		if (err)
+			break;
+		index += 1UL << order;
+	}
+
+	if (index > limit) {
+		ra->size += index - limit - 1;
+		ra->async_size += index - limit - 1;
+	}
+
+	read_pages(ractl, NULL, false);
+
+	/*
+	 * If there were already pages in the page cache, then we may have
+	 * left some gaps.  Let the regular readahead code take care of this
+	 * situation.
+	 */
+	if (!err)
+		return;
+fallback:
+	do_page_cache_ra(ractl, ra->size, ra->async_size);
+}
+#else
+static void page_cache_ra_order(struct readahead_control *ractl,
+		struct file_ra_state *ra, unsigned int order)
+{
+	do_page_cache_ra(ractl, ra->size, ra->async_size);
+}
+#endif
+
 /*
  * A minimal readahead algorithm for trivial sequential/random reads.
  */
 static void ondemand_readahead(struct readahead_control *ractl,
-		struct file_ra_state *ra, bool hit_readahead_marker,
+		struct file_ra_state *ra, struct page *page,
 		unsigned long req_size)
 {
 	struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
@@ -473,7 +561,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
 	 * Query the pagecache for async_size, which normally equals to
 	 * readahead size. Ramp it up and use it as the new readahead size.
 	 */
-	if (hit_readahead_marker) {
+	if (page) {
 		pgoff_t start;
 
 		rcu_read_lock();
@@ -546,7 +634,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
 	}
 
 	ractl->_index = ra->start;
-	do_page_cache_ra(ractl, ra->size, ra->async_size);
+	page_cache_ra_order(ractl, ra, page ? thp_order(page) : 0);
 }
 
 void page_cache_sync_ra(struct readahead_control *ractl,
@@ -574,7 +662,7 @@ void page_cache_sync_ra(struct readahead_control *ractl,
 	}
 
 	/* do read-ahead */
-	ondemand_readahead(ractl, ra, false, req_count);
+	ondemand_readahead(ractl, ra, NULL, req_count);
 }
 EXPORT_SYMBOL_GPL(page_cache_sync_ra);
 
@@ -604,7 +692,7 @@ void page_cache_async_ra(struct readahead_control *ractl,
 		return;
 
 	/* do read-ahead */
-	ondemand_readahead(ractl, ra, true, req_count);
+	ondemand_readahead(ractl, ra, page, req_count);
 }
 EXPORT_SYMBOL_GPL(page_cache_async_ra);
 
-- 
2.28.0


