From: David Howells <dhowells@redhat.com>
To: willy@infradead.org
Cc: dhowells@redhat.com, linux-fsdevel@vger.kernel.org,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: [RFC PATCH 5/7] mm: Make __do_page_cache_readahead() use rac->_nr_pages
Date: Tue, 01 Sep 2020 17:28:52 +0100
Message-ID: <159897773253.405783.7186877407321511610.stgit@warthog.procyon.org.uk>
In-Reply-To: <159897769535.405783.17587409235571100774.stgit@warthog.procyon.org.uk>

Make __do_page_cache_readahead() and page_cache_readahead_unbounded() take
the number of pages to read from rac->_nr_pages rather than from a separate
nr_to_read argument, and convert their callers to set rac->_nr_pages before
calling.

Signed-off-by: David Howells <dhowells@redhat.com>
---

 fs/ext4/verity.c        |    8 +++++---
 fs/f2fs/verity.c        |    8 +++++---
 include/linux/pagemap.h |    3 +--
 mm/internal.h           |    6 +++---
 mm/readahead.c          |   20 +++++++++++---------
 5 files changed, 25 insertions(+), 20 deletions(-)
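
Note (not part of the commit message): the calling convention changes as
sketched below; the variable names here are illustrative.  The read length
now travels inside the readahead_control rather than as a separate
argument:

	/* Before: the length is passed alongside the rac. */
	DEFINE_READAHEAD(rac, file, mapping, index);
	__do_page_cache_readahead(&rac, nr_to_read, lookahead_size);

	/* After: the caller seeds rac._nr_pages first. */
	DEFINE_READAHEAD(rac, file, mapping, index);
	rac._nr_pages = nr_to_read;
	__do_page_cache_readahead(&rac, lookahead_size);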

diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c
index 6fc2dbc87c0b..3d377110e839 100644
--- a/fs/ext4/verity.c
+++ b/fs/ext4/verity.c
@@ -356,10 +356,12 @@ static struct page *ext4_read_merkle_tree_page(struct inode *inode,
 
 	page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
 	if (!page || !PageUptodate(page)) {
-		if (page)
+		if (page) {
 			put_page(page);
-		else if (num_ra_pages > 1)
-			page_cache_readahead_unbounded(&rac, num_ra_pages, 0);
+		} else if (num_ra_pages > 1) {
+			rac._nr_pages = num_ra_pages;
+			page_cache_readahead_unbounded(&rac, 0);
+		}
 		page = read_mapping_page(inode->i_mapping, index, NULL);
 	}
 	return page;
diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
index 392dd07f4214..8445eed5a1bc 100644
--- a/fs/f2fs/verity.c
+++ b/fs/f2fs/verity.c
@@ -235,10 +235,12 @@ static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
 
 	page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
 	if (!page || !PageUptodate(page)) {
-		if (page)
+		if (page) {
 			put_page(page);
-		else if (num_ra_pages > 1)
-			page_cache_readahead_unbounded(&rac, num_ra_pages, 0);
+		} else if (num_ra_pages > 1) {
+			rac._nr_pages = num_ra_pages;
+			page_cache_readahead_unbounded(&rac, 0);
+		}
 		page = read_mapping_page(inode->i_mapping, index, NULL);
 	}
 	return page;
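
Both verity readers now follow the same pattern: seed rac._nr_pages, then
let page_cache_readahead_unbounded() recover the count internally via
readahead_count().  For reference, readahead_count() is the pagemap.h
accessor over that private field, roughly:

	static inline unsigned int readahead_count(struct readahead_control *rac)
	{
		return rac->_nr_pages;
	}
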
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index cd7bde29d4cc..72e9c44d62bb 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -772,8 +772,7 @@ void delete_from_page_cache_batch(struct address_space *mapping,
 void page_cache_sync_readahead(struct readahead_control *, struct file_ra_state *);
 void page_cache_async_readahead(struct readahead_control *, struct file_ra_state *,
 				struct page *);
-void page_cache_readahead_unbounded(struct readahead_control *,
-		unsigned long nr_to_read, unsigned long lookahead_count);
+void page_cache_readahead_unbounded(struct readahead_control *, unsigned long);
 
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
diff --git a/mm/internal.h b/mm/internal.h
index 2eb9f7f5f134..e1d296e76fb0 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -50,8 +50,7 @@ void unmap_page_range(struct mmu_gather *tlb,
 			     struct zap_details *details);
 
 void force_page_cache_readahead(struct readahead_control *);
-void __do_page_cache_readahead(struct readahead_control *,
-		unsigned long nr_to_read, unsigned long lookahead_size);
+void __do_page_cache_readahead(struct readahead_control *, unsigned long);
 
 /*
  * Submit IO for the read-ahead request in file_ra_state.
@@ -60,7 +59,8 @@ static inline void ra_submit(struct file_ra_state *ra,
 		struct address_space *mapping, struct file *file)
 {
 	DEFINE_READAHEAD(rac, file, mapping, ra->start);
-	__do_page_cache_readahead(&rac, ra->size, ra->async_size);
+	rac._nr_pages = ra->size;
+	__do_page_cache_readahead(&rac, ra->async_size);
 }
 
 /**
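
This relies on DEFINE_READAHEAD() leaving _nr_pages zero-initialised,
presumably something like the sketch below, which is why ra_submit() now
has to assign rac._nr_pages explicitly before the call:

	#define DEFINE_READAHEAD(rac, f, m, i)				\
		struct readahead_control rac = {			\
			.file = f,					\
			.mapping = m,					\
			._index = i,					\
		}
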
diff --git a/mm/readahead.c b/mm/readahead.c
index 7114246b4e41..28ff80304a21 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -172,10 +172,11 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages,
  * May sleep, but will not reenter filesystem to reclaim memory.
  */
 void page_cache_readahead_unbounded(struct readahead_control *rac,
-		unsigned long nr_to_read, unsigned long lookahead_size)
+				    unsigned long lookahead_size)
 {
 	struct address_space *mapping = rac->mapping;
 	unsigned long index = readahead_index(rac);
+	unsigned long nr_to_read = readahead_count(rac);
 	LIST_HEAD(page_pool);
 	gfp_t gfp_mask = readahead_gfp_mask(mapping);
 	unsigned long i;
@@ -195,6 +196,7 @@ void page_cache_readahead_unbounded(struct readahead_control *rac,
 	/*
 	 * Preallocate as many pages as we will need.
 	 */
+	rac->_nr_pages = 0;
 	for (i = 0; i < nr_to_read; i++) {
 		struct page *page = xa_load(&mapping->i_pages, index + i);
 
@@ -247,7 +249,7 @@ EXPORT_SYMBOL_GPL(page_cache_readahead_unbounded);
  * We really don't want to intermingle reads and writes like that.
  */
 void __do_page_cache_readahead(struct readahead_control *rac,
-		unsigned long nr_to_read, unsigned long lookahead_size)
+			       unsigned long lookahead_size)
 {
 	struct inode *inode = rac->mapping->host;
 	unsigned long index = readahead_index(rac);
@@ -261,10 +263,10 @@ void __do_page_cache_readahead(struct readahead_control *rac,
 	if (index > end_index)
 		return;
 	/* Don't read past the page containing the last byte of the file */
-	if (nr_to_read > end_index - index)
-		nr_to_read = end_index - index + 1;
+	if (readahead_count(rac) > end_index - index)
+		rac->_nr_pages = end_index - index + 1;
 
-	page_cache_readahead_unbounded(rac, nr_to_read, lookahead_size);
+	page_cache_readahead_unbounded(rac, lookahead_size);
 }
 
 /*
@@ -297,8 +299,7 @@ void force_page_cache_readahead(struct readahead_control *rac)
 
 		rac->_index = index;
 		rac->_nr_pages = this_chunk;
-		// Do I need to modify rac->_batch_count?
-		__do_page_cache_readahead(rac, this_chunk, 0);
+		__do_page_cache_readahead(rac, 0);
 
 		index += this_chunk;
 		nr_to_read -= this_chunk;
@@ -601,7 +602,7 @@ static void ondemand_readahead(struct readahead_control *rac,
 	 * standalone, small random read
 	 * Read as is, and do not pollute the readahead state.
 	 */
-	__do_page_cache_readahead(rac, req_size, 0);
+	__do_page_cache_readahead(rac, 0);
 	return;
 
 initial_readahead:
@@ -630,7 +631,8 @@ static void ondemand_readahead(struct readahead_control *rac,
 	rac->_index = ra->start;
 	if (page && page_cache_readahead_order(rac, ra, thp_order(page)))
 		return;
-	__do_page_cache_readahead(rac, ra->size, ra->async_size);
+	rac->_nr_pages = ra->size;
+	__do_page_cache_readahead(rac, ra->async_size);
 }
 
 /**
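
The EOF clamp in __do_page_cache_readahead() keeps its old semantics.
With illustrative numbers: if the last page of the file is index 9
(end_index == 9) and the caller seeded _nr_pages = 16 at index 0, then
16 > 9 triggers the clamp and the count becomes end_index - index + 1,
i.e. 10 pages, so reading stops at the page containing the last byte:

	/* end_index = 9, index = 0, rac->_nr_pages = 16 */
	if (readahead_count(rac) > end_index - index)	/* 16 > 9 */
		rac->_nr_pages = end_index - index + 1;	/* now 10 */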




Thread overview: 16+ messages
2020-09-01 16:28 [RFC PATCH 0/7] mm: Make more use of readahead_control David Howells
2020-09-01 16:28 ` [RFC PATCH 1/7] Fix khugepaged's request size in collapse_file() David Howells
2020-09-01 18:06   ` Song Liu
2020-09-01 16:28 ` [RFC PATCH 2/7] mm: Make ondemand_readahead() take a readahead_control struct David Howells
2020-09-01 16:28 ` [RFC PATCH 3/7] mm: Push readahead_control down into force_page_cache_readahead() David Howells
2020-09-01 16:28 ` [RFC PATCH 4/7] mm: Pass readahead_control into page_cache_{sync,async}_readahead() David Howells
2020-09-01 16:28 ` David Howells [this message]
2020-09-01 16:28 ` [RFC PATCH 6/7] mm: Fold ra_submit() into do_sync_mmap_readahead() David Howells
2020-09-01 16:29 ` [RFC PATCH 7/7] mm: Pass a file_ra_state struct into force_page_cache_readahead() David Howells
2020-09-01 16:41 ` [RFC PATCH 0/7] mm: Make more use of readahead_control Eric Biggers
2020-09-01 16:45   ` Matthew Wilcox
2020-09-02 15:42   ` David Howells
2020-09-01 16:48 ` Matthew Wilcox
2020-09-01 19:40 ` David Howells
2020-09-01 19:44 ` David Howells
2020-09-01 22:33   ` Matthew Wilcox
