[merged] mm-readahead-make-do_page_cache_ra-take-a-readahead_control.patch removed from -mm tree
From: akpm @ 2020-10-16 20:48 UTC
To: dhowells, ebiggers, mm-commits, willy
The patch titled
Subject: mm/readahead: make do_page_cache_ra take a readahead_control
has been removed from the -mm tree. Its filename was
mm-readahead-make-do_page_cache_ra-take-a-readahead_control.patch
This patch was dropped because it was merged into mainline or a subsystem tree
------------------------------------------------------
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Subject: mm/readahead: make do_page_cache_ra take a readahead_control
Rename __do_page_cache_readahead() to do_page_cache_ra() and call it
directly from ondemand_readahead() instead of indirecting via ra_submit().
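
In code terms, the change swaps the old five-argument call for a two-step
pattern: build a readahead_control on the stack, then pass it along with the
two lengths.  A minimal before/after sketch, mirroring ra_submit() in the diff
below (illustrative only, not itself part of the patch):

	/* Before: mapping, file and index passed as loose arguments. */
	__do_page_cache_readahead(mapping, file, ra->start, ra->size,
				  ra->async_size);

	/* After: the (file, mapping, index) triple travels in the ractl. */
	DEFINE_READAHEAD(ractl, file, mapping, ra->start);
	do_page_cache_ra(&ractl, ra->size, ra->async_size);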
Link: https://lkml.kernel.org/r/20200903140844.14194-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Eric Biggers <ebiggers@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
mm/internal.h | 11 +++++------
mm/readahead.c | 28 +++++++++++++++-------------
2 files changed, 20 insertions(+), 19 deletions(-)
--- a/mm/internal.h~mm-readahead-make-do_page_cache_ra-take-a-readahead_control
+++ a/mm/internal.h
@@ -51,18 +51,17 @@ void unmap_page_range(struct mmu_gather
void force_page_cache_readahead(struct address_space *, struct file *,
pgoff_t index, unsigned long nr_to_read);
-void __do_page_cache_readahead(struct address_space *, struct file *,
- pgoff_t index, unsigned long nr_to_read,
- unsigned long lookahead_size);
+void do_page_cache_ra(struct readahead_control *,
+ unsigned long nr_to_read, unsigned long lookahead_size);
/*
* Submit IO for the read-ahead request in file_ra_state.
*/
static inline void ra_submit(struct file_ra_state *ra,
- struct address_space *mapping, struct file *filp)
+ struct address_space *mapping, struct file *file)
{
- __do_page_cache_readahead(mapping, filp,
- ra->start, ra->size, ra->async_size);
+ DEFINE_READAHEAD(ractl, file, mapping, ra->start);
+ do_page_cache_ra(&ractl, ra->size, ra->async_size);
}
struct page *find_get_entry(struct address_space *mapping, pgoff_t index);
--- a/mm/readahead.c~mm-readahead-make-do_page_cache_ra-take-a-readahead_control
+++ a/mm/readahead.c
@@ -241,17 +241,16 @@ void page_cache_ra_unbounded(struct read
EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);
/*
- * __do_page_cache_readahead() actually reads a chunk of disk. It allocates
+ * do_page_cache_ra() actually reads a chunk of disk. It allocates
* the pages first, then submits them for I/O. This avoids the very bad
* behaviour which would occur if page allocations are causing VM writeback.
* We really don't want to intermingle reads and writes like that.
*/
-void __do_page_cache_readahead(struct address_space *mapping,
- struct file *file, pgoff_t index, unsigned long nr_to_read,
- unsigned long lookahead_size)
+void do_page_cache_ra(struct readahead_control *ractl,
+ unsigned long nr_to_read, unsigned long lookahead_size)
{
- DEFINE_READAHEAD(ractl, file, mapping, index);
- struct inode *inode = mapping->host;
+ struct inode *inode = ractl->mapping->host;
+ unsigned long index = readahead_index(ractl);
loff_t isize = i_size_read(inode);
pgoff_t end_index; /* The last page we want to read */
@@ -265,7 +264,7 @@ void __do_page_cache_readahead(struct ad
if (nr_to_read > end_index - index)
nr_to_read = end_index - index + 1;
- page_cache_ra_unbounded(&ractl, nr_to_read, lookahead_size);
+ page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);
}
/*
@@ -273,10 +272,11 @@ void __do_page_cache_readahead(struct ad
* memory at once.
*/
void force_page_cache_readahead(struct address_space *mapping,
- struct file *filp, pgoff_t index, unsigned long nr_to_read)
+ struct file *file, pgoff_t index, unsigned long nr_to_read)
{
+ DEFINE_READAHEAD(ractl, file, mapping, index);
struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
- struct file_ra_state *ra = &filp->f_ra;
+ struct file_ra_state *ra = &file->f_ra;
unsigned long max_pages;
if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages &&
@@ -294,7 +294,7 @@ void force_page_cache_readahead(struct a
if (this_chunk > nr_to_read)
this_chunk = nr_to_read;
- __do_page_cache_readahead(mapping, filp, index, this_chunk, 0);
+ do_page_cache_ra(&ractl, this_chunk, 0);
index += this_chunk;
nr_to_read -= this_chunk;
@@ -432,10 +432,11 @@ static int try_context_readahead(struct
* A minimal readahead algorithm for trivial sequential/random reads.
*/
static void ondemand_readahead(struct address_space *mapping,
- struct file_ra_state *ra, struct file *filp,
+ struct file_ra_state *ra, struct file *file,
bool hit_readahead_marker, pgoff_t index,
unsigned long req_size)
{
+ DEFINE_READAHEAD(ractl, file, mapping, index);
struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
unsigned long max_pages = ra->ra_pages;
unsigned long add_pages;
@@ -516,7 +517,7 @@ static void ondemand_readahead(struct ad
* standalone, small random read
* Read as is, and do not pollute the readahead state.
*/
- __do_page_cache_readahead(mapping, filp, index, req_size, 0);
+ do_page_cache_ra(&ractl, req_size, 0);
return;
initial_readahead:
@@ -542,7 +543,8 @@ readit:
}
}
- ra_submit(ra, mapping, filp);
+ ractl._index = ra->start;
+ do_page_cache_ra(&ractl, ra->size, ra->async_size);
}
/**
_
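
For readers who have not met the readahead_control API used above: roughly, as
of the v5.9/v5.10 kernels this patch targets, the structure and helpers
exercised by the diff look like the sketch below (paraphrased from
include/linux/pagemap.h; exact layout and comments may differ):

	struct readahead_control {
		struct file *file;
		struct address_space *mapping;
	/* private: use the readahead_*() accessors to read these */
		pgoff_t _index;			/* first page to read */
		unsigned int _nr_pages;		/* pages added so far */
		unsigned int _batch_count;
	};

	/* Declare and initialise an on-stack readahead_control. */
	#define DEFINE_READAHEAD(rac, f, m, i)		\
		struct readahead_control rac = {	\
			.file = f,			\
			.mapping = m,			\
			._index = i,			\
		}

	static inline pgoff_t readahead_index(struct readahead_control *rac)
	{
		return rac->_index;
	}

do_page_cache_ra() pulls the inode from ractl->mapping and the starting page
from readahead_index(), which is why the explicit mapping/file/index
parameters could be dropped from its signature.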
Patches currently in -mm which might be from willy@infradead.org are
mm-update-the-documentation-for-vfree.patch