From: Matthew Wilcox <willy@infradead.org>
To: linux-xfs@vger.kernel.org, linux-fsdevel@vger.kernel.org,
linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
jlayton@kernel.org, hch@infradead.org
Subject: [PATCH 3/8] mm: Use a pagevec for readahead
Date: Mon, 13 Jan 2020 07:37:41 -0800 [thread overview]
Message-ID: <20200113153746.26654-4-willy@infradead.org> (raw)
In-Reply-To: <20200113153746.26654-1-willy@infradead.org>
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Instead of using a linked list, use a small array. This does mean we
will allocate and then submit for I/O no more than 15 pages at a time
(60kB with 4kB pages), but we have the block queue plugged so the bios
can be combined afterwards. We generally don't readahead more than
256kB anyway, so this is not a huge reduction in efficiency, and we'll
make up for it with later patches.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
mm/readahead.c | 97 +++++++++++++++++++++++++++-----------------------
1 file changed, 52 insertions(+), 45 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index 6bf73ef33b7e..76a70a4406b5 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -113,35 +113,37 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
EXPORT_SYMBOL(read_cache_pages);
-static int read_pages(struct address_space *mapping, struct file *filp,
- struct list_head *pages, unsigned int nr_pages, gfp_t gfp)
+/*
+ * We ignore I/O errors - they will be handled by the actual consumer of
+ * the data that we attempted to prefetch.
+ */
+static unsigned read_pages(struct address_space *mapping, struct file *filp,
+ struct pagevec *pvec, pgoff_t offset, gfp_t gfp)
{
- struct blk_plug plug;
- unsigned page_idx;
- int ret;
-
- blk_start_plug(&plug);
+ struct page *page;
+ unsigned int nr_pages = pagevec_count(pvec);
if (mapping->a_ops->readpages) {
- ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
- /* Clean up the remaining pages */
- put_pages_list(pages);
- goto out;
- }
+ LIST_HEAD(pages);
- for (page_idx = 0; page_idx < nr_pages; page_idx++) {
- struct page *page = lru_to_page(pages);
- list_del(&page->lru);
- if (!add_to_page_cache_lru(page, mapping, page->index, gfp))
- mapping->a_ops->readpage(filp, page);
- put_page(page);
+ pagevec_for_each(pvec, page) {
+ page->index = offset++;
+ list_add(&page->lru, &pages);
+ }
+ mapping->a_ops->readpages(filp, mapping, &pages, nr_pages);
+ /* Clean up the remaining pages */
+ put_pages_list(&pages);
+ } else {
+ pagevec_for_each(pvec, page) {
+ if (!add_to_page_cache_lru(page, mapping, offset++,
+ gfp))
+ mapping->a_ops->readpage(filp, page);
+ put_page(page);
+ }
}
- ret = 0;
-out:
- blk_finish_plug(&plug);
-
- return ret;
+ pagevec_reinit(pvec);
+ return nr_pages;
}
/*
@@ -159,59 +161,64 @@ unsigned long __do_page_cache_readahead(struct address_space *mapping,
struct inode *inode = mapping->host;
struct page *page;
unsigned long end_index; /* The last page we want to read */
- LIST_HEAD(page_pool);
+ struct pagevec pages;
int page_idx;
+ pgoff_t page_offset = offset;
unsigned long nr_pages = 0;
loff_t isize = i_size_read(inode);
gfp_t gfp_mask = readahead_gfp_mask(mapping);
+ struct blk_plug plug;
+
+ blk_start_plug(&plug);
if (isize == 0)
goto out;
end_index = ((isize - 1) >> PAGE_SHIFT);
+ pagevec_init(&pages);
/*
* Preallocate as many pages as we will need.
*/
for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
- pgoff_t page_offset = offset + page_idx;
+ page_offset++;
if (page_offset > end_index)
break;
page = xa_load(&mapping->i_pages, page_offset);
+
+ /*
+ * Page already present? Kick off the current batch of
+ * contiguous pages before continuing with the next batch.
+ */
if (page && !xa_is_value(page)) {
- /*
- * Page already present? Kick off the current batch of
- * contiguous pages before continuing with the next
- * batch.
- */
- if (nr_pages)
- read_pages(mapping, filp, &page_pool, nr_pages,
- gfp_mask);
- nr_pages = 0;
+ unsigned int count = pagevec_count(&pages);
+
+ if (count)
+ nr_pages += read_pages(mapping, filp, &pages,
+ offset, gfp_mask);
+ offset = page_offset + 1;
continue;
}
page = __page_cache_alloc(gfp_mask);
if (!page)
break;
- page->index = page_offset;
- list_add(&page->lru, &page_pool);
+ if (pagevec_add(&pages, page) == 0) {
+ nr_pages += read_pages(mapping, filp, &pages,
+ offset, gfp_mask);
+ offset = page_offset + 1;
+ }
if (page_idx == nr_to_read - lookahead_size)
SetPageReadahead(page);
- nr_pages++;
}
- /*
- * Now start the IO. We ignore I/O errors - if the page is not
- * uptodate then the caller will launch readpage again, and
- * will then handle the error.
- */
- if (nr_pages)
- read_pages(mapping, filp, &page_pool, nr_pages, gfp_mask);
- BUG_ON(!list_empty(&page_pool));
+ if (pagevec_count(&pages))
+ nr_pages += read_pages(mapping, filp, &pages, offset, gfp_mask);
out:
+ blk_finish_plug(&plug);
+
return nr_pages;
}
--
2.24.1
next prev parent reply other threads:[~2020-01-13 15:37 UTC|newest]
Thread overview: 25+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-01-13 15:37 [RFC 0/8] Replacing the readpages a_op Matthew Wilcox
2020-01-13 15:37 ` [PATCH 1/8] pagevec: Add an iterator Matthew Wilcox
2020-01-13 15:37 ` [PATCH 2/8] mm: Fix the return type of __do_page_cache_readahead Matthew Wilcox
2020-01-13 15:37 ` Matthew Wilcox [this message]
2020-01-13 15:37 ` [PATCH 4/8] mm/fs: Add a_ops->readahead Matthew Wilcox
2020-01-13 18:22 ` Daniel Wagner
2020-01-13 19:17 ` Matthew Wilcox
2020-01-13 15:37 ` [PATCH 5/8] iomap,xfs: Convert from readpages to readahead Matthew Wilcox
2020-01-13 15:37 ` [PATCH 6/8] cifs: " Matthew Wilcox
2020-01-13 15:37 ` [PATCH 7/8] mm: Remove add_to_page_cache_locked Matthew Wilcox
2020-01-13 15:37 ` [PATCH 8/8] mm: Unify all add_to_page_cache variants Matthew Wilcox
2020-01-13 16:42 ` [RFC 0/8] Replacing the readpages a_op Chris Mason
2020-01-13 17:40 ` Matthew Wilcox
2020-01-13 18:00 ` Chris Mason
2020-01-13 21:58 ` Matthew Wilcox
2020-01-13 22:00 ` Jens Axboe
2020-01-13 22:10 ` Matthew Wilcox
2020-01-13 22:14 ` Jens Axboe
2020-01-13 22:27 ` Matthew Wilcox
2020-01-13 22:30 ` Jens Axboe
2020-01-13 22:34 ` Chris Mason
2020-01-14 1:01 ` Matthew Wilcox
2020-01-14 1:07 ` Chris Mason
2020-01-13 17:54 ` Matthew Wilcox
2020-01-13 22:19 ` Jens Axboe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200113153746.26654-4-willy@infradead.org \
--to=willy@infradead.org \
--cc=hch@infradead.org \
--cc=jlayton@kernel.org \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-xfs@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).