From: Matthew Wilcox <willy@infradead.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-fsdevel@vger.kernel.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org, linux-btrfs@vger.kernel.org,
	linux-erofs@lists.ozlabs.org, linux-ext4@vger.kernel.org,
	linux-f2fs-devel@lists.sourceforge.net, cluster-devel@redhat.com,
	ocfs2-devel@oss.oracle.com, linux-xfs@vger.kernel.org,
	Christoph Hellwig <hch@lst.de>,
	William Kucharski <william.kucharski@oracle.com>
Subject: [PATCH v9 10/25] mm: Put readahead pages in cache earlier
Date: Fri, 20 Mar 2020 07:22:16 -0700	[thread overview]
Message-ID: <20200320142231.2402-11-willy@infradead.org> (raw)
In-Reply-To: <20200320142231.2402-1-willy@infradead.org>

From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

When populating the page cache for readahead, mappings that use
->readpages must populate the page cache themselves, as the pages are
passed on a linked list that would normally be used for the page cache's
LRU.  For mappings that use ->readpage or the upcoming ->readahead method,
we can put the pages into the page cache as soon as they're allocated,
which solves a race between readahead and direct IO.  It also lets us
remove the gfp argument from read_pages().
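
For context, the legacy ->readpages contract looks roughly like the sketch
below.  This is an illustrative fragment, not part of this patch;
myfs_submit_read() is a hypothetical helper standing in for however the
filesystem starts the read:

	static int myfs_readpages(struct file *file, struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
	{
		while (!list_empty(pages)) {
			struct page *page = lru_to_page(pages);

			list_del(&page->lru);
			/* The filesystem, not the core VM, inserts each page. */
			if (!add_to_page_cache_lru(page, mapping, page->index,
					readahead_gfp_mask(mapping)))
				myfs_submit_read(file, page);
			put_page(page);
		}
		return 0;
	}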

Use the new readahead_page() API to implement the repeated calls to
->readpage(), just like most filesystems will.
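
As an illustration of that pattern (again a sketch, not taken from this
patch; myfs_readahead() and myfs_submit_read() are hypothetical names), a
filesystem implementing the new ->readahead operation would simply drain
the readahead_control, since the pages are already locked and in the page
cache by the time it is called:

	static void myfs_readahead(struct readahead_control *rac)
	{
		struct page *page;

		while ((page = readahead_page(rac))) {
			/* Page is locked, in the cache, and referenced;
			 * the read completion is expected to unlock it. */
			myfs_submit_read(rac->file, page);
			put_page(page);
		}
	}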

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
---
 mm/readahead.c | 46 ++++++++++++++++++++++++++++------------------
 1 file changed, 28 insertions(+), 18 deletions(-)

diff --git a/mm/readahead.c b/mm/readahead.c
index ddc63d3b07b8..e52b3a7b9da5 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -114,14 +114,14 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 EXPORT_SYMBOL(read_cache_pages);
 
 static void read_pages(struct readahead_control *rac, struct list_head *pages,
-		gfp_t gfp)
+		bool skip_page)
 {
 	const struct address_space_operations *aops = rac->mapping->a_ops;
+	struct page *page;
 	struct blk_plug plug;
-	unsigned page_idx;
 
 	if (!readahead_count(rac))
-		return;
+		goto out;
 
 	blk_start_plug(&plug);
 
@@ -130,23 +130,23 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages,
 				readahead_count(rac));
 		/* Clean up the remaining pages */
 		put_pages_list(pages);
-		goto out;
-	}
-
-	for (page_idx = 0; page_idx < readahead_count(rac); page_idx++) {
-		struct page *page = lru_to_page(pages);
-		list_del(&page->lru);
-		if (!add_to_page_cache_lru(page, rac->mapping, page->index,
-				gfp))
+		rac->_index += rac->_nr_pages;
+		rac->_nr_pages = 0;
+	} else {
+		while ((page = readahead_page(rac))) {
 			aops->readpage(rac->file, page);
-		put_page(page);
+			put_page(page);
+		}
 	}
 
-out:
 	blk_finish_plug(&plug);
 
 	BUG_ON(!list_empty(pages));
-	rac->_nr_pages = 0;
+	BUG_ON(readahead_count(rac));
+
+out:
+	if (skip_page)
+		rac->_index++;
 }
 
 /*
@@ -168,6 +168,7 @@ void __do_page_cache_readahead(struct address_space *mapping,
 	struct readahead_control rac = {
 		.mapping = mapping,
 		.file = filp,
+		._index = index,
 	};
 	unsigned long i;
 
@@ -183,6 +184,8 @@ void __do_page_cache_readahead(struct address_space *mapping,
 		if (index + i > end_index)
 			break;
 
+		BUG_ON(index + i != rac._index + rac._nr_pages);
+
 		page = xa_load(&mapping->i_pages, index + i);
 		if (page && !xa_is_value(page)) {
 			/*
@@ -190,15 +193,22 @@ void __do_page_cache_readahead(struct address_space *mapping,
 			 * contiguous pages before continuing with the next
 			 * batch.
 			 */
-			read_pages(&rac, &page_pool, gfp_mask);
+			read_pages(&rac, &page_pool, true);
 			continue;
 		}
 
 		page = __page_cache_alloc(gfp_mask);
 		if (!page)
 			break;
-		page->index = index + i;
-		list_add(&page->lru, &page_pool);
+		if (mapping->a_ops->readpages) {
+			page->index = index + i;
+			list_add(&page->lru, &page_pool);
+		} else if (add_to_page_cache_lru(page, mapping, index + i,
+					gfp_mask) < 0) {
+			put_page(page);
+			read_pages(&rac, &page_pool, true);
+			continue;
+		}
 		if (i == nr_to_read - lookahead_size)
 			SetPageReadahead(page);
 		rac._nr_pages++;
@@ -209,7 +219,7 @@ void __do_page_cache_readahead(struct address_space *mapping,
 	 * uptodate then the caller will launch readpage again, and
 	 * will then handle the error.
 	 */
-	read_pages(&rac, &page_pool, gfp_mask);
+	read_pages(&rac, &page_pool, false);
 }
 
 /*
-- 
2.25.1

