From: Matthew Wilcox <willy@infradead.org>
To: linux-fsdevel@vger.kernel.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	linux-btrfs@vger.kernel.org, linux-erofs@lists.ozlabs.org,
	linux-ext4@vger.kernel.org,
	linux-f2fs-devel@lists.sourceforge.net, cluster-devel@redhat.com,
	ocfs2-devel@oss.oracle.com, linux-xfs@vger.kernel.org,
	Dave Chinner <dchinner@redhat.com>,
	John Hubbard <jhubbard@nvidia.com>
Subject: [PATCH v7 02/24] mm: Return void from various readahead functions
Date: Wed, 19 Feb 2020 13:00:41 -0800	[thread overview]
Message-ID: <20200219210103.32400-3-willy@infradead.org> (raw)
In-Reply-To: <20200219210103.32400-1-willy@infradead.org>

From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

ondemand_readahead() has two callers, neither of which uses the return value.
That means that both ra_submit() and __do_page_cache_readahead() can return
void, and we don't need to worry that a page already present in the readahead
window causes us to return a smaller nr_pages than we ought to have.

Similarly, no caller uses the return value from force_page_cache_readahead().
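
For reference, here is a condensed sketch of the two ondemand_readahead()
call sites, page_cache_sync_readahead() and page_cache_async_readahead()
in mm/readahead.c as of the tree this series applies to (a sketch, not
part of this patch; several congestion and page-state checks are elided).
Both already discard the result:

	void page_cache_sync_readahead(struct address_space *mapping,
			struct file_ra_state *ra, struct file *filp,
			pgoff_t offset, unsigned long req_size)
	{
		if (!ra->ra_pages)
			return;

		if (filp && (filp->f_mode & FMODE_RANDOM)) {
			/* this return value was already being discarded */
			force_page_cache_readahead(mapping, filp, offset, req_size);
			return;
		}

		/* do read-ahead; the returned page count is never consumed */
		ondemand_readahead(mapping, ra, filp, false, offset, req_size);
	}

	void page_cache_async_readahead(struct address_space *mapping,
			struct file_ra_state *ra, struct file *filp,
			struct page *page, pgoff_t offset,
			unsigned long req_size)
	{
		/* (writeback and readahead-marker handling elided) */

		/* do read-ahead; the returned page count is never consumed */
		ondemand_readahead(mapping, ra, filp, true, offset, req_size);
	}

Since no caller up the stack consumes the count, propagating it is dead
code, which is what this patch removes.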

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
---
 mm/fadvise.c   |  4 ----
 mm/internal.h  | 12 ++++++------
 mm/readahead.c | 31 +++++++++++++------------------
 3 files changed, 19 insertions(+), 28 deletions(-)

diff --git a/mm/fadvise.c b/mm/fadvise.c
index 3efebfb9952c..0e66f2aaeea3 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -104,10 +104,6 @@ int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
 		if (!nrpages)
 			nrpages = ~0UL;
 
-		/*
-		 * Ignore return value because fadvise() shall return
-		 * success even if filesystem can't retrieve a hint,
-		 */
 		force_page_cache_readahead(mapping, file, start_index, nrpages);
 		break;
 	case POSIX_FADV_NOREUSE:
diff --git a/mm/internal.h b/mm/internal.h
index 83f353e74654..15aaebebd768 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -49,20 +49,20 @@ void unmap_page_range(struct mmu_gather *tlb,
 			     unsigned long addr, unsigned long end,
 			     struct zap_details *details);
 
-int force_page_cache_readahead(struct address_space *, struct file *,
+void force_page_cache_readahead(struct address_space *, struct file *,
 		pgoff_t index, unsigned long nr_to_read);
-extern unsigned int __do_page_cache_readahead(struct address_space *mapping,
-		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
+void __do_page_cache_readahead(struct address_space *, struct file *,
+		pgoff_t index, unsigned long nr_to_read,
 		unsigned long lookahead_size);
 
 /*
  * Submit IO for the read-ahead request in file_ra_state.
  */
-static inline unsigned long ra_submit(struct file_ra_state *ra,
+static inline void ra_submit(struct file_ra_state *ra,
 		struct address_space *mapping, struct file *filp)
 {
-	return __do_page_cache_readahead(mapping, filp,
-					ra->start, ra->size, ra->async_size);
+	__do_page_cache_readahead(mapping, filp,
+			ra->start, ra->size, ra->async_size);
 }
 
 /*
diff --git a/mm/readahead.c b/mm/readahead.c
index 2fe72cd29b47..41a592886da7 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -149,10 +149,8 @@ static int read_pages(struct address_space *mapping, struct file *filp,
  * the pages first, then submits them for I/O. This avoids the very bad
  * behaviour which would occur if page allocations are causing VM writeback.
  * We really don't want to intermingle reads and writes like that.
- *
- * Returns the number of pages requested, or the maximum amount of I/O allowed.
  */
-unsigned int __do_page_cache_readahead(struct address_space *mapping,
+void __do_page_cache_readahead(struct address_space *mapping,
 		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
 		unsigned long lookahead_size)
 {
@@ -166,7 +164,7 @@ unsigned int __do_page_cache_readahead(struct address_space *mapping,
 	gfp_t gfp_mask = readahead_gfp_mask(mapping);
 
 	if (isize == 0)
-		goto out;
+		return;
 
 	end_index = ((isize - 1) >> PAGE_SHIFT);
 
@@ -211,23 +209,21 @@ unsigned int __do_page_cache_readahead(struct address_space *mapping,
 	if (nr_pages)
 		read_pages(mapping, filp, &page_pool, nr_pages, gfp_mask);
 	BUG_ON(!list_empty(&page_pool));
-out:
-	return nr_pages;
 }
 
 /*
  * Chunk the readahead into 2 megabyte units, so that we don't pin too much
  * memory at once.
  */
-int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			       pgoff_t offset, unsigned long nr_to_read)
+void force_page_cache_readahead(struct address_space *mapping,
+		struct file *filp, pgoff_t offset, unsigned long nr_to_read)
 {
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	struct file_ra_state *ra = &filp->f_ra;
 	unsigned long max_pages;
 
 	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
-		return -EINVAL;
+		return;
 
 	/*
 	 * If the request exceeds the readahead window, allow the read to
@@ -245,7 +241,6 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 		offset += this_chunk;
 		nr_to_read -= this_chunk;
 	}
-	return 0;
 }
 
 /*
@@ -378,11 +373,10 @@ static int try_context_readahead(struct address_space *mapping,
 /*
  * A minimal readahead algorithm for trivial sequential/random reads.
  */
-static unsigned long
-ondemand_readahead(struct address_space *mapping,
-		   struct file_ra_state *ra, struct file *filp,
-		   bool hit_readahead_marker, pgoff_t offset,
-		   unsigned long req_size)
+static void ondemand_readahead(struct address_space *mapping,
+		struct file_ra_state *ra, struct file *filp,
+		bool hit_readahead_marker, pgoff_t offset,
+		unsigned long req_size)
 {
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	unsigned long max_pages = ra->ra_pages;
@@ -428,7 +422,7 @@ ondemand_readahead(struct address_space *mapping,
 		rcu_read_unlock();
 
 		if (!start || start - offset > max_pages)
-			return 0;
+			return;
 
 		ra->start = start;
 		ra->size = start - offset;	/* old async_size */
@@ -464,7 +458,8 @@ ondemand_readahead(struct address_space *mapping,
 	 * standalone, small random read
 	 * Read as is, and do not pollute the readahead state.
 	 */
-	return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);
+	__do_page_cache_readahead(mapping, filp, offset, req_size, 0);
+	return;
 
 initial_readahead:
 	ra->start = offset;
@@ -489,7 +484,7 @@ ondemand_readahead(struct address_space *mapping,
 		}
 	}
 
-	return ra_submit(ra, mapping, filp);
+	ra_submit(ra, mapping, filp);
 }
 
 /**
-- 
2.25.0


Thread overview: 385+ messages
2020-02-19 21:00 [PATCH v7 00/23] Change readahead API Matthew Wilcox
2020-02-19 21:00 ` [PATCH v7 01/24] mm: Move readahead prototypes from mm.h Matthew Wilcox
2020-02-21  2:43   ` John Hubbard
2020-02-21 21:48     ` Matthew Wilcox
2020-02-22  0:15       ` John Hubbard
2020-02-24 21:32   ` Christoph Hellwig
2020-02-19 21:00 ` [PATCH v7 02/24] mm: Return void from various readahead functions Matthew Wilcox [this message]
2020-02-24 21:33   ` Christoph Hellwig
2020-02-19 21:00 ` [PATCH v7 03/24] mm: Ignore return value of ->readpages Matthew Wilcox
2020-02-19 21:00 ` [PATCH v7 04/24] mm: Move readahead nr_pages check into read_pages Matthew Wilcox
2020-02-20 14:36   ` Zi Yan
2020-02-21  4:24   ` John Hubbard
2020-02-24 21:34   ` Christoph Hellwig
2020-02-19 21:00 ` [PATCH v7 05/24] mm: Use readahead_control to pass arguments Matthew Wilcox
2020-02-24 21:36   ` Christoph Hellwig
2020-02-19 21:00 ` [PATCH v7 06/24] mm: Rename various 'offset' parameters to 'index' Matthew Wilcox
2020-02-21  2:21   ` John Hubbard
2020-02-21  3:27   ` John Hubbard
2020-02-19 21:00 ` [PATCH v7 07/24] mm: rename readahead loop variable to 'i' Matthew Wilcox
2020-02-19 21:00 ` [PATCH v7 08/24] mm: Remove 'page_offset' from readahead loop Matthew Wilcox
2020-02-21  2:48   ` John Hubbard
2020-02-24 21:37   ` Christoph Hellwig
2020-02-19 21:00 ` [PATCH v7 09/24] mm: Put readahead pages in cache earlier Matthew Wilcox
2020-02-21  3:19   ` John Hubbard
2020-02-21  3:43     ` Matthew Wilcox
2020-02-21  4:19       ` John Hubbard
2020-02-24 21:40   ` Christoph Hellwig
2020-02-19 21:00 ` [PATCH v7 10/24] mm: Add readahead address space operation Matthew Wilcox
2020-02-20 15:00   ` Zi Yan
2020-02-20 15:10     ` Matthew Wilcox
2020-02-21  4:30   ` John Hubbard
2020-02-24 21:41   ` Christoph Hellwig
2020-02-19 21:00 ` [PATCH v7 11/24] mm: Move end_index check out of readahead loop Matthew Wilcox
2020-02-21  3:50   ` John Hubbard
2020-02-21 15:35     ` Matthew Wilcox
2020-02-21 19:41       ` John Hubbard
2020-02-19 21:00 ` [PATCH v7 12/24] mm: Add page_cache_readahead_unbounded Matthew Wilcox
2020-02-24 21:53   ` Christoph Hellwig
2020-02-19 21:00 ` [PATCH v7 13/24] fs: Convert mpage_readpages to mpage_readahead Matthew Wilcox
2020-02-24 21:54   ` Christoph Hellwig
2020-02-19 21:00 ` [PATCH v7 14/24] btrfs: Convert from readpages to readahead Matthew Wilcox
2020-02-20  9:42   ` Johannes Thumshirn
2020-02-20 13:48     ` Matthew Wilcox
2020-02-20 15:46       ` Christoph Hellwig
2020-02-20 15:54         ` Matthew Wilcox
2020-02-20 15:57           ` Christoph Hellwig
2020-02-24 21:43             ` Christoph Hellwig
2020-02-24 21:54               ` Matthew Wilcox
2020-02-24 21:57                 ` Christoph Hellwig
2020-02-19 21:00 ` [PATCH v7 15/24] erofs: Convert uncompressed files " Matthew Wilcox
2020-02-19 21:00 ` [PATCH v7 16/24] erofs: Convert compressed " Matthew Wilcox
2020-02-19 21:00 ` [PATCH v7 17/24] ext4: Convert " Matthew Wilcox
2020-02-19 21:00 ` [PATCH v7 18/24] ext4: Pass the inode to ext4_mpage_readpages Matthew Wilcox
2020-02-19 21:00 ` [PATCH v7 19/24] f2fs: Convert from readpages to readahead Matthew Wilcox
2020-02-19 21:00 ` [PATCH v7 20/24] fuse: " Matthew Wilcox
2020-02-19 21:01 ` [PATCH v7 21/24] iomap: Restructure iomap_readpages_actor Matthew Wilcox
2020-02-20 15:47   ` Christoph Hellwig
2020-02-20 16:24     ` Matthew Wilcox
2020-02-24 22:17       ` Christoph Hellwig
2020-02-25  1:49         ` Matthew Wilcox
2020-02-22  0:44   ` Darrick J. Wong
2020-02-22  1:54     ` Matthew Wilcox
2020-02-23 17:55       ` Darrick J. Wong
2020-02-19 21:01 ` [PATCH v7 22/24] iomap: Convert from readpages to readahead Matthew Wilcox
2020-02-20 15:49   ` Christoph Hellwig
2020-02-20 16:57     ` Matthew Wilcox
2020-02-22  1:00       ` Darrick J. Wong
2020-02-24  4:33         ` Matthew Wilcox
2020-02-24 16:52           ` Darrick J. Wong
2020-02-22  1:03   ` Darrick J. Wong
2020-02-22  1:09     ` Matthew Wilcox
2020-02-19 21:01 ` [PATCH v7 23/24] mm: Document why we don't set PageReadahead Matthew Wilcox
2020-02-19 21:01 ` [PATCH v7 24/24] mm: Use memalloc_nofs_save in readahead path Matthew Wilcox
2020-02-20 17:54 ` [PATCH v7 00/23] Change readahead API David Sterba
2020-02-20 22:39   ` Matthew Wilcox
2020-02-21 11:59     ` David Sterba
