From: John Hubbard <jhubbard@nvidia.com>
To: Matthew Wilcox <willy@infradead.org>, <linux-fsdevel@vger.kernel.org>
Cc: <linux-mm@kvack.org>, <linux-kernel@vger.kernel.org>,
	<linux-btrfs@vger.kernel.org>, <linux-erofs@lists.ozlabs.org>,
	<linux-ext4@vger.kernel.org>,
	<linux-f2fs-devel@lists.sourceforge.net>,
	<cluster-devel@redhat.com>, <ocfs2-devel@oss.oracle.com>,
	<linux-xfs@vger.kernel.org>
Subject: Re: [PATCH v7 09/24] mm: Put readahead pages in cache earlier
Date: Thu, 20 Feb 2020 19:19:58 -0800
Message-ID: <5691442b-56c7-7b0d-d91b-275be52abb42@nvidia.com>
In-Reply-To: <20200219210103.32400-10-willy@infradead.org>

On 2/19/20 1:00 PM, Matthew Wilcox wrote:
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> 
> When populating the page cache for readahead, mappings that use
> ->readpages must populate the page cache themselves as the pages are
> passed on a linked list which would normally be used for the page cache's
> LRU.  For mappings that use ->readpage or the upcoming ->readahead method,
> we can put the pages into the page cache as soon as they're allocated,
> which solves a race between readahead and direct IO.  It also lets us
> remove the gfp argument from read_pages().
> 
> Use the new readahead_page() API to implement the repeated calls to
> ->readpage(), just like most filesystems will.  This iterator also
> supports huge pages, even though none of the filesystems have been
> converted to use them yet.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  include/linux/pagemap.h | 20 +++++++++++++++++
>  mm/readahead.c          | 48 +++++++++++++++++++++++++----------------
>  2 files changed, 49 insertions(+), 19 deletions(-)
> 
> diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
> index 55fcea0249e6..4989d330fada 100644
> --- a/include/linux/pagemap.h
> +++ b/include/linux/pagemap.h
> @@ -647,8 +647,28 @@ struct readahead_control {
>  /* private: use the readahead_* accessors instead */
>  	pgoff_t _index;
>  	unsigned int _nr_pages;
> +	unsigned int _batch_count;
>  };
>  
> +static inline struct page *readahead_page(struct readahead_control *rac)
> +{
> +	struct page *page;
> +
> +	BUG_ON(rac->_batch_count > rac->_nr_pages);
> +	rac->_nr_pages -= rac->_batch_count;
> +	rac->_index += rac->_batch_count;
> +	rac->_batch_count = 0;


Is it intentional to set rac->_batch_count twice (here, and below)? The
only reason I can see would be if a caller needed to use ->_batch_count
in the "return NULL" case, which doesn't seem to come up...


> +
> +	if (!rac->_nr_pages)
> +		return NULL;
> +
> +	page = xa_load(&rac->mapping->i_pages, rac->_index);
> +	VM_BUG_ON_PAGE(!PageLocked(page), page);
> +	rac->_batch_count = hpage_nr_pages(page);
> +
> +	return page;
> +}
> +
>  /* The number of pages in this readahead block */
>  static inline unsigned int readahead_count(struct readahead_control *rac)
>  {
> diff --git a/mm/readahead.c b/mm/readahead.c
> index 83df5c061d33..aaa209559ba2 100644
> --- a/mm/readahead.c
> +++ b/mm/readahead.c
> @@ -113,15 +113,14 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
>  
>  EXPORT_SYMBOL(read_cache_pages);
>  
> -static void read_pages(struct readahead_control *rac, struct list_head *pages,
> -		gfp_t gfp)
> +static void read_pages(struct readahead_control *rac, struct list_head *pages)
>  {
>  	const struct address_space_operations *aops = rac->mapping->a_ops;
> +	struct page *page;
>  	struct blk_plug plug;
> -	unsigned page_idx;
>  
>  	if (!readahead_count(rac))
> -		return;
> +		goto out;
>  
>  	blk_start_plug(&plug);
>  
> @@ -130,23 +129,23 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages,
>  				readahead_count(rac));
>  		/* Clean up the remaining pages */
>  		put_pages_list(pages);
> -		goto out;
> -	}
> -
> -	for (page_idx = 0; page_idx < readahead_count(rac); page_idx++) {
> -		struct page *page = lru_to_page(pages);
> -		list_del(&page->lru);
> -		if (!add_to_page_cache_lru(page, rac->mapping, page->index,
> -				gfp))
> +		rac->_index += rac->_nr_pages;
> +		rac->_nr_pages = 0;
> +	} else {
> +		while ((page = readahead_page(rac))) {
>  			aops->readpage(rac->file, page);
> -		put_page(page);
> +			put_page(page);
> +		}
>  	}
>  
> -out:
>  	blk_finish_plug(&plug);
>  
>  	BUG_ON(!list_empty(pages));
> -	rac->_nr_pages = 0;
> +	BUG_ON(readahead_count(rac));
> +
> +out:
> +	/* If we were called due to a conflicting page, skip over it */


Tiny documentation nit: What if we were *not* called due to a conflicting
page? (And what is a "conflicting page", in this context, btw?) The next
line unconditionally moves the index ahead, so the "if" part of the
comment really confuses me.
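
If the intent is what I'm guessing further down (another page got into
the cache at this index first), then maybe a comment along these lines
would read better -- purely a suggestion, based on my current reading:

	/*
	 * Advance past the page at rac->_index: either it conflicted
	 * with a page that was already in the page cache, or this was
	 * the final flush and the rac is about to be discarded, in
	 * which case the increment is harmless.
	 */
	rac->_index++;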


> +	rac->_index++;
>  }
>  
>  /*
> @@ -165,9 +164,11 @@ void __do_page_cache_readahead(struct address_space *mapping,
>  	LIST_HEAD(page_pool);
>  	loff_t isize = i_size_read(inode);
>  	gfp_t gfp_mask = readahead_gfp_mask(mapping);
> +	bool use_list = mapping->a_ops->readpages;
>  	struct readahead_control rac = {
>  		.mapping = mapping,
>  		.file = filp,
> +		._index = index,
>  		._nr_pages = 0,
>  	};
>  	unsigned long i;
> @@ -184,6 +185,8 @@ void __do_page_cache_readahead(struct address_space *mapping,
>  		if (index + i > end_index)
>  			break;
>  
> +		BUG_ON(index + i != rac._index + rac._nr_pages);
> +
>  		page = xa_load(&mapping->i_pages, index + i);
>  		if (page && !xa_is_value(page)) {
>  			/*
> @@ -191,15 +194,22 @@ void __do_page_cache_readahead(struct address_space *mapping,
>  			 * contiguous pages before continuing with the next
>  			 * batch.
>  			 */
> -			read_pages(&rac, &page_pool, gfp_mask);
> +			read_pages(&rac, &page_pool);
>  			continue;
>  		}
>  
>  		page = __page_cache_alloc(gfp_mask);
>  		if (!page)
>  			break;
> -		page->index = index + i;
> -		list_add(&page->lru, &page_pool);
> +		if (use_list) {
> +			page->index = index + i;
> +			list_add(&page->lru, &page_pool);
> +		} else if (add_to_page_cache_lru(page, mapping, index + i,
> +					gfp_mask) < 0) {


I still think you'll want to compare against != 0, rather than < 0, here.


> +			put_page(page);
> +			read_pages(&rac, &page_pool);


Doing a read_pages() in the error case is because... actually, I'm not
sure yet. Why do we do this? Is it effectively a retry?
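
My best guess at the intent, spelled out as comments on the statements in
question (please correct me if this is off): the add_to_page_cache_lru()
failure means some other path (direct IO?) already put a page at
index + i, so we do:

	put_page(page);			/* drop the page we just allocated */
	read_pages(&rac, &page_pool);	/* submit the batch built up so far */
	continue;			/* ...and the _index++ at the end of
					 * read_pages() skips past the
					 * conflicting slot, so it's not
					 * really a retry of this index? */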


> +			continue;
> +		}
>  		if (i == nr_to_read - lookahead_size)
>  			SetPageReadahead(page);
>  		rac._nr_pages++;
> @@ -210,7 +220,7 @@ void __do_page_cache_readahead(struct address_space *mapping,
>  	 * uptodate then the caller will launch readpage again, and
>  	 * will then handle the error.
>  	 */
> -	read_pages(&rac, &page_pool, gfp_mask);
> +	read_pages(&rac, &page_pool);
>  }
>  
>  /*
> 

Didn't spot any actual errors; mostly just my own questions here. :)


thanks,
-- 
John Hubbard
NVIDIA
