* [PATCH mm-unstable] ext4: Convert mext_page_double_lock() to mext_folio_double_lock()
From: Vishal Moola (Oracle) @ 2022-12-06 20:41 UTC
  To: linux-mm; +Cc: tytso, linux-ext4, linux-kernel, akpm, Vishal Moola (Oracle)

Converts mext_page_double_lock() to use folios. This change saves
146 bytes of kernel text and removes 3 calls to compound_head().

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
 fs/ext4/move_extent.c | 46 +++++++++++++++++++++----------------------
 1 file changed, 23 insertions(+), 23 deletions(-)

diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 8dbb87edf24c..2de9829aed63 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -110,22 +110,23 @@ mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
 }
 
 /**
- * mext_page_double_lock - Grab and lock pages on both @inode1 and @inode2
+ * mext_folio_double_lock - Grab and lock folios on both @inode1 and @inode2
  *
  * @inode1:	the inode structure
  * @inode2:	the inode structure
- * @index1:	page index
- * @index2:	page index
- * @page:	result page vector
+ * @index1:	folio index
+ * @index2:	folio index
+ * @folio:	result folio vector
  *
- * Grab two locked pages for inode's by inode order
+ * Grab two locked folios for the inodes, in inode order
  */
 static int
-mext_page_double_lock(struct inode *inode1, struct inode *inode2,
-		      pgoff_t index1, pgoff_t index2, struct page *page[2])
+mext_folio_double_lock(struct inode *inode1, struct inode *inode2,
+		      pgoff_t index1, pgoff_t index2, struct folio *folio[2])
 {
 	struct address_space *mapping[2];
 	unsigned int flags;
+	unsigned fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
 
 	BUG_ON(!inode1 || !inode2);
 	if (inode1 < inode2) {
@@ -138,28 +139,30 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2,
 	}
 
 	flags = memalloc_nofs_save();
-	page[0] = grab_cache_page_write_begin(mapping[0], index1);
-	if (!page[0]) {
+	folio[0] = __filemap_get_folio(mapping[0], index1, fgp_flags,
+			mapping_gfp_mask(mapping[0]));
+	if (!folio[0]) {
 		memalloc_nofs_restore(flags);
 		return -ENOMEM;
 	}
 
-	page[1] = grab_cache_page_write_begin(mapping[1], index2);
+	folio[1] = __filemap_get_folio(mapping[1], index2, fgp_flags,
+			mapping_gfp_mask(mapping[1]));
 	memalloc_nofs_restore(flags);
-	if (!page[1]) {
-		unlock_page(page[0]);
-		put_page(page[0]);
+	if (!folio[1]) {
+		folio_unlock(folio[0]);
+		folio_put(folio[0]);
 		return -ENOMEM;
 	}
 	/*
-	 * grab_cache_page_write_begin() may not wait on page's writeback if
+	 * __filemap_get_folio() may not wait on folio's writeback if
 	 * BDI not demand that. But it is reasonable to be very conservative
-	 * here and explicitly wait on page's writeback
+	 * here and explicitly wait on folio's writeback
 	 */
-	wait_on_page_writeback(page[0]);
-	wait_on_page_writeback(page[1]);
+	folio_wait_writeback(folio[0]);
+	folio_wait_writeback(folio[1]);
 	if (inode1 > inode2)
-		swap(page[0], page[1]);
+		swap(folio[0], folio[1]);
 
 	return 0;
 }
@@ -252,7 +255,6 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 		     int block_len_in_page, int unwritten, int *err)
 {
 	struct inode *orig_inode = file_inode(o_filp);
-	struct page *pagep[2] = {NULL, NULL};
 	struct folio *folio[2] = {NULL, NULL};
 	handle_t *handle;
 	ext4_lblk_t orig_blk_offset, donor_blk_offset;
@@ -303,8 +305,8 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 
 	replaced_size = data_size;
 
-	*err = mext_page_double_lock(orig_inode, donor_inode, orig_page_offset,
-				     donor_page_offset, pagep);
+	*err = mext_folio_double_lock(orig_inode, donor_inode, orig_page_offset,
+				     donor_page_offset, folio);
 	if (unlikely(*err < 0))
 		goto stop_journal;
 	/*
@@ -314,8 +316,6 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 	 * hold page's lock, if it is still the case data copy is not
 	 * necessary, just swap data blocks between orig and donor.
 	 */
-	folio[0] = page_folio(pagep[0]);
-	folio[1] = page_folio(pagep[1]);
 
 	VM_BUG_ON_FOLIO(folio_test_large(folio[0]), folio[0]);
 	VM_BUG_ON_FOLIO(folio_test_large(folio[1]), folio[1]);
-- 
2.38.1


* Re: [PATCH mm-unstable] ext4: Convert mext_page_double_lock() to mext_folio_double_lock()
From: Matthew Wilcox @ 2022-12-06 20:50 UTC
  To: Vishal Moola (Oracle); +Cc: linux-mm, tytso, linux-ext4, linux-kernel, akpm

On Tue, Dec 06, 2022 at 12:41:15PM -0800, Vishal Moola (Oracle) wrote:
> Converts mext_page_double_lock() to use folios. This change saves
> 146 bytes of kernel text and removes 3 calls to compound_head().

I think it actually removes more than three ...

>  	flags = memalloc_nofs_save();
> -	page[0] = grab_cache_page_write_begin(mapping[0], index1);
> -	if (!page[0]) {
> +	folio[0] = __filemap_get_folio(mapping[0], index1, fgp_flags,
> +			mapping_gfp_mask(mapping[0]));

one

> +	if (!folio[0]) {
>  		memalloc_nofs_restore(flags);
>  		return -ENOMEM;
>  	}
>  
> -	page[1] = grab_cache_page_write_begin(mapping[1], index2);
> +	folio[1] = __filemap_get_folio(mapping[1], index2, fgp_flags,
> +			mapping_gfp_mask(mapping[1]));

two

>  	memalloc_nofs_restore(flags);
> -	if (!page[1]) {
> -		unlock_page(page[0]);
> -		put_page(page[0]);
> +	if (!folio[1]) {
> +		folio_unlock(folio[0]);
> +		folio_put(folio[0]);

four

>  		return -ENOMEM;
>  	}
>  	/*
> -	 * grab_cache_page_write_begin() may not wait on page's writeback if
> +	 * __filemap_get_folio() may not wait on folio's writeback if
>  	 * BDI not demand that. But it is reasonable to be very conservative
> -	 * here and explicitly wait on page's writeback
> +	 * here and explicitly wait on folio's writeback
>  	 */
> -	wait_on_page_writeback(page[0]);
> -	wait_on_page_writeback(page[1]);
> +	folio_wait_writeback(folio[0]);
> +	folio_wait_writeback(folio[1]);

six

>  	if (inode1 > inode2)
> -		swap(page[0], page[1]);
> +		swap(folio[0], folio[1]);
>  
>  	return 0;
>  }
> @@ -252,7 +255,6 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
>  		     int block_len_in_page, int unwritten, int *err)
>  {
>  	struct inode *orig_inode = file_inode(o_filp);
> -	struct page *pagep[2] = {NULL, NULL};
>  	struct folio *folio[2] = {NULL, NULL};
>  	handle_t *handle;
>  	ext4_lblk_t orig_blk_offset, donor_blk_offset;
> @@ -303,8 +305,8 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
>  
>  	replaced_size = data_size;
>  
> -	*err = mext_page_double_lock(orig_inode, donor_inode, orig_page_offset,
> -				     donor_page_offset, pagep);
> +	*err = mext_folio_double_lock(orig_inode, donor_inode, orig_page_offset,
> +				     donor_page_offset, folio);
>  	if (unlikely(*err < 0))
>  		goto stop_journal;
>  	/*
> @@ -314,8 +316,6 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
>  	 * hold page's lock, if it is still the case data copy is not
>  	 * necessary, just swap data blocks between orig and donor.
>  	 */
> -	folio[0] = page_folio(pagep[0]);
> -	folio[1] = page_folio(pagep[1]);

eight.

Three are inline, which accounts for the 146 bytes, but we're also removing
out-of-line calls in addition to the inline ones.
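
For reference, the legacy wrappers live in mm/folio-compat.c and look
roughly like this (a simplified sketch; the exact code varies by kernel
version).  The hidden compound_head() sits inside page_folio():

void unlock_page(struct page *page)
{
	/* page_folio() is where the compound_head() call hides */
	return folio_unlock(page_folio(page));
}

void wait_on_page_writeback(struct page *page)
{
	return folio_wait_writeback(page_folio(page));
}

Calling folio_unlock()/folio_wait_writeback() directly, as this patch
does, skips that page -> folio lookup entirely.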

Anyway, whether the description is updated or not, this looks good to me.

Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>


* Re: [PATCH mm-unstable] ext4: Convert mext_page_double_lock() to mext_folio_double_lock()
From: Matthew Wilcox @ 2022-12-06 20:53 UTC
  To: Vishal Moola (Oracle); +Cc: linux-mm, tytso, linux-ext4, linux-kernel, akpm

On Tue, Dec 06, 2022 at 08:50:02PM +0000, Matthew Wilcox wrote:
> >  	flags = memalloc_nofs_save();
> > -	page[0] = grab_cache_page_write_begin(mapping[0], index1);
> > -	if (!page[0]) {
> > +	folio[0] = __filemap_get_folio(mapping[0], index1, fgp_flags,
> > +			mapping_gfp_mask(mapping[0]));
> 
> one
> 
> > +	if (!folio[0]) {
> >  		memalloc_nofs_restore(flags);
> >  		return -ENOMEM;
> >  	}
> >  
> > -	page[1] = grab_cache_page_write_begin(mapping[1], index2);
> > +	folio[1] = __filemap_get_folio(mapping[1], index2, fgp_flags,
> > +			mapping_gfp_mask(mapping[1]));
> 
> two

*facepalm*.  Those don't contain calls to compound_head(); they contain
calls to folio_file_page(), which is still a moderately expensive,
unnecessary conversion, but a conversion in the other direction.
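
To spell it out: at the time of this patch, grab_cache_page_write_begin()
was roughly the following pair of compat wrappers (a simplified sketch
based on mm/folio-compat.c; details may differ between kernel versions):

struct page *grab_cache_page_write_begin(struct address_space *mapping,
					 pgoff_t index)
{
	unsigned fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;

	return pagecache_get_page(mapping, index, fgp_flags,
			mapping_gfp_mask(mapping));
}

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
		int fgp_flags, gfp_t gfp)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index, fgp_flags, gfp);
	if (!folio)
		return NULL;
	/* the folio -> page conversion the patch now avoids */
	return folio_file_page(folio, index);
}

Calling __filemap_get_folio() directly keeps the folio and skips that
round trip; it is also where the fgp_flags in the patch come from.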


* Re: [PATCH mm-unstable] ext4: Convert mext_page_double_lock() to mext_folio_double_lock()
From: Vishal Moola @ 2022-12-07  2:22 UTC
  To: Matthew Wilcox; +Cc: linux-mm, tytso, linux-ext4, linux-kernel, akpm

> Three are inline, which accounts for the 146 bytes, but we're also removing
> out-of-line calls in addition to the inline ones.
>
> Anyway, whether the description is updated or not, this looks good to me.

I seem to have miscounted. We can correct that part in the description
to "removes 6 calls to compound_head() and 2 calls to folio_file_page()."
Thanks for the review!

> Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>

