* [PATCH] btrfs: use helper to zero end of last page
From: David Sterba @ 2019-12-16 18:50 UTC
  To: linux-btrfs; +Cc: David Sterba

zero_user_segment() is open coded in several places; use the helper
instead. btrfs_page_mkwrite() uses kmap()/kunmap(), but replacing them
with the _atomic variants is not a problem, as they're more restrictive
than required.
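
For reference, zero_user_segment() from include/linux/highmem.h bundles
exactly this pattern. A simplified sketch of the helper (illustrative,
not verbatim kernel source):

	/*
	 * Zero the byte range [start, end) of @page through a temporary
	 * atomic kernel mapping, then flush the data cache.
	 */
	static inline void zero_user_segment(struct page *page,
					     unsigned start, unsigned end)
	{
		void *kaddr = kmap_atomic(page);

		BUG_ON(end > PAGE_SIZE);
		if (end > start)
			memset(kaddr + start, 0, end - start);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);
	}

Since the helper maps with kmap_atomic() internally, it also subsumes
the plain kmap()/kunmap() pair in btrfs_page_mkwrite().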

Signed-off-by: David Sterba <dsterba@suse.com>
---
 fs/btrfs/compression.c | 15 ++-------------
 fs/btrfs/extent_io.c   | 39 +++++++--------------------------------
 fs/btrfs/inode.c       | 10 +++-------
 3 files changed, 12 insertions(+), 52 deletions(-)

diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index ed05e5277399..299fcfdfb10f 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -602,19 +602,8 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		}
 		free_extent_map(em);
 
-		if (page->index == end_index) {
-			char *userpage;
-			size_t zero_offset = offset_in_page(isize);
-
-			if (zero_offset) {
-				int zeros;
-				zeros = PAGE_SIZE - zero_offset;
-				userpage = kmap_atomic(page);
-				memset(userpage + zero_offset, 0, zeros);
-				flush_dcache_page(page);
-				kunmap_atomic(userpage);
-			}
-		}
+		if (page->index == end_index)
+			zero_user_segment(page, offset_in_page(isize), PAGE_SIZE);
 
 		ret = bio_add_page(cb->orig_bio, page,
 				   PAGE_SIZE, 0);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 394beb474a69..d6b3af7f1675 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3093,31 +3093,18 @@ static int __do_readpage(struct extent_io_tree *tree,
 		}
 	}
 
-	if (page->index == last_byte >> PAGE_SHIFT) {
-		char *userpage;
-		size_t zero_offset = offset_in_page(last_byte);
-
-		if (zero_offset) {
-			iosize = PAGE_SIZE - zero_offset;
-			userpage = kmap_atomic(page);
-			memset(userpage + zero_offset, 0, iosize);
-			flush_dcache_page(page);
-			kunmap_atomic(userpage);
-		}
-	}
+	if (page->index == last_byte >> PAGE_SHIFT)
+		zero_user_segment(page, offset_in_page(last_byte), PAGE_SIZE);
+
 	while (cur <= end) {
 		bool force_bio_submit = false;
 		u64 offset;
 
 		if (cur >= last_byte) {
-			char *userpage;
 			struct extent_state *cached = NULL;
 
+			zero_user_segment(page, pg_offset, PAGE_SIZE);
 			iosize = PAGE_SIZE - pg_offset;
-			userpage = kmap_atomic(page);
-			memset(userpage + pg_offset, 0, iosize);
-			flush_dcache_page(page);
-			kunmap_atomic(userpage);
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
 					    &cached, GFP_NOFS);
 			unlock_extent_cached(tree, cur,
@@ -3202,14 +3189,9 @@ static int __do_readpage(struct extent_io_tree *tree,
 
 		/* we've found a hole, just zero and go on */
 		if (block_start == EXTENT_MAP_HOLE) {
-			char *userpage;
 			struct extent_state *cached = NULL;
 
-			userpage = kmap_atomic(page);
-			memset(userpage + pg_offset, 0, iosize);
-			flush_dcache_page(page);
-			kunmap_atomic(userpage);
-
+			zero_user_segment(page, pg_offset, pg_offset + iosize);
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
 					    &cached, GFP_NOFS);
 			unlock_extent_cached(tree, cur,
@@ -3564,15 +3546,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 		return 0;
 	}
 
-	if (page->index == end_index) {
-		char *userpage;
-
-		userpage = kmap_atomic(page);
-		memset(userpage + pg_offset, 0,
-		       PAGE_SIZE - pg_offset);
-		kunmap_atomic(userpage);
-		flush_dcache_page(page);
-	}
+	if (page->index == end_index)
+		zero_user_segment(page, pg_offset, PAGE_SIZE);
 
 	set_page_extent_mapped(page);
 
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index f27377d8ec55..24337de25a8b 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8308,7 +8308,6 @@ vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
 	struct btrfs_ordered_extent *ordered;
 	struct extent_state *cached_state = NULL;
 	struct extent_changeset *data_reserved = NULL;
-	char *kaddr;
 	unsigned long zero_start;
 	loff_t size;
 	vm_fault_t ret;
@@ -8414,12 +8413,9 @@ vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
 	else
 		zero_start = PAGE_SIZE;
 
-	if (zero_start != PAGE_SIZE) {
-		kaddr = kmap(page);
-		memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
-		flush_dcache_page(page);
-		kunmap(page);
-	}
+	if (zero_start != PAGE_SIZE)
+		zero_user_segment(page, zero_start, PAGE_SIZE);
+
 	ClearPageChecked(page);
 	set_page_dirty(page);
 	SetPageUptodate(page);
-- 
2.24.0


* Re: [PATCH] btrfs: use helper to zero end of last page
From: Anand Jain @ 2019-12-17  9:31 UTC
  To: David Sterba, linux-btrfs

On 17/12/19 2:50 AM, David Sterba wrote:
> zero_user_segment() is open coded in several places; use the helper
> instead. btrfs_page_mkwrite() uses kmap()/kunmap(), but replacing them
> with the _atomic variants is not a problem, as they're more restrictive
> than required.
> 
> Signed-off-by: David Sterba <dsterba@suse.com>
> ---
>   fs/btrfs/compression.c | 15 ++-------------
>   fs/btrfs/extent_io.c   | 39 +++++++--------------------------------
>   fs/btrfs/inode.c       | 10 +++-------
>   3 files changed, 12 insertions(+), 52 deletions(-)
> 
> diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
> index ed05e5277399..299fcfdfb10f 100644
> --- a/fs/btrfs/compression.c
> +++ b/fs/btrfs/compression.c
> @@ -602,19 +602,8 @@ static noinline int add_ra_bio_pages(struct inode *inode,
>   		}
>   		free_extent_map(em);
>   
> -		if (page->index == end_index) {
> -			char *userpage;
> -			size_t zero_offset = offset_in_page(isize);
> -
> -			if (zero_offset) {
> -				int zeros;
> -				zeros = PAGE_SIZE - zero_offset;
> -				userpage = kmap_atomic(page);
> -				memset(userpage + zero_offset, 0, zeros);
> -				flush_dcache_page(page);
> -				kunmap_atomic(userpage);
> -			}
> -		}
> +		if (page->index == end_index)
> +			zero_user_segment(page, offset_in_page(isize), PAGE_SIZE);
>   

  Previously we zeroed only when the offset within the page was not 0;
  now the helper is called unconditionally. Can you confirm that is ok
  and update the changelog?
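
  To make the difference concrete, here is an illustrative sketch (the
  function names zero_tail_old/zero_tail_new are hypothetical, for
  comparison only):

	/* old behaviour: zero only the tail past EOF, if any */
	static void zero_tail_old(struct page *page, loff_t isize)
	{
		size_t zero_offset = offset_in_page(isize);

		/* skipped entirely when isize is page-aligned */
		if (zero_offset) {
			char *kaddr = kmap_atomic(page);

			memset(kaddr + zero_offset, 0,
			       PAGE_SIZE - zero_offset);
			flush_dcache_page(page);
			kunmap_atomic(kaddr);
		}
	}

	/* new behaviour: unconditional call */
	static void zero_tail_new(struct page *page, loff_t isize)
	{
		/*
		 * When isize is page-aligned, offset_in_page(isize) == 0
		 * and the whole page, [0, PAGE_SIZE), is zeroed.
		 */
		zero_user_segment(page, offset_in_page(isize), PAGE_SIZE);
	}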


>   		ret = bio_add_page(cb->orig_bio, page,
>   				   PAGE_SIZE, 0);
> diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
> index 394beb474a69..d6b3af7f1675 100644
> --- a/fs/btrfs/extent_io.c
> +++ b/fs/btrfs/extent_io.c
> @@ -3093,31 +3093,18 @@ static int __do_readpage(struct extent_io_tree *tree,
>   		}
>   	}
>   
> -	if (page->index == last_byte >> PAGE_SHIFT) {
> -		char *userpage;
> -		size_t zero_offset = offset_in_page(last_byte);
> -
> -		if (zero_offset) {
> -			iosize = PAGE_SIZE - zero_offset;
> -			userpage = kmap_atomic(page);
> -			memset(userpage + zero_offset, 0, iosize);
> -			flush_dcache_page(page);
> -			kunmap_atomic(userpage);
> -		}
> -	}
> +	if (page->index == last_byte >> PAGE_SHIFT)
> +		zero_user_segment(page, offset_in_page(last_byte), PAGE_SIZE);
> +

  Here also.

Thanks, Anand


* Re: [PATCH] btrfs: use helper to zero end of last page
From: David Sterba @ 2019-12-17 17:49 UTC
  To: Anand Jain; +Cc: David Sterba, linux-btrfs

On Tue, Dec 17, 2019 at 05:31:14PM +0800, Anand Jain wrote:
> > -		if (page->index == end_index) {
> > -			char *userpage;
> > -			size_t zero_offset = offset_in_page(isize);
> > -
> > -			if (zero_offset) {
> > -				int zeros;
> > -				zeros = PAGE_SIZE - zero_offset;
> > -				userpage = kmap_atomic(page);
> > -				memset(userpage + zero_offset, 0, zeros);
> > -				flush_dcache_page(page);
> > -				kunmap_atomic(userpage);
> > -			}
> > -		}
> > +		if (page->index == end_index)
> > +			zero_user_segment(page, offset_in_page(isize), PAGE_SIZE);
> 
>   Previously we zeroed only when the offset within the page was not 0;
>   now the helper is called unconditionally. Can you confirm that is ok
>   and update the changelog?

If the page offset is 0, this would mean the whole page will be zeroed,
but we can have an entire page within i_size, so that would mistakenly
clear the last page. So the check is still needed.
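
A worked example, assuming 4K pages and that end_index names the last
page containing data:

	/*
	 * i_size == 8192: the last data page covers bytes 4096..8191,
	 * i.e. it lies entirely within i_size, and
	 * offset_in_page(8192) == 0.  The unconditional
	 *
	 *	zero_user_segment(page, 0, PAGE_SIZE);
	 *
	 * would therefore wipe 4096 bytes of valid data; the original
	 * zero_offset != 0 check is what prevents that.
	 */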

While reading the code around the index calculations for the last page,
I also found some oddities and potential bugs, so I'll drop this patch
for now and look at the bugs first.
