All of lore.kernel.org
 help / color / mirror / Atom feed
From: Nikolay Borisov <nborisov@suse.com>
To: Christoph Hellwig <hch@lst.de>, David Sterba <dsterba@suse.com>,
	Josef Bacik <josef@toxicpanda.com>, Qu Wenruo <wqu@suse.com>
Cc: linux-btrfs@vger.kernel.org
Subject: Re: [PATCH 06/10] btrfs: don't use btrfs_bio_wq_end_io for compressed writes
Date: Wed, 11 May 2022 22:20:48 +0300	[thread overview]
Message-ID: <f8d90519-9911-fde0-9b18-3e4f339590c3@suse.com> (raw)
In-Reply-To: <20220504122524.558088-7-hch@lst.de>



On 4.05.22 г. 15:25 ч., Christoph Hellwig wrote:
> Compressed write bio completion is the only user of btrfs_bio_wq_end_io
> for writes, and the use of btrfs_bio_wq_end_io is a little suboptimal
> here as we only real need user context for the final completion of a
> compressed_bio structure, and not every single bio completion.
> 
> Add a work_struct to struct compressed_bio instead and use that to call
> finish_compressed_bio_write.  This allows to remove all handling of
> write bios in the btrfs_bio_wq_end_io infrastructure.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   fs/btrfs/compression.c | 45 +++++++++++++++++++++---------------------
>   fs/btrfs/compression.h |  7 +++++--
>   fs/btrfs/ctree.h       |  2 +-
>   fs/btrfs/disk-io.c     | 30 +++++++++++-----------------
>   fs/btrfs/super.c       |  2 --
>   5 files changed, 41 insertions(+), 45 deletions(-)
> 
> diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
> index f4564f32f6d93..9d5986a30a4a2 100644
> --- a/fs/btrfs/compression.c
> +++ b/fs/btrfs/compression.c
> @@ -403,6 +403,14 @@ static void finish_compressed_bio_write(struct compressed_bio *cb)
>   	kfree(cb);
>   }
>   
> +static void btrfs_finish_compressed_write_work(struct work_struct *work)
> +{
> +	struct compressed_bio *cb =
> +		container_of(work, struct compressed_bio, write_end_work);
> +
> +	finish_compressed_bio_write(cb);
> +}
> +
>   /*
>    * Do the cleanup once all the compressed pages hit the disk.  This will clear
>    * writeback on the file pages and free the compressed pages.
> @@ -414,29 +422,16 @@ static void end_compressed_bio_write(struct bio *bio)
>   {
>   	struct compressed_bio *cb = bio->bi_private;
>   
> -	if (!dec_and_test_compressed_bio(cb, bio))
> -		goto out;
> -
> -	btrfs_record_physical_zoned(cb->inode, cb->start, bio);
> +	if (dec_and_test_compressed_bio(cb, bio)) {
> +		struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
>   
> -	finish_compressed_bio_write(cb);
> -out:
> +		btrfs_record_physical_zoned(cb->inode, cb->start, bio);
> +		queue_work(fs_info->compressed_write_workers,
> +			   &cb->write_end_work);
> +	}
>   	bio_put(bio);
>   }
>   
> -static blk_status_t submit_compressed_bio(struct btrfs_fs_info *fs_info,
> -					  struct bio *bio, int mirror_num)
> -{
> -	blk_status_t ret;
> -
> -	ASSERT(bio->bi_iter.bi_size);
> -	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
> -	if (ret)
> -		return ret;
> -	ret = btrfs_map_bio(fs_info, bio, mirror_num);
> -	return ret;
> -}
> -
>   /*
>    * Allocate a compressed_bio, which will be used to read/write on-disk
>    * (aka, compressed) * data.
> @@ -533,7 +528,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
>   	cb->compressed_pages = compressed_pages;
>   	cb->compressed_len = compressed_len;
>   	cb->writeback = writeback;
> -	cb->orig_bio = NULL;
> +	INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
>   	cb->nr_pages = nr_pages;
>   
>   	if (blkcg_css)
> @@ -603,7 +598,8 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
>   					goto finish_cb;
>   			}
>   
> -			ret = submit_compressed_bio(fs_info, bio, 0);
> +			ASSERT(bio->bi_iter.bi_size);
> +			ret = btrfs_map_bio(fs_info, bio, 0);
>   			if (ret)
>   				goto finish_cb;
>   			bio = NULL;
> @@ -941,7 +937,12 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
>   						  fs_info->sectorsize);
>   			sums += fs_info->csum_size * nr_sectors;
>   
> -			ret = submit_compressed_bio(fs_info, comp_bio, mirror_num);
> +			ASSERT(comp_bio->bi_iter.bi_size);
> +			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
> +						  BTRFS_WQ_ENDIO_DATA);
> +			if (ret)
> +				goto finish_cb;
> +			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
>   			if (ret)
>   				goto finish_cb;
>   			comp_bio = NULL;
> diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
> index 2707404389a5d..4a40725cbf1db 100644
> --- a/fs/btrfs/compression.h
> +++ b/fs/btrfs/compression.h
> @@ -61,8 +61,11 @@ struct compressed_bio {
>   	blk_status_t status;
>   	int mirror_num;
>   
> -	/* for reads, this is the bio we are copying the data into */
> -	struct bio *orig_bio;
> +	union {
> +		/* for reads, this is the bio we are copying the data into */
> +		struct bio *orig_bio;
> +		struct work_struct write_end_work;
> +	};
>   
>   	/*
>   	 * the start of a variable length array of checksums only
> diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
> index a3739143bf44c..6cf699959286d 100644
> --- a/fs/btrfs/ctree.h
> +++ b/fs/btrfs/ctree.h
> @@ -855,7 +855,7 @@ struct btrfs_fs_info {
>   	struct btrfs_workqueue *endio_meta_workers;
>   	struct workqueue_struct *endio_raid56_workers;
>   	struct workqueue_struct *rmw_workers;
> -	struct btrfs_workqueue *endio_meta_write_workers;
> +	struct workqueue_struct *compressed_write_workers;
>   	struct btrfs_workqueue *endio_write_workers;
>   	struct btrfs_workqueue *endio_freespace_worker;
>   	struct btrfs_workqueue *caching_workers;
> diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
> index ef117eaab468c..aa56ba9378e1f 100644
> --- a/fs/btrfs/disk-io.c
> +++ b/fs/btrfs/disk-io.c
> @@ -748,19 +748,10 @@ static void end_workqueue_bio(struct bio *bio)
>   	fs_info = end_io_wq->info;
>   	end_io_wq->status = bio->bi_status;
>   
> -	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
> -		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
> -			wq = fs_info->endio_meta_write_workers;
> -		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
> -			wq = fs_info->endio_freespace_worker;
> -		else
> -			wq = fs_info->endio_write_workers;
> -	} else {
> -		if (end_io_wq->metadata)
> -			wq = fs_info->endio_meta_workers;
> -		else
> -			wq = fs_info->endio_workers;
> -	}
> +	if (end_io_wq->metadata)
> +		wq = fs_info->endio_meta_workers;
> +	else
> +		wq = fs_info->endio_workers;
>   
>   	btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
>   	btrfs_queue_work(wq, &end_io_wq->work);
> @@ -771,6 +762,9 @@ blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
>   {
>   	struct btrfs_end_io_wq *end_io_wq;
>   
> +	if (WARN_ON_ONCE(btrfs_op(bio) != BTRFS_MAP_WRITE))
> +		return BLK_STS_IOERR;

This is broken w.r.t. btrfs_submit_metadata_bio, since in
that function the code is:

  if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
                       /*
                        * called for a read, do the setup so that checksum validation
                        * can happen in the async kernel threads
                        */
                     ret = btrfs_bio_wq_end_io(fs_info, bio,
                                                 BTRFS_WQ_ENDIO_METADATA);
                       if (!ret)
                               ret = btrfs_map_bio(fs_info, bio, mirror_num);



So metadata reads will end up in a function which disallows reads.
This results in a failure to mount when reading various metadata.

I guess the correct fix is to include the following hunk from patch 8 in this series:

@@ -916,14 +839,7 @@ void btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio, int mirror_
  	bio->bi_opf |= REQ_META;
  
  	if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
-		/*
-		 * called for a read, do the setup so that checksum validation
-		 * can happen in the async kernel threads
-		 */
-		ret = btrfs_bio_wq_end_io(fs_info, bio,
-					  BTRFS_WQ_ENDIO_METADATA);
-		if (!ret)
-			ret = btrfs_map_bio(fs_info, bio, mirror_num);
+		ret = btrfs_map_bio(fs_info, bio, mirror_num);





> +
>   	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
>   	if (!end_io_wq)
>   		return BLK_STS_RESOURCE;
> @@ -2273,6 +2267,8 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
>   		destroy_workqueue(fs_info->endio_raid56_workers);
>   	if (fs_info->rmw_workers)
>   		destroy_workqueue(fs_info->rmw_workers);
> +	if (fs_info->compressed_write_workers)
> +		destroy_workqueue(fs_info->compressed_write_workers);
>   	btrfs_destroy_workqueue(fs_info->endio_write_workers);
>   	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
>   	btrfs_destroy_workqueue(fs_info->delayed_workers);
> @@ -2287,7 +2283,6 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
>   	 * queues can do metadata I/O operations.
>   	 */
>   	btrfs_destroy_workqueue(fs_info->endio_meta_workers);
> -	btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
>   }
>   
>   static void free_root_extent_buffers(struct btrfs_root *root)
> @@ -2469,15 +2464,14 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
>   	fs_info->endio_meta_workers =
>   		btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
>   				      max_active, 4);
> -	fs_info->endio_meta_write_workers =
> -		btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
> -				      max_active, 2);
>   	fs_info->endio_raid56_workers =
>   		alloc_workqueue("btrfs-endio-raid56", flags, max_active);
>   	fs_info->rmw_workers = alloc_workqueue("btrfs-rmw", flags, max_active);
>   	fs_info->endio_write_workers =
>   		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
>   				      max_active, 2);
> +	fs_info->compressed_write_workers =
> +		alloc_workqueue("btrfs-compressed-write", flags, max_active);
>   	fs_info->endio_freespace_worker =
>   		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
>   				      max_active, 0);
> @@ -2492,7 +2486,7 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
>   	if (!(fs_info->workers && fs_info->hipri_workers &&
>   	      fs_info->delalloc_workers && fs_info->flush_workers &&
>   	      fs_info->endio_workers && fs_info->endio_meta_workers &&
> -	      fs_info->endio_meta_write_workers &&
> +	      fs_info->compressed_write_workers &&
>   	      fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
>   	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
>   	      fs_info->caching_workers && fs_info->fixup_workers &&
> diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
> index b1fdc6a26c76e..9c683c466d585 100644
> --- a/fs/btrfs/super.c
> +++ b/fs/btrfs/super.c
> @@ -1908,8 +1908,6 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
>   	btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
>   	btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size);
>   	btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size);
> -	btrfs_workqueue_set_max(fs_info->endio_meta_write_workers,
> -				new_pool_size);
>   	btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
>   	btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
>   	btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);

  reply	other threads:[~2022-05-11 19:20 UTC|newest]

Thread overview: 32+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-05-04 12:25 cleanup btrfs bio handling, part 2 v3 Christoph Hellwig
2022-05-04 12:25 ` [PATCH 01/10] btrfs: don't double-defer bio completions for compressed reads Christoph Hellwig
2022-05-04 12:25 ` [PATCH 02/10] btrfs: move more work into btrfs_end_bioc Christoph Hellwig
2022-05-04 12:25 ` [PATCH 03/10] btrfs: cleanup btrfs_submit_dio_bio Christoph Hellwig
2022-05-04 12:25 ` [PATCH 04/10] btrfs: split btrfs_submit_data_bio Christoph Hellwig
2022-05-04 12:38   ` Qu Wenruo
2022-05-04 14:08     ` Christoph Hellwig
2022-05-04 22:41       ` Qu Wenruo
2022-05-04 22:44         ` Qu Wenruo
2022-05-05 15:08         ` Christoph Hellwig
2022-05-04 12:25 ` [PATCH 05/10] btrfs: defer I/O completion based on the btrfs_raid_bio Christoph Hellwig
2022-05-04 12:25 ` [PATCH 06/10] btrfs: don't use btrfs_bio_wq_end_io for compressed writes Christoph Hellwig
2022-05-11 19:20   ` Nikolay Borisov [this message]
2022-05-11 19:28     ` Nikolay Borisov
2022-05-12  5:56       ` Christoph Hellwig
2022-05-04 12:25 ` [PATCH 07/10] btrfs: centralize setting REQ_META Christoph Hellwig
2022-05-04 12:25 ` [PATCH 08/10] btrfs: remove btrfs_end_io_wq Christoph Hellwig
2022-05-05  2:34   ` Qu Wenruo
2022-05-05 15:09     ` Christoph Hellwig
2022-05-04 12:25 ` [PATCH 09/10] btrfs: refactor btrfs_map_bio Christoph Hellwig
2022-05-04 12:46   ` Qu Wenruo
2022-05-04 12:25 ` [PATCH 10/10] btrfs: do not allocate a btrfs_bio for low-level bios Christoph Hellwig
2022-05-05 11:23   ` Qu Wenruo
2022-05-05  6:56 ` cleanup btrfs bio handling, part 2 v3 Qu Wenruo
2022-05-05 15:11   ` Christoph Hellwig
2022-05-12  6:22     ` Anand Jain
2022-05-12  6:30       ` Anand Jain
2022-05-05 15:34   ` David Sterba
  -- strict thread matches above, loose matches on Subject: below --
2022-05-26  7:36 cleanup btrfs bio handling, part 2 v4 Christoph Hellwig
2022-05-26  7:36 ` [PATCH 06/10] btrfs: don't use btrfs_bio_wq_end_io for compressed writes Christoph Hellwig
2022-06-01 19:33   ` David Sterba
2022-04-29 14:30 cleanup btrfs bio handling, part 2 v2 Christoph Hellwig
2022-04-29 14:30 ` [PATCH 06/10] btrfs: don't use btrfs_bio_wq_end_io for compressed writes Christoph Hellwig
2022-04-25  7:54 cleanup btrfs bio handling, part 2 Christoph Hellwig
2022-04-25  7:54 ` [PATCH 06/10] btrfs: don't use btrfs_bio_wq_end_io for compressed writes Christoph Hellwig

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=f8d90519-9911-fde0-9b18-3e4f339590c3@suse.com \
    --to=nborisov@suse.com \
    --cc=dsterba@suse.com \
    --cc=hch@lst.de \
    --cc=josef@toxicpanda.com \
    --cc=linux-btrfs@vger.kernel.org \
    --cc=wqu@suse.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.