linux-btrfs.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Qu Wenruo <wqu@suse.com>
To: linux-btrfs@vger.kernel.org
Subject: [PATCH v4 40/68] btrfs: extent_io: introduce EXTENT_READ_SUBMITTED to handle subpage data read
Date: Wed, 21 Oct 2020 14:25:26 +0800	[thread overview]
Message-ID: <20201021062554.68132-41-wqu@suse.com> (raw)
In-Reply-To: <20201021062554.68132-1-wqu@suse.com>

In end_bio_extent_readpage(), we will unlock the page for each segment,
this is fine for regular sectorsize == PAGE_SIZE case.

But for the subpage size case, we may have several bio segments for the same
page, and unlocking the page unconditionally can easily screw up the
locking.

To address the problem:
- Introduce a new bit, EXTENT_READ_SUBMITTED
  Now for subpage data read, each submitted read bio will have its range
  with EXTENT_READ_SUBMITTED set.

- Set the EXTENT_READ_SUBMITTED in __do_readpage()
  Set the full page with EXTENT_READ_SUBMITTED set.

- Clear and test if we're the last owner of EXTENT_READ_SUBMITTED in
  end_bio_extent_readpage() and __do_readpage()
  This ensures that no matter who finishes filling the page, the last
  owner will unlock the page.

  This is quite different from the regular sectorsize case, where one page
  either gets unlocked in __do_readpage() or in
  end_bio_extent_readpage().

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/extent-io-tree.h |  22 ++++++++
 fs/btrfs/extent_io.c      | 115 +++++++++++++++++++++++++++++++++++---
 2 files changed, 129 insertions(+), 8 deletions(-)

diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h
index bdafac1bd15f..d3b21c732634 100644
--- a/fs/btrfs/extent-io-tree.h
+++ b/fs/btrfs/extent-io-tree.h
@@ -26,6 +26,15 @@ struct io_failure_record;
 /* For subpage btree io tree, indicates there is an in-tree extent buffer */
 #define EXTENT_HAS_TREE_BLOCK	(1U << 15)
 
+/*
+ * For subpage data io tree, indicates there is a read bio submitted.
+ * The last one to clear the bit in the page will be responsible to unlock
+ * the containing page.
+ *
+ * TODO: Remove this if we use iomap for data read.
+ */
+#define EXTENT_READ_SUBMITTED	(1U << 16)
+
 #define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
 				 EXTENT_CLEAR_DATA_RESV)
 #define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING)
@@ -115,6 +124,19 @@ struct extent_io_extra_options {
 	 */
 	bool wake;
 	bool delete;
+
+	/*
+	 * For __clear_extent_bit(), to skip the spin lock and rely on caller
+	 * for the lock.
+	 * This allows the caller to do test-and-clear in a spinlock.
+	 */
+	bool skip_lock;
+
+	/*
+	 * For __clear_extent_bit(), paired with skip_lock, to provide the
+	 * preallocated extent_state.
+	 */
+	struct extent_state *prealloc;
 };
 
 int __init extent_state_cache_init(void);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 37593b599522..5254a4ce2598 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -710,6 +710,7 @@ int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	struct rb_node *node;
 	bool wake;
 	bool delete;
+	bool skip_lock;
 	u64 last_end;
 	int err;
 	int clear = 0;
@@ -719,8 +720,13 @@ int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	changeset = extra_opts->changeset;
 	wake = extra_opts->wake;
 	delete = extra_opts->delete;
+	skip_lock = extra_opts->skip_lock;
 
-	btrfs_debug_check_extent_io_range(tree, start, end);
+	if (skip_lock)
+		ASSERT(!gfpflags_allow_blocking(mask));
+
+	if (!skip_lock)
+		btrfs_debug_check_extent_io_range(tree, start, end);
 	trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);
 
 	if (bits & EXTENT_DELALLOC)
@@ -742,8 +748,11 @@ int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		 */
 		prealloc = alloc_extent_state(mask);
 	}
+	if (!prealloc && skip_lock)
+		prealloc = extra_opts->prealloc;
 
-	spin_lock(&tree->lock);
+	if (!skip_lock)
+		spin_lock(&tree->lock);
 	if (cached_state) {
 		cached = *cached_state;
 
@@ -848,15 +857,20 @@ int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 search_again:
 	if (start > end)
 		goto out;
-	spin_unlock(&tree->lock);
-	if (gfpflags_allow_blocking(mask))
-		cond_resched();
+	if (!skip_lock) {
+		spin_unlock(&tree->lock);
+		if (gfpflags_allow_blocking(mask))
+			cond_resched();
+	}
 	goto again;
 
 out:
-	spin_unlock(&tree->lock);
+	if (!skip_lock)
+		spin_unlock(&tree->lock);
 	if (prealloc)
 		free_extent_state(prealloc);
+	if (skip_lock)
+		extra_opts->prealloc = NULL;
 
 	return 0;
 
@@ -2926,6 +2940,70 @@ endio_readpage_release_extent(struct extent_io_tree *tree, struct page *page,
 	unlock_extent_cached_atomic(tree, start, end, &cached);
 }
 
+/*
+ * Finish the read and unlock the page if needed.
+ *
+ * For regular sectorsize == PAGE_SIZE case, just unlock the page.
+ * For subpage case, clear the EXTENT_READ_SUBMITTED bit, then if and
+ * only if we're the last EXTENT_READ_SUBMITTED of the page.
+ */
+static void finish_and_unlock_read_page(struct btrfs_fs_info *fs_info,
+		struct extent_io_tree *tree, u64 start, u64 end,
+		struct page *page, bool in_endio_context)
+{
+	struct extent_io_extra_options extra_opts = {
+		.skip_lock = true,
+	};
+	u64 page_start = round_down(start, PAGE_SIZE);
+	u64 page_end = page_start + PAGE_SIZE - 1;
+	bool metadata = (tree->owner == IO_TREE_BTREE_INODE_IO);
+	bool has_bit = true;
+	bool last_owner = false;
+
+	/*
+	 * For subpage metadata, we don't lock page for read/write at all,
+	 * just exit.
+	 */
+	if (btrfs_is_subpage(fs_info) && metadata)
+		return;
+
+	/* For regular sector size, we need to unlock the full page for endio */
+	if (!btrfs_is_subpage(fs_info)) {
+		/*
+		 * This function can be called in __do_readpage(), in that case we
+		 * shouldn't unlock the page.
+		 */
+		if (in_endio_context)
+			unlock_page(page);
+		return;
+	}
+
+	/*
+	 * The remaining case is subpage data read, which we need to update
+	 * EXTENT_READ_SUBMITTED and unlock the page for the last reader.
+	 */
+	ASSERT(end <= page_end);
+
+	/* Will be freed in __clear_extent_bit() */
+	extra_opts.prealloc = alloc_extent_state(GFP_NOFS);
+
+	spin_lock(&tree->lock);
+	/* Check if we have the bit first */
+	if (IS_ENABLED(CONFIG_BTRFS_DEBUG)) {
+		has_bit = test_range_bit_nolock(tree, start, end,
+				EXTENT_READ_SUBMITTED, 1, NULL);
+		WARN_ON(!has_bit);
+	}
+
+	__clear_extent_bit(tree, start, end, EXTENT_READ_SUBMITTED, NULL,
+			   GFP_ATOMIC, &extra_opts);
+	last_owner = !test_range_bit_nolock(tree, page_start, page_end,
+					    EXTENT_READ_SUBMITTED, 0, NULL);
+	spin_unlock(&tree->lock);
+	if (has_bit && last_owner)
+		unlock_page(page);
+}
+
 /*
  * after a readpage IO is done, we need to:
  * clear the uptodate bits on error
@@ -3050,7 +3128,7 @@ static void end_bio_extent_readpage(struct bio *bio)
 		offset += len;
 
 		endio_readpage_release_extent(tree, page, start, end, uptodate);
-		unlock_page(page);
+		finish_and_unlock_read_page(fs_info, tree, start, end, page, true);
 	}
 
 	btrfs_io_bio_free_csum(io_bio);
@@ -3277,6 +3355,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
 	}
 	return em;
 }
+
 /*
  * basic readpage implementation.  Locked extent state structs are inserted
  * into the tree that are removed when the IO is done (by the end_io
@@ -3292,6 +3371,7 @@ static int __do_readpage(struct page *page,
 			 u64 *prev_em_start)
 {
 	struct inode *inode = page->mapping->host;
+	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
 	u64 start = page_offset(page);
 	const u64 end = start + PAGE_SIZE - 1;
 	u64 cur = start;
@@ -3330,6 +3410,9 @@ static int __do_readpage(struct page *page,
 			kunmap_atomic(userpage);
 		}
 	}
+
+	if (btrfs_is_subpage(fs_info))
+		set_extent_bits(tree, start, end, EXTENT_READ_SUBMITTED);
 	while (cur <= end) {
 		bool force_bio_submit = false;
 		u64 offset;
@@ -3347,6 +3430,8 @@ static int __do_readpage(struct page *page,
 					    &cached, GFP_NOFS);
 			unlock_extent_cached(tree, cur,
 					     cur + iosize - 1, &cached);
+			finish_and_unlock_read_page(fs_info, tree, cur,
+						cur + iosize - 1, page, false);
 			break;
 		}
 		em = __get_extent_map(inode, page, pg_offset, cur,
@@ -3354,6 +3439,8 @@ static int __do_readpage(struct page *page,
 		if (IS_ERR_OR_NULL(em)) {
 			SetPageError(page);
 			unlock_extent(tree, cur, end);
+			finish_and_unlock_read_page(fs_info, tree, cur,
+						cur + iosize - 1, page, false);
 			break;
 		}
 		extent_offset = cur - em->start;
@@ -3436,6 +3523,8 @@ static int __do_readpage(struct page *page,
 					    &cached, GFP_NOFS);
 			unlock_extent_cached(tree, cur,
 					     cur + iosize - 1, &cached);
+			finish_and_unlock_read_page(fs_info, tree, cur,
+						cur + iosize - 1, page, false);
 			cur = cur + iosize;
 			pg_offset += iosize;
 			continue;
@@ -3445,6 +3534,8 @@ static int __do_readpage(struct page *page,
 				   EXTENT_UPTODATE, 1, NULL)) {
 			check_page_uptodate(tree, page);
 			unlock_extent(tree, cur, cur + iosize - 1);
+			finish_and_unlock_read_page(fs_info, tree, cur,
+						cur + iosize - 1, page, false);
 			cur = cur + iosize;
 			pg_offset += iosize;
 			continue;
@@ -3455,6 +3546,8 @@ static int __do_readpage(struct page *page,
 		if (block_start == EXTENT_MAP_INLINE) {
 			SetPageError(page);
 			unlock_extent(tree, cur, cur + iosize - 1);
+			finish_and_unlock_read_page(fs_info, tree, cur,
+						cur + iosize - 1, page, false);
 			cur = cur + iosize;
 			pg_offset += iosize;
 			continue;
@@ -3482,7 +3575,13 @@ static int __do_readpage(struct page *page,
 	if (!nr) {
 		if (!PageError(page))
 			SetPageUptodate(page);
-		unlock_page(page);
+		/*
+		 * Subpage case will unlock the page in
+		 * finish_and_unlock_read_page() according to the
+		 * EXTENT_READ_SUBMITTED status.
+		 */
+		if (!btrfs_is_subpage(fs_info))
+			unlock_page(page);
 	}
 	return ret;
 }
-- 
2.28.0


  parent reply	other threads:[~2020-10-21  6:27 UTC|newest]

Thread overview: 97+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-10-21  6:24 [PATCH v4 00/68] btrfs: add basic rw support for subpage sector size Qu Wenruo
2020-10-21  6:24 ` [PATCH v4 01/68] btrfs: extent-io-tests: remove invalid tests Qu Wenruo
2020-10-26 23:26   ` David Sterba
2020-10-27  0:44     ` Qu Wenruo
2020-11-03  6:07       ` Qu Wenruo
2020-10-21  6:24 ` [PATCH v4 02/68] btrfs: use iosize while reading compressed pages Qu Wenruo
2020-10-21  6:24 ` [PATCH v4 03/68] btrfs: extent_io: fix the comment on lock_extent_buffer_for_io() Qu Wenruo
2020-10-21  6:24 ` [PATCH v4 04/68] btrfs: extent_io: update the comment for find_first_extent_bit() Qu Wenruo
2020-10-21  6:24 ` [PATCH v4 05/68] btrfs: extent_io: sink the @failed_start parameter for set_extent_bit() Qu Wenruo
2020-10-21  6:24 ` [PATCH v4 06/68] btrfs: make btree inode io_tree has its special owner Qu Wenruo
2020-10-21  6:24 ` [PATCH v4 07/68] btrfs: disk-io: replace @fs_info and @private_data with @inode for btrfs_wq_submit_bio() Qu Wenruo
2020-10-21 22:00   ` Goldwyn Rodrigues
2020-10-21  6:24 ` [PATCH v4 08/68] btrfs: inode: sink parameter @start and @len for check_data_csum() Qu Wenruo
2020-10-21 22:11   ` Goldwyn Rodrigues
2020-10-27  0:13   ` David Sterba
2020-10-27  0:50     ` Qu Wenruo
2020-10-27 23:17       ` David Sterba
2020-10-28  0:57         ` Qu Wenruo
2020-10-29 19:38           ` David Sterba
2020-10-21  6:24 ` [PATCH v4 09/68] btrfs: extent_io: unexport extent_invalidatepage() Qu Wenruo
2020-10-27  0:24   ` David Sterba
2020-10-21  6:24 ` [PATCH v4 10/68] btrfs: extent_io: remove the forward declaration and rename __process_pages_contig Qu Wenruo
2020-10-27  0:28   ` David Sterba
2020-10-27  0:50     ` Qu Wenruo
2020-10-27 23:25       ` David Sterba
2020-10-21  6:24 ` [PATCH v4 11/68] btrfs: extent_io: rename pages_locked in process_pages_contig() Qu Wenruo
2020-10-21  6:24 ` [PATCH v4 12/68] btrfs: extent_io: only require sector size alignment for page read Qu Wenruo
2020-10-21  6:24 ` [PATCH v4 13/68] btrfs: extent_io: remove the extent_start/extent_len for end_bio_extent_readpage() Qu Wenruo
2020-10-27 10:29   ` David Sterba
2020-10-27 12:15     ` Qu Wenruo
2020-10-27 23:31       ` David Sterba
2020-10-21  6:25 ` [PATCH v4 14/68] btrfs: extent_io: integrate page status update into endio_readpage_release_extent() Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 15/68] btrfs: extent_io: rename page_size to io_size in submit_extent_page() Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 16/68] btrfs: extent_io: add assert_spin_locked() for attach_extent_buffer_page() Qu Wenruo
2020-10-27 10:43   ` David Sterba
2020-10-21  6:25 ` [PATCH v4 17/68] btrfs: extent_io: extract the btree page submission code into its own helper function Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 18/68] btrfs: extent_io: calculate inline extent buffer page size based on page size Qu Wenruo
2020-10-27 11:16   ` David Sterba
2020-10-27 11:20     ` David Sterba
2020-10-21  6:25 ` [PATCH v4 19/68] btrfs: extent_io: make btrfs_fs_info::buffer_radix to take sector size devided values Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 20/68] btrfs: extent_io: sink less common parameters for __set_extent_bit() Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 21/68] btrfs: extent_io: sink less common parameters for __clear_extent_bit() Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 22/68] btrfs: disk_io: grab fs_info from extent_buffer::fs_info directly for btrfs_mark_buffer_dirty() Qu Wenruo
2020-10-27 15:43   ` Goldwyn Rodrigues
2020-10-21  6:25 ` [PATCH v4 23/68] btrfs: disk-io: make csum_tree_block() handle sectorsize smaller than page size Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 24/68] btrfs: disk-io: extract the extent buffer verification from btree_readpage_end_io_hook() Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 25/68] btrfs: disk-io: accept bvec directly for csum_dirty_buffer() Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 26/68] btrfs: inode: make btrfs_readpage_end_io_hook() follow sector size Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 27/68] btrfs: introduce a helper to determine if the sectorsize is smaller than PAGE_SIZE Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 28/68] btrfs: extent_io: allow find_first_extent_bit() to find a range with exact bits match Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 29/68] btrfs: extent_io: don't allow tree block to cross page boundary for subpage support Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 30/68] btrfs: extent_io: update num_extent_pages() to support subpage sized extent buffer Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 31/68] btrfs: handle sectorsize < PAGE_SIZE case for extent buffer accessors Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 32/68] btrfs: disk-io: only clear EXTENT_LOCK bit for extent_invalidatepage() Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 33/68] btrfs: extent-io: make type of extent_state::state to be at least 32 bits Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 34/68] btrfs: extent_io: use extent_io_tree to handle subpage extent buffer allocation Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 35/68] btrfs: extent_io: make set/clear_extent_buffer_uptodate() to support subpage size Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 36/68] btrfs: extent_io: make the assert test on page uptodate able to handle subpage Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 37/68] btrfs: extent_io: implement subpage metadata read and its endio function Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 38/68] btrfs: extent_io: implement try_release_extent_buffer() for subpage metadata support Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 39/68] btrfs: extent_io: extra the core of test_range_bit() into test_range_bit_nolock() Qu Wenruo
2020-10-21  6:25 ` Qu Wenruo [this message]
2020-10-21  6:25 ` [PATCH v4 41/68] btrfs: set btree inode track_uptodate for subpage support Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 42/68] btrfs: allow RO mount of 4K sector size fs on 64K page system Qu Wenruo
2020-10-29 20:11   ` David Sterba
2020-10-29 23:34   ` Michał Mirosław
2020-10-29 23:56     ` Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 43/68] btrfs: disk-io: allow btree_set_page_dirty() to do more sanity check on subpage metadata Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 44/68] btrfs: disk-io: support subpage metadata csum calculation at write time Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 45/68] btrfs: extent_io: prevent extent_state from being merged for btree io tree Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 46/68] btrfs: extent_io: make set_extent_buffer_dirty() to support subpage sized metadata Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 47/68] btrfs: extent_io: add subpage support for clear_extent_buffer_dirty() Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 48/68] btrfs: extent_io: make set_btree_ioerr() accept extent buffer Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 49/68] btrfs: extent_io: introduce write_one_subpage_eb() function Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 50/68] btrfs: extent_io: make lock_extent_buffer_for_io() subpage compatible Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 51/68] btrfs: extent_io: introduce submit_btree_subpage() to submit a page for subpage metadata write Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 52/68] btrfs: extent_io: introduce end_bio_subpage_eb_writepage() function Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 53/68] btrfs: inode: make can_nocow_extent() check only return 1 if the range is no smaller than PAGE_SIZE Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 54/68] btrfs: file: calculate reserve space based on PAGE_SIZE for buffered write Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 55/68] btrfs: file: make hole punching page aligned for subpage Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 56/68] btrfs: file: make btrfs_dirty_pages() follow page size to mark extent io tree Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 57/68] btrfs: file: make btrfs_file_write_iter() to be page aligned Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 58/68] btrfs: output extra info for space info update underflow Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 59/68] btrfs: delalloc-space: make data space reservation to be page aligned Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 60/68] btrfs: scrub: allow scrub to work with subpage sectorsize Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 61/68] btrfs: inode: make btrfs_truncate_block() to do page alignment Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 62/68] btrfs: file: make hole punch and zero range to be page aligned Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 63/68] btrfs: file: make btrfs_fallocate() to use PAGE_SIZE as blocksize Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 64/68] btrfs: inode: always mark the full page range delalloc for btrfs_page_mkwrite() Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 65/68] btrfs: inode: require page alignement for direct io Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 66/68] btrfs: inode: only do NOCOW write for page aligned extent Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 67/68] btrfs: reflink: do full page writeback for reflink prepare Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 68/68] btrfs: support subpage read write for test Qu Wenruo
2020-10-21 11:22 ` [PATCH v4 00/68] btrfs: add basic rw support for subpage sector size David Sterba
2020-10-21 11:50   ` Qu Wenruo
2020-11-02 14:56 ` David Sterba
2020-11-03  0:06   ` Qu Wenruo

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20201021062554.68132-41-wqu@suse.com \
    --to=wqu@suse.com \
    --cc=linux-btrfs@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).