All of lore.kernel.org
 help / color / mirror / Atom feed
From: Qu Wenruo <wqu@suse.com>
To: linux-btrfs@vger.kernel.org
Subject: [PATCH v4 33/68] btrfs: extent-io: make type of extent_state::state to be at least 32 bits
Date: Wed, 21 Oct 2020 14:25:19 +0800	[thread overview]
Message-ID: <20201021062554.68132-34-wqu@suse.com> (raw)
In-Reply-To: <20201021062554.68132-1-wqu@suse.com>

Currently we use 'unsigned' for extent_state::state, which is only
guaranteed by the C standard to be at least 16 bits.

But for incoming subpage support, we are going to introduce more bits to
at least match the following page bits:
- PageUptodate
- PagePrivate2

Thus we will go beyond 16 bits.

To support this, make extent_state::state at least 32 bits wide and, to be
more explicit, use "u32" so the maximum number of supported bits is clear.

This doesn't increase the memory usage for x86_64, but may affect other
architectures.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/extent-io-tree.h | 37 ++++++++++++++++-------------
 fs/btrfs/extent_io.c      | 50 +++++++++++++++++++--------------------
 fs/btrfs/extent_io.h      |  2 +-
 3 files changed, 45 insertions(+), 44 deletions(-)

diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h
index 48fdaf5f3a19..176e0e8e1f7c 100644
--- a/fs/btrfs/extent-io-tree.h
+++ b/fs/btrfs/extent-io-tree.h
@@ -22,6 +22,10 @@ struct io_failure_record;
 #define EXTENT_QGROUP_RESERVED	(1U << 12)
 #define EXTENT_CLEAR_DATA_RESV	(1U << 13)
 #define EXTENT_DELALLOC_NEW	(1U << 14)
+
+/* For subpage btree io tree, to indicate there is an extent buffer */
+#define EXTENT_HAS_TREE_BLOCK	(1U << 15)
+
 #define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
 				 EXTENT_CLEAR_DATA_RESV)
 #define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING)
@@ -73,7 +77,7 @@ struct extent_state {
 	/* ADD NEW ELEMENTS AFTER THIS */
 	wait_queue_head_t wq;
 	refcount_t refs;
-	unsigned state;
+	u32 state;
 
 	struct io_failure_record *failrec;
 
@@ -136,19 +140,19 @@ void __cold extent_io_exit(void);
 
 u64 count_range_bits(struct extent_io_tree *tree,
 		     u64 *start, u64 search_end,
-		     u64 max_bytes, unsigned bits, int contig);
+		     u64 max_bytes, u32 bits, int contig);
 
 void free_extent_state(struct extent_state *state);
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		   unsigned bits, int filled,
+		   u32 bits, int filled,
 		   struct extent_state *cached_state);
 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		unsigned bits, struct extent_changeset *changeset);
+			     u32 bits, struct extent_changeset *changeset);
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		     unsigned bits, int wake, int delete,
+		     u32 bits, int wake, int delete,
 		     struct extent_state **cached);
 int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		       unsigned bits, struct extent_state **cached_state,
+		       u32 bits, struct extent_state **cached_state,
 		       gfp_t mask, struct extent_io_extra_options *extra_opts);
 
 static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
@@ -177,7 +181,7 @@ static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
 }
 
 static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
-		u64 end, unsigned bits)
+				    u64 end, u32 bits)
 {
 	int wake = 0;
 
@@ -188,15 +192,14 @@ static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
 }
 
 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-			   unsigned bits, struct extent_changeset *changeset);
+			   u32 bits, struct extent_changeset *changeset);
 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		   unsigned bits, struct extent_state **cached_state,
-		   gfp_t mask);
+		   u32 bits, struct extent_state **cached_state, gfp_t mask);
 int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
-			   unsigned bits);
+			   u32 bits);
 
 static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
-		u64 end, unsigned bits)
+		u64 end, u32 bits)
 {
 	return set_extent_bit(tree, start, end, bits, NULL, GFP_NOFS);
 }
@@ -223,11 +226,11 @@ static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
 }
 
 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		       unsigned bits, unsigned clear_bits,
+		       u32 bits, u32 clear_bits,
 		       struct extent_state **cached_state);
 
 static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
-				      u64 end, unsigned int extra_bits,
+				      u64 end, u32 extra_bits,
 				      struct extent_state **cached_state)
 {
 	return set_extent_bit(tree, start, end,
@@ -257,12 +260,12 @@ static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
 }
 
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
-			  u64 *start_ret, u64 *end_ret, unsigned bits,
+			  u64 *start_ret, u64 *end_ret, u32 bits,
 			  bool exact_match, struct extent_state **cached_state);
 void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
-				 u64 *start_ret, u64 *end_ret, unsigned bits);
+				 u64 *start_ret, u64 *end_ret, u32 bits);
 int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
-			       u64 *start_ret, u64 *end_ret, unsigned bits);
+			       u64 *start_ret, u64 *end_ret, u32 bits);
 bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
 			       u64 *end, u64 max_bytes,
 			       struct extent_state **cached_state);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index ea248e2689c9..a7e4d3c65162 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -143,7 +143,7 @@ struct extent_page_data {
 	unsigned int sync_io:1;
 };
 
-static int add_extent_changeset(struct extent_state *state, unsigned bits,
+static int add_extent_changeset(struct extent_state *state, u32 bits,
 				 struct extent_changeset *changeset,
 				 int set)
 {
@@ -531,7 +531,7 @@ static void merge_state(struct extent_io_tree *tree,
 }
 
 static void set_state_bits(struct extent_io_tree *tree,
-			   struct extent_state *state, unsigned *bits,
+			   struct extent_state *state, u32 *bits,
 			   struct extent_changeset *changeset);
 
 /*
@@ -548,7 +548,7 @@ static int insert_state(struct extent_io_tree *tree,
 			struct extent_state *state, u64 start, u64 end,
 			struct rb_node ***p,
 			struct rb_node **parent,
-			unsigned *bits, struct extent_changeset *changeset)
+			u32 *bits, struct extent_changeset *changeset)
 {
 	struct rb_node *node;
 
@@ -629,11 +629,11 @@ static struct extent_state *next_state(struct extent_state *state)
  */
 static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
 					    struct extent_state *state,
-					    unsigned *bits, int wake,
+					    u32 *bits, int wake,
 					    struct extent_changeset *changeset)
 {
 	struct extent_state *next;
-	unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;
+	u32 bits_to_clear = *bits & ~EXTENT_CTLBITS;
 	int ret;
 
 	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
@@ -700,7 +700,7 @@ static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
  * No error can be returned yet, the ENOMEM for memory is handled by BUG_ON().
  */
 int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		       unsigned bits, struct extent_state **cached_state,
+		       u32 bits, struct extent_state **cached_state,
 		       gfp_t mask, struct extent_io_extra_options *extra_opts)
 {
 	struct extent_changeset *changeset;
@@ -881,7 +881,7 @@ static void wait_on_state(struct extent_io_tree *tree,
  * The tree lock is taken by this function
  */
 static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-			    unsigned long bits)
+			    u32 bits)
 {
 	struct extent_state *state;
 	struct rb_node *node;
@@ -928,9 +928,9 @@ static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 
 static void set_state_bits(struct extent_io_tree *tree,
 			   struct extent_state *state,
-			   unsigned *bits, struct extent_changeset *changeset)
+			   u32 *bits, struct extent_changeset *changeset)
 {
-	unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;
+	u32 bits_to_set = *bits & ~EXTENT_CTLBITS;
 	int ret;
 
 	if (tree->private_data && is_data_inode(tree->private_data))
@@ -977,7 +977,7 @@ static void cache_state(struct extent_state *state,
 
 static int __must_check
 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		 unsigned bits, struct extent_state **cached_state,
+		 u32 bits, struct extent_state **cached_state,
 		 gfp_t mask, struct extent_io_extra_options *extra_opts)
 {
 	struct extent_state *state;
@@ -1201,8 +1201,7 @@ __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 }
 
 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		   unsigned bits, struct extent_state **cached_state,
-		   gfp_t mask)
+		   u32 bits, struct extent_state **cached_state, gfp_t mask)
 {
 	return __set_extent_bit(tree, start, end, bits, cached_state,
 			        mask, NULL);
@@ -1228,7 +1227,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
  * All allocations are done with GFP_NOFS.
  */
 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		       unsigned bits, unsigned clear_bits,
+		       u32 bits, u32 clear_bits,
 		       struct extent_state **cached_state)
 {
 	struct extent_state *state;
@@ -1429,7 +1428,7 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 
 /* wrappers around set/clear extent bit */
 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-			   unsigned bits, struct extent_changeset *changeset)
+			   u32 bits, struct extent_changeset *changeset)
 {
 	struct extent_io_extra_options extra_opts = {
 		.changeset = changeset,
@@ -1448,13 +1447,13 @@ int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 }
 
 int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
-			   unsigned bits)
+			   u32 bits)
 {
 	return __set_extent_bit(tree, start, end, bits, NULL, GFP_NOWAIT, NULL);
 }
 
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		     unsigned bits, int wake, int delete,
+		     u32 bits, int wake, int delete,
 		     struct extent_state **cached)
 {
 	struct extent_io_extra_options extra_opts = {
@@ -1467,7 +1466,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 }
 
 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		unsigned bits, struct extent_changeset *changeset)
+		u32 bits, struct extent_changeset *changeset)
 {
 	struct extent_io_extra_options extra_opts = {
 		.changeset = changeset,
@@ -1559,7 +1558,7 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
 	}
 }
 
-static bool match_extent_state(struct extent_state *state, unsigned bits,
+static bool match_extent_state(struct extent_state *state, u32 bits,
 			       bool exact_match)
 {
 	if (exact_match)
@@ -1579,7 +1578,7 @@ static bool match_extent_state(struct extent_state *state, unsigned bits,
  */
 static struct extent_state *
 find_first_extent_bit_state(struct extent_io_tree *tree,
-			    u64 start, unsigned bits, bool exact_match)
+			    u64 start, u32 bits, bool exact_match)
 {
 	struct rb_node *node;
 	struct extent_state *state;
@@ -1615,7 +1614,7 @@ find_first_extent_bit_state(struct extent_io_tree *tree,
  * Return 1 if we found nothing.
  */
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
-			  u64 *start_ret, u64 *end_ret, unsigned bits,
+			  u64 *start_ret, u64 *end_ret, u32 bits,
 			  bool exact_match, struct extent_state **cached_state)
 {
 	struct extent_state *state;
@@ -1667,7 +1666,7 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
  * returned will be the full contiguous area with the bits set.
  */
 int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
-			       u64 *start_ret, u64 *end_ret, unsigned bits)
+			       u64 *start_ret, u64 *end_ret, u32 bits)
 {
 	struct extent_state *state;
 	int ret = 1;
@@ -1704,7 +1703,7 @@ int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
  * trim @end_ret to the appropriate size.
  */
 void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
-				 u64 *start_ret, u64 *end_ret, unsigned bits)
+				 u64 *start_ret, u64 *end_ret, u32 bits)
 {
 	struct extent_state *state;
 	struct rb_node *node, *prev = NULL, *next;
@@ -2085,8 +2084,7 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
 
 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
 				  struct page *locked_page,
-				  unsigned clear_bits,
-				  unsigned long page_ops)
+				  u32 clear_bits, unsigned long page_ops)
 {
 	clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL);
 
@@ -2102,7 +2100,7 @@ void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
  */
 u64 count_range_bits(struct extent_io_tree *tree,
 		     u64 *start, u64 search_end, u64 max_bytes,
-		     unsigned bits, int contig)
+		     u32 bits, int contig)
 {
 	struct rb_node *node;
 	struct extent_state *state;
@@ -2222,7 +2220,7 @@ struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 sta
  * range is found set.
  */
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		   unsigned bits, int filled, struct extent_state *cached)
+		   u32 bits, int filled, struct extent_state *cached)
 {
 	struct extent_state *state = NULL;
 	struct rb_node *node;
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 552afc1c0bbc..602d6568c8ea 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -288,7 +288,7 @@ void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
 void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
 				  struct page *locked_page,
-				  unsigned bits_to_clear,
+				  u32 bits_to_clear,
 				  unsigned long page_ops);
 struct bio *btrfs_bio_alloc(u64 first_byte);
 struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
-- 
2.28.0


  parent reply	other threads:[~2020-10-21  6:27 UTC|newest]

Thread overview: 97+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-10-21  6:24 [PATCH v4 00/68] btrfs: add basic rw support for subpage sector size Qu Wenruo
2020-10-21  6:24 ` [PATCH v4 01/68] btrfs: extent-io-tests: remove invalid tests Qu Wenruo
2020-10-26 23:26   ` David Sterba
2020-10-27  0:44     ` Qu Wenruo
2020-11-03  6:07       ` Qu Wenruo
2020-10-21  6:24 ` [PATCH v4 02/68] btrfs: use iosize while reading compressed pages Qu Wenruo
2020-10-21  6:24 ` [PATCH v4 03/68] btrfs: extent_io: fix the comment on lock_extent_buffer_for_io() Qu Wenruo
2020-10-21  6:24 ` [PATCH v4 04/68] btrfs: extent_io: update the comment for find_first_extent_bit() Qu Wenruo
2020-10-21  6:24 ` [PATCH v4 05/68] btrfs: extent_io: sink the @failed_start parameter for set_extent_bit() Qu Wenruo
2020-10-21  6:24 ` [PATCH v4 06/68] btrfs: make btree inode io_tree has its special owner Qu Wenruo
2020-10-21  6:24 ` [PATCH v4 07/68] btrfs: disk-io: replace @fs_info and @private_data with @inode for btrfs_wq_submit_bio() Qu Wenruo
2020-10-21 22:00   ` Goldwyn Rodrigues
2020-10-21  6:24 ` [PATCH v4 08/68] btrfs: inode: sink parameter @start and @len for check_data_csum() Qu Wenruo
2020-10-21 22:11   ` Goldwyn Rodrigues
2020-10-27  0:13   ` David Sterba
2020-10-27  0:50     ` Qu Wenruo
2020-10-27 23:17       ` David Sterba
2020-10-28  0:57         ` Qu Wenruo
2020-10-29 19:38           ` David Sterba
2020-10-21  6:24 ` [PATCH v4 09/68] btrfs: extent_io: unexport extent_invalidatepage() Qu Wenruo
2020-10-27  0:24   ` David Sterba
2020-10-21  6:24 ` [PATCH v4 10/68] btrfs: extent_io: remove the forward declaration and rename __process_pages_contig Qu Wenruo
2020-10-27  0:28   ` David Sterba
2020-10-27  0:50     ` Qu Wenruo
2020-10-27 23:25       ` David Sterba
2020-10-21  6:24 ` [PATCH v4 11/68] btrfs: extent_io: rename pages_locked in process_pages_contig() Qu Wenruo
2020-10-21  6:24 ` [PATCH v4 12/68] btrfs: extent_io: only require sector size alignment for page read Qu Wenruo
2020-10-21  6:24 ` [PATCH v4 13/68] btrfs: extent_io: remove the extent_start/extent_len for end_bio_extent_readpage() Qu Wenruo
2020-10-27 10:29   ` David Sterba
2020-10-27 12:15     ` Qu Wenruo
2020-10-27 23:31       ` David Sterba
2020-10-21  6:25 ` [PATCH v4 14/68] btrfs: extent_io: integrate page status update into endio_readpage_release_extent() Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 15/68] btrfs: extent_io: rename page_size to io_size in submit_extent_page() Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 16/68] btrfs: extent_io: add assert_spin_locked() for attach_extent_buffer_page() Qu Wenruo
2020-10-27 10:43   ` David Sterba
2020-10-21  6:25 ` [PATCH v4 17/68] btrfs: extent_io: extract the btree page submission code into its own helper function Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 18/68] btrfs: extent_io: calculate inline extent buffer page size based on page size Qu Wenruo
2020-10-27 11:16   ` David Sterba
2020-10-27 11:20     ` David Sterba
2020-10-21  6:25 ` [PATCH v4 19/68] btrfs: extent_io: make btrfs_fs_info::buffer_radix to take sector size devided values Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 20/68] btrfs: extent_io: sink less common parameters for __set_extent_bit() Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 21/68] btrfs: extent_io: sink less common parameters for __clear_extent_bit() Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 22/68] btrfs: disk_io: grab fs_info from extent_buffer::fs_info directly for btrfs_mark_buffer_dirty() Qu Wenruo
2020-10-27 15:43   ` Goldwyn Rodrigues
2020-10-21  6:25 ` [PATCH v4 23/68] btrfs: disk-io: make csum_tree_block() handle sectorsize smaller than page size Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 24/68] btrfs: disk-io: extract the extent buffer verification from btree_readpage_end_io_hook() Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 25/68] btrfs: disk-io: accept bvec directly for csum_dirty_buffer() Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 26/68] btrfs: inode: make btrfs_readpage_end_io_hook() follow sector size Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 27/68] btrfs: introduce a helper to determine if the sectorsize is smaller than PAGE_SIZE Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 28/68] btrfs: extent_io: allow find_first_extent_bit() to find a range with exact bits match Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 29/68] btrfs: extent_io: don't allow tree block to cross page boundary for subpage support Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 30/68] btrfs: extent_io: update num_extent_pages() to support subpage sized extent buffer Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 31/68] btrfs: handle sectorsize < PAGE_SIZE case for extent buffer accessors Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 32/68] btrfs: disk-io: only clear EXTENT_LOCK bit for extent_invalidatepage() Qu Wenruo
2020-10-21  6:25 ` Qu Wenruo [this message]
2020-10-21  6:25 ` [PATCH v4 34/68] btrfs: extent_io: use extent_io_tree to handle subpage extent buffer allocation Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 35/68] btrfs: extent_io: make set/clear_extent_buffer_uptodate() to support subpage size Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 36/68] btrfs: extent_io: make the assert test on page uptodate able to handle subpage Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 37/68] btrfs: extent_io: implement subpage metadata read and its endio function Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 38/68] btrfs: extent_io: implement try_release_extent_buffer() for subpage metadata support Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 39/68] btrfs: extent_io: extra the core of test_range_bit() into test_range_bit_nolock() Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 40/68] btrfs: extent_io: introduce EXTENT_READ_SUBMITTED to handle subpage data read Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 41/68] btrfs: set btree inode track_uptodate for subpage support Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 42/68] btrfs: allow RO mount of 4K sector size fs on 64K page system Qu Wenruo
2020-10-29 20:11   ` David Sterba
2020-10-29 23:34   ` Michał Mirosław
2020-10-29 23:56     ` Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 43/68] btrfs: disk-io: allow btree_set_page_dirty() to do more sanity check on subpage metadata Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 44/68] btrfs: disk-io: support subpage metadata csum calculation at write time Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 45/68] btrfs: extent_io: prevent extent_state from being merged for btree io tree Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 46/68] btrfs: extent_io: make set_extent_buffer_dirty() to support subpage sized metadata Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 47/68] btrfs: extent_io: add subpage support for clear_extent_buffer_dirty() Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 48/68] btrfs: extent_io: make set_btree_ioerr() accept extent buffer Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 49/68] btrfs: extent_io: introduce write_one_subpage_eb() function Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 50/68] btrfs: extent_io: make lock_extent_buffer_for_io() subpage compatible Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 51/68] btrfs: extent_io: introduce submit_btree_subpage() to submit a page for subpage metadata write Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 52/68] btrfs: extent_io: introduce end_bio_subpage_eb_writepage() function Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 53/68] btrfs: inode: make can_nocow_extent() check only return 1 if the range is no smaller than PAGE_SIZE Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 54/68] btrfs: file: calculate reserve space based on PAGE_SIZE for buffered write Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 55/68] btrfs: file: make hole punching page aligned for subpage Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 56/68] btrfs: file: make btrfs_dirty_pages() follow page size to mark extent io tree Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 57/68] btrfs: file: make btrfs_file_write_iter() to be page aligned Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 58/68] btrfs: output extra info for space info update underflow Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 59/68] btrfs: delalloc-space: make data space reservation to be page aligned Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 60/68] btrfs: scrub: allow scrub to work with subpage sectorsize Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 61/68] btrfs: inode: make btrfs_truncate_block() to do page alignment Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 62/68] btrfs: file: make hole punch and zero range to be page aligned Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 63/68] btrfs: file: make btrfs_fallocate() to use PAGE_SIZE as blocksize Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 64/68] btrfs: inode: always mark the full page range delalloc for btrfs_page_mkwrite() Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 65/68] btrfs: inode: require page alignement for direct io Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 66/68] btrfs: inode: only do NOCOW write for page aligned extent Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 67/68] btrfs: reflink: do full page writeback for reflink prepare Qu Wenruo
2020-10-21  6:25 ` [PATCH v4 68/68] btrfs: support subpage read write for test Qu Wenruo
2020-10-21 11:22 ` [PATCH v4 00/68] btrfs: add basic rw support for subpage sector size David Sterba
2020-10-21 11:50   ` Qu Wenruo
2020-11-02 14:56 ` David Sterba
2020-11-03  0:06   ` Qu Wenruo

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20201021062554.68132-34-wqu@suse.com \
    --to=wqu@suse.com \
    --cc=linux-btrfs@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.