All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/4] xfs: rework online repair incore bitmap
@ 2019-10-09 16:48 Darrick J. Wong
  2019-10-09 16:49 ` [PATCH 1/4] xfs: rename xfs_bitmap to xbitmap Darrick J. Wong
                   ` (3 more replies)
  0 siblings, 4 replies; 11+ messages in thread
From: Darrick J. Wong @ 2019-10-09 16:48 UTC (permalink / raw)
  To: darrick.wong; +Cc: linux-xfs

Hi all,

Convert the incore bitmap to use an interval tree instead of linked
lists.  This lifts the limitation that callers had to be careful not to
set a range that was already set; and gets us ready for the btree
rebuilder functions needing to be able to set bits in a bitmap and
generate maximal contiguous extents for the set ranges.

If you're going to start using this mess, you probably ought to just
pull from my git trees, which are linked below.

This is an extraordinary way to destroy everything.  Enjoy!
Comments and questions are, as always, welcome.

--D

kernel git tree:
https://git.kernel.org/cgit/linux/kernel/git/djwong/xfs-linux.git/log/?h=repair-bitmap-rework

^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH 1/4] xfs: rename xfs_bitmap to xbitmap
  2019-10-09 16:48 [PATCH 0/4] xfs: rework online repair incore bitmap Darrick J. Wong
@ 2019-10-09 16:49 ` Darrick J. Wong
  2019-10-21 14:34   ` Brian Foster
  2019-10-09 16:49 ` [PATCH 2/4] xfs: replace open-coded bitmap weight logic Darrick J. Wong
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 11+ messages in thread
From: Darrick J. Wong @ 2019-10-09 16:49 UTC (permalink / raw)
  To: darrick.wong; +Cc: linux-xfs

From: Darrick J. Wong <darrick.wong@oracle.com>

Shorten the name of xfs_bitmap to xbitmap since the scrub bitmap has
nothing to do with the libxfs bitmap.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
---
 fs/xfs/scrub/agheader_repair.c |   42 ++++++++++++-----------
 fs/xfs/scrub/bitmap.c          |   72 ++++++++++++++++++++--------------------
 fs/xfs/scrub/bitmap.h          |   22 ++++++------
 fs/xfs/scrub/repair.c          |    8 ++--
 fs/xfs/scrub/repair.h          |    4 +-
 5 files changed, 74 insertions(+), 74 deletions(-)


diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index 8fcd43040c96..9fbb6035f4e2 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -429,10 +429,10 @@ xrep_agf(
 
 struct xrep_agfl {
 	/* Bitmap of other OWN_AG metadata blocks. */
-	struct xfs_bitmap	agmetablocks;
+	struct xbitmap		agmetablocks;
 
 	/* Bitmap of free space. */
-	struct xfs_bitmap	*freesp;
+	struct xbitmap		*freesp;
 
 	struct xfs_scrub	*sc;
 };
@@ -455,12 +455,12 @@ xrep_agfl_walk_rmap(
 	if (rec->rm_owner == XFS_RMAP_OWN_AG) {
 		fsb = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
 				rec->rm_startblock);
-		error = xfs_bitmap_set(ra->freesp, fsb, rec->rm_blockcount);
+		error = xbitmap_set(ra->freesp, fsb, rec->rm_blockcount);
 		if (error)
 			return error;
 	}
 
-	return xfs_bitmap_set_btcur_path(&ra->agmetablocks, cur);
+	return xbitmap_set_btcur_path(&ra->agmetablocks, cur);
 }
 
 /*
@@ -476,19 +476,19 @@ STATIC int
 xrep_agfl_collect_blocks(
 	struct xfs_scrub	*sc,
 	struct xfs_buf		*agf_bp,
-	struct xfs_bitmap	*agfl_extents,
+	struct xbitmap		*agfl_extents,
 	xfs_agblock_t		*flcount)
 {
 	struct xrep_agfl	ra;
 	struct xfs_mount	*mp = sc->mp;
 	struct xfs_btree_cur	*cur;
-	struct xfs_bitmap_range	*br;
-	struct xfs_bitmap_range	*n;
+	struct xbitmap_range	*br;
+	struct xbitmap_range	*n;
 	int			error;
 
 	ra.sc = sc;
 	ra.freesp = agfl_extents;
-	xfs_bitmap_init(&ra.agmetablocks);
+	xbitmap_init(&ra.agmetablocks);
 
 	/* Find all space used by the free space btrees & rmapbt. */
 	cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
@@ -500,7 +500,7 @@ xrep_agfl_collect_blocks(
 	/* Find all blocks currently being used by the bnobt. */
 	cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
 			XFS_BTNUM_BNO);
-	error = xfs_bitmap_set_btblocks(&ra.agmetablocks, cur);
+	error = xbitmap_set_btblocks(&ra.agmetablocks, cur);
 	if (error)
 		goto err;
 	xfs_btree_del_cursor(cur, error);
@@ -508,7 +508,7 @@ xrep_agfl_collect_blocks(
 	/* Find all blocks currently being used by the cntbt. */
 	cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
 			XFS_BTNUM_CNT);
-	error = xfs_bitmap_set_btblocks(&ra.agmetablocks, cur);
+	error = xbitmap_set_btblocks(&ra.agmetablocks, cur);
 	if (error)
 		goto err;
 
@@ -518,8 +518,8 @@ xrep_agfl_collect_blocks(
 	 * Drop the freesp meta blocks that are in use by btrees.
 	 * The remaining blocks /should/ be AGFL blocks.
 	 */
-	error = xfs_bitmap_disunion(agfl_extents, &ra.agmetablocks);
-	xfs_bitmap_destroy(&ra.agmetablocks);
+	error = xbitmap_disunion(agfl_extents, &ra.agmetablocks);
+	xbitmap_destroy(&ra.agmetablocks);
 	if (error)
 		return error;
 
@@ -528,7 +528,7 @@ xrep_agfl_collect_blocks(
 	 * the AGFL we'll free them later.
 	 */
 	*flcount = 0;
-	for_each_xfs_bitmap_extent(br, n, agfl_extents) {
+	for_each_xbitmap_extent(br, n, agfl_extents) {
 		*flcount += br->len;
 		if (*flcount > xfs_agfl_size(mp))
 			break;
@@ -538,7 +538,7 @@ xrep_agfl_collect_blocks(
 	return 0;
 
 err:
-	xfs_bitmap_destroy(&ra.agmetablocks);
+	xbitmap_destroy(&ra.agmetablocks);
 	xfs_btree_del_cursor(cur, error);
 	return error;
 }
@@ -573,13 +573,13 @@ STATIC void
 xrep_agfl_init_header(
 	struct xfs_scrub	*sc,
 	struct xfs_buf		*agfl_bp,
-	struct xfs_bitmap	*agfl_extents,
+	struct xbitmap		*agfl_extents,
 	xfs_agblock_t		flcount)
 {
 	struct xfs_mount	*mp = sc->mp;
 	__be32			*agfl_bno;
-	struct xfs_bitmap_range	*br;
-	struct xfs_bitmap_range	*n;
+	struct xbitmap_range	*br;
+	struct xbitmap_range	*n;
 	struct xfs_agfl		*agfl;
 	xfs_agblock_t		agbno;
 	unsigned int		fl_off;
@@ -603,7 +603,7 @@ xrep_agfl_init_header(
 	 */
 	fl_off = 0;
 	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agfl_bp);
-	for_each_xfs_bitmap_extent(br, n, agfl_extents) {
+	for_each_xbitmap_extent(br, n, agfl_extents) {
 		agbno = XFS_FSB_TO_AGBNO(mp, br->start);
 
 		trace_xrep_agfl_insert(mp, sc->sa.agno, agbno, br->len);
@@ -637,7 +637,7 @@ int
 xrep_agfl(
 	struct xfs_scrub	*sc)
 {
-	struct xfs_bitmap	agfl_extents;
+	struct xbitmap		agfl_extents;
 	struct xfs_mount	*mp = sc->mp;
 	struct xfs_buf		*agf_bp;
 	struct xfs_buf		*agfl_bp;
@@ -649,7 +649,7 @@ xrep_agfl(
 		return -EOPNOTSUPP;
 
 	xchk_perag_get(sc->mp, &sc->sa);
-	xfs_bitmap_init(&agfl_extents);
+	xbitmap_init(&agfl_extents);
 
 	/*
 	 * Read the AGF so that we can query the rmapbt.  We hope that there's
@@ -701,7 +701,7 @@ xrep_agfl(
 	error = xrep_reap_extents(sc, &agfl_extents, &XFS_RMAP_OINFO_AG,
 			XFS_AG_RESV_AGFL);
 err:
-	xfs_bitmap_destroy(&agfl_extents);
+	xbitmap_destroy(&agfl_extents);
 	return error;
 }
 
diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c
index 3d47d111be5a..5b07b46c89c9 100644
--- a/fs/xfs/scrub/bitmap.c
+++ b/fs/xfs/scrub/bitmap.c
@@ -18,14 +18,14 @@
  * This is the logical equivalent of bitmap |= mask(start, len).
  */
 int
-xfs_bitmap_set(
-	struct xfs_bitmap	*bitmap,
+xbitmap_set(
+	struct xbitmap		*bitmap,
 	uint64_t		start,
 	uint64_t		len)
 {
-	struct xfs_bitmap_range	*bmr;
+	struct xbitmap_range	*bmr;
 
-	bmr = kmem_alloc(sizeof(struct xfs_bitmap_range), KM_MAYFAIL);
+	bmr = kmem_alloc(sizeof(struct xbitmap_range), KM_MAYFAIL);
 	if (!bmr)
 		return -ENOMEM;
 
@@ -39,13 +39,13 @@ xfs_bitmap_set(
 
 /* Free everything related to this bitmap. */
 void
-xfs_bitmap_destroy(
-	struct xfs_bitmap	*bitmap)
+xbitmap_destroy(
+	struct xbitmap		*bitmap)
 {
-	struct xfs_bitmap_range	*bmr;
-	struct xfs_bitmap_range	*n;
+	struct xbitmap_range	*bmr;
+	struct xbitmap_range	*n;
 
-	for_each_xfs_bitmap_extent(bmr, n, bitmap) {
+	for_each_xbitmap_extent(bmr, n, bitmap) {
 		list_del(&bmr->list);
 		kmem_free(bmr);
 	}
@@ -53,24 +53,24 @@ xfs_bitmap_destroy(
 
 /* Set up a per-AG block bitmap. */
 void
-xfs_bitmap_init(
-	struct xfs_bitmap	*bitmap)
+xbitmap_init(
+	struct xbitmap		*bitmap)
 {
 	INIT_LIST_HEAD(&bitmap->list);
 }
 
 /* Compare two btree extents. */
 static int
-xfs_bitmap_range_cmp(
+xbitmap_range_cmp(
 	void			*priv,
 	struct list_head	*a,
 	struct list_head	*b)
 {
-	struct xfs_bitmap_range	*ap;
-	struct xfs_bitmap_range	*bp;
+	struct xbitmap_range	*ap;
+	struct xbitmap_range	*bp;
 
-	ap = container_of(a, struct xfs_bitmap_range, list);
-	bp = container_of(b, struct xfs_bitmap_range, list);
+	ap = container_of(a, struct xbitmap_range, list);
+	bp = container_of(b, struct xbitmap_range, list);
 
 	if (ap->start > bp->start)
 		return 1;
@@ -96,14 +96,14 @@ xfs_bitmap_range_cmp(
 #define LEFT_ALIGNED	(1 << 0)
 #define RIGHT_ALIGNED	(1 << 1)
 int
-xfs_bitmap_disunion(
-	struct xfs_bitmap	*bitmap,
-	struct xfs_bitmap	*sub)
+xbitmap_disunion(
+	struct xbitmap		*bitmap,
+	struct xbitmap		*sub)
 {
 	struct list_head	*lp;
-	struct xfs_bitmap_range	*br;
-	struct xfs_bitmap_range	*new_br;
-	struct xfs_bitmap_range	*sub_br;
+	struct xbitmap_range	*br;
+	struct xbitmap_range	*new_br;
+	struct xbitmap_range	*sub_br;
 	uint64_t		sub_start;
 	uint64_t		sub_len;
 	int			state;
@@ -113,8 +113,8 @@ xfs_bitmap_disunion(
 		return 0;
 	ASSERT(!list_empty(&sub->list));
 
-	list_sort(NULL, &bitmap->list, xfs_bitmap_range_cmp);
-	list_sort(NULL, &sub->list, xfs_bitmap_range_cmp);
+	list_sort(NULL, &bitmap->list, xbitmap_range_cmp);
+	list_sort(NULL, &sub->list, xbitmap_range_cmp);
 
 	/*
 	 * Now that we've sorted both lists, we iterate bitmap once, rolling
@@ -124,11 +124,11 @@ xfs_bitmap_disunion(
 	 * list traversal is similar to merge sort, but we're deleting
 	 * instead.  In this manner we avoid O(n^2) operations.
 	 */
-	sub_br = list_first_entry(&sub->list, struct xfs_bitmap_range,
+	sub_br = list_first_entry(&sub->list, struct xbitmap_range,
 			list);
 	lp = bitmap->list.next;
 	while (lp != &bitmap->list) {
-		br = list_entry(lp, struct xfs_bitmap_range, list);
+		br = list_entry(lp, struct xbitmap_range, list);
 
 		/*
 		 * Advance sub_br and/or br until we find a pair that
@@ -181,7 +181,7 @@ xfs_bitmap_disunion(
 			 * Deleting from the middle: add the new right extent
 			 * and then shrink the left extent.
 			 */
-			new_br = kmem_alloc(sizeof(struct xfs_bitmap_range),
+			new_br = kmem_alloc(sizeof(struct xbitmap_range),
 					KM_MAYFAIL);
 			if (!new_br) {
 				error = -ENOMEM;
@@ -247,8 +247,8 @@ xfs_bitmap_disunion(
  * blocks going from the leaf towards the root.
  */
 int
-xfs_bitmap_set_btcur_path(
-	struct xfs_bitmap	*bitmap,
+xbitmap_set_btcur_path(
+	struct xbitmap		*bitmap,
 	struct xfs_btree_cur	*cur)
 {
 	struct xfs_buf		*bp;
@@ -261,7 +261,7 @@ xfs_bitmap_set_btcur_path(
 		if (!bp)
 			continue;
 		fsb = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
-		error = xfs_bitmap_set(bitmap, fsb, 1);
+		error = xbitmap_set(bitmap, fsb, 1);
 		if (error)
 			return error;
 	}
@@ -271,12 +271,12 @@ xfs_bitmap_set_btcur_path(
 
 /* Collect a btree's block in the bitmap. */
 STATIC int
-xfs_bitmap_collect_btblock(
+xbitmap_collect_btblock(
 	struct xfs_btree_cur	*cur,
 	int			level,
 	void			*priv)
 {
-	struct xfs_bitmap	*bitmap = priv;
+	struct xbitmap		*bitmap = priv;
 	struct xfs_buf		*bp;
 	xfs_fsblock_t		fsbno;
 
@@ -285,14 +285,14 @@ xfs_bitmap_collect_btblock(
 		return 0;
 
 	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
-	return xfs_bitmap_set(bitmap, fsbno, 1);
+	return xbitmap_set(bitmap, fsbno, 1);
 }
 
 /* Walk the btree and mark the bitmap wherever a btree block is found. */
 int
-xfs_bitmap_set_btblocks(
-	struct xfs_bitmap	*bitmap,
+xbitmap_set_btblocks(
+	struct xbitmap		*bitmap,
 	struct xfs_btree_cur	*cur)
 {
-	return xfs_btree_visit_blocks(cur, xfs_bitmap_collect_btblock, bitmap);
+	return xfs_btree_visit_blocks(cur, xbitmap_collect_btblock, bitmap);
 }
diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h
index ae8ecbce6fa6..8db4017ac78e 100644
--- a/fs/xfs/scrub/bitmap.h
+++ b/fs/xfs/scrub/bitmap.h
@@ -6,31 +6,31 @@
 #ifndef __XFS_SCRUB_BITMAP_H__
 #define __XFS_SCRUB_BITMAP_H__
 
-struct xfs_bitmap_range {
+struct xbitmap_range {
 	struct list_head	list;
 	uint64_t		start;
 	uint64_t		len;
 };
 
-struct xfs_bitmap {
+struct xbitmap {
 	struct list_head	list;
 };
 
-void xfs_bitmap_init(struct xfs_bitmap *bitmap);
-void xfs_bitmap_destroy(struct xfs_bitmap *bitmap);
+void xbitmap_init(struct xbitmap *bitmap);
+void xbitmap_destroy(struct xbitmap *bitmap);
 
-#define for_each_xfs_bitmap_extent(bex, n, bitmap) \
+#define for_each_xbitmap_extent(bex, n, bitmap) \
 	list_for_each_entry_safe((bex), (n), &(bitmap)->list, list)
 
-#define for_each_xfs_bitmap_block(b, bex, n, bitmap) \
+#define for_each_xbitmap_block(b, bex, n, bitmap) \
 	list_for_each_entry_safe((bex), (n), &(bitmap)->list, list) \
-		for ((b) = bex->start; (b) < bex->start + bex->len; (b)++)
+		for ((b) = (bex)->start; (b) < (bex)->start + (bex)->len; (b)++)
 
-int xfs_bitmap_set(struct xfs_bitmap *bitmap, uint64_t start, uint64_t len);
-int xfs_bitmap_disunion(struct xfs_bitmap *bitmap, struct xfs_bitmap *sub);
-int xfs_bitmap_set_btcur_path(struct xfs_bitmap *bitmap,
+int xbitmap_set(struct xbitmap *bitmap, uint64_t start, uint64_t len);
+int xbitmap_disunion(struct xbitmap *bitmap, struct xbitmap *sub);
+int xbitmap_set_btcur_path(struct xbitmap *bitmap,
 		struct xfs_btree_cur *cur);
-int xfs_bitmap_set_btblocks(struct xfs_bitmap *bitmap,
+int xbitmap_set_btblocks(struct xbitmap *bitmap,
 		struct xfs_btree_cur *cur);
 
 #endif	/* __XFS_SCRUB_BITMAP_H__ */
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index 8349694f985d..d41da4c44f10 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -600,19 +600,19 @@ xrep_reap_block(
 int
 xrep_reap_extents(
 	struct xfs_scrub		*sc,
-	struct xfs_bitmap		*bitmap,
+	struct xbitmap			*bitmap,
 	const struct xfs_owner_info	*oinfo,
 	enum xfs_ag_resv_type		type)
 {
-	struct xfs_bitmap_range		*bmr;
-	struct xfs_bitmap_range		*n;
+	struct xbitmap_range		*bmr;
+	struct xbitmap_range		*n;
 	xfs_fsblock_t			fsbno;
 	unsigned int			deferred = 0;
 	int				error = 0;
 
 	ASSERT(xfs_sb_version_hasrmapbt(&sc->mp->m_sb));
 
-	for_each_xfs_bitmap_block(fsbno, bmr, n, bitmap) {
+	for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
 		ASSERT(sc->ip != NULL ||
 		       XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.agno);
 		trace_xrep_dispose_btree_extent(sc->mp,
diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h
index eab41928990f..479cfe38065e 100644
--- a/fs/xfs/scrub/repair.h
+++ b/fs/xfs/scrub/repair.h
@@ -28,10 +28,10 @@ int xrep_init_btblock(struct xfs_scrub *sc, xfs_fsblock_t fsb,
 		struct xfs_buf **bpp, xfs_btnum_t btnum,
 		const struct xfs_buf_ops *ops);
 
-struct xfs_bitmap;
+struct xbitmap;
 
 int xrep_fix_freelist(struct xfs_scrub *sc, bool can_shrink);
-int xrep_reap_extents(struct xfs_scrub *sc, struct xfs_bitmap *exlist,
+int xrep_reap_extents(struct xfs_scrub *sc, struct xbitmap *exlist,
 		const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type);
 
 struct xrep_find_ag_btree {


^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 2/4] xfs: replace open-coded bitmap weight logic
  2019-10-09 16:48 [PATCH 0/4] xfs: rework online repair incore bitmap Darrick J. Wong
  2019-10-09 16:49 ` [PATCH 1/4] xfs: rename xfs_bitmap to xbitmap Darrick J. Wong
@ 2019-10-09 16:49 ` Darrick J. Wong
  2019-10-21 14:34   ` Brian Foster
  2019-10-09 16:49 ` [PATCH 3/4] xfs: remove the for_each_xbitmap_ helpers Darrick J. Wong
  2019-10-09 16:49 ` [PATCH 4/4] xfs: convert xbitmap to interval tree Darrick J. Wong
  3 siblings, 1 reply; 11+ messages in thread
From: Darrick J. Wong @ 2019-10-09 16:49 UTC (permalink / raw)
  To: darrick.wong; +Cc: linux-xfs

From: Darrick J. Wong <darrick.wong@oracle.com>

Add an xbitmap_hweight helper function so that we can get rid of the
open-coded loop.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
---
 fs/xfs/scrub/agheader_repair.c |   12 ++----------
 fs/xfs/scrub/bitmap.c          |   15 +++++++++++++++
 fs/xfs/scrub/bitmap.h          |    1 +
 3 files changed, 18 insertions(+), 10 deletions(-)


diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index 9fbb6035f4e2..f35596cc26fb 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -482,8 +482,6 @@ xrep_agfl_collect_blocks(
 	struct xrep_agfl	ra;
 	struct xfs_mount	*mp = sc->mp;
 	struct xfs_btree_cur	*cur;
-	struct xbitmap_range	*br;
-	struct xbitmap_range	*n;
 	int			error;
 
 	ra.sc = sc;
@@ -527,14 +525,8 @@ xrep_agfl_collect_blocks(
 	 * Calculate the new AGFL size.  If we found more blocks than fit in
 	 * the AGFL we'll free them later.
 	 */
-	*flcount = 0;
-	for_each_xbitmap_extent(br, n, agfl_extents) {
-		*flcount += br->len;
-		if (*flcount > xfs_agfl_size(mp))
-			break;
-	}
-	if (*flcount > xfs_agfl_size(mp))
-		*flcount = xfs_agfl_size(mp);
+	*flcount = min_t(uint64_t, xbitmap_hweight(agfl_extents),
+			 xfs_agfl_size(mp));
 	return 0;
 
 err:
diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c
index 5b07b46c89c9..8b704d7b5855 100644
--- a/fs/xfs/scrub/bitmap.c
+++ b/fs/xfs/scrub/bitmap.c
@@ -296,3 +296,18 @@ xbitmap_set_btblocks(
 {
 	return xfs_btree_visit_blocks(cur, xbitmap_collect_btblock, bitmap);
 }
+
+/* How many bits are set in this bitmap? */
+uint64_t
+xbitmap_hweight(
+	struct xbitmap		*bitmap)
+{
+	struct xbitmap_range	*bmr;
+	struct xbitmap_range	*n;
+	uint64_t		ret = 0;
+
+	for_each_xbitmap_extent(bmr, n, bitmap)
+		ret += bmr->len;
+
+	return ret;
+}
diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h
index 8db4017ac78e..900646b72de1 100644
--- a/fs/xfs/scrub/bitmap.h
+++ b/fs/xfs/scrub/bitmap.h
@@ -32,5 +32,6 @@ int xbitmap_set_btcur_path(struct xbitmap *bitmap,
 		struct xfs_btree_cur *cur);
 int xbitmap_set_btblocks(struct xbitmap *bitmap,
 		struct xfs_btree_cur *cur);
+uint64_t xbitmap_hweight(struct xbitmap *bitmap);
 
 #endif	/* __XFS_SCRUB_BITMAP_H__ */


^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 3/4] xfs: remove the for_each_xbitmap_ helpers
  2019-10-09 16:48 [PATCH 0/4] xfs: rework online repair incore bitmap Darrick J. Wong
  2019-10-09 16:49 ` [PATCH 1/4] xfs: rename xfs_bitmap to xbitmap Darrick J. Wong
  2019-10-09 16:49 ` [PATCH 2/4] xfs: replace open-coded bitmap weight logic Darrick J. Wong
@ 2019-10-09 16:49 ` Darrick J. Wong
  2019-10-22 13:35   ` Brian Foster
  2019-10-09 16:49 ` [PATCH 4/4] xfs: convert xbitmap to interval tree Darrick J. Wong
  3 siblings, 1 reply; 11+ messages in thread
From: Darrick J. Wong @ 2019-10-09 16:49 UTC (permalink / raw)
  To: darrick.wong; +Cc: linux-xfs

From: Darrick J. Wong <darrick.wong@oracle.com>

Remove the for_each_xbitmap_ macros in favor of proper iterator
functions.  We'll soon be switching this data structure over to an
interval tree implementation, which means that we can't allow callers to
modify the bitmap during iteration without telling us.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
---
 fs/xfs/scrub/agheader_repair.c |   73 ++++++++++++++++++++++++----------------
 fs/xfs/scrub/bitmap.c          |   59 ++++++++++++++++++++++++++++++++
 fs/xfs/scrub/bitmap.h          |   22 ++++++++----
 fs/xfs/scrub/repair.c          |   60 +++++++++++++++++----------------
 4 files changed, 148 insertions(+), 66 deletions(-)


diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index f35596cc26fb..145e9d359d2f 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -560,6 +560,40 @@ xrep_agfl_update_agf(
 			XFS_AGF_FLFIRST | XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
 }
 
+struct xrep_agfl_fill {
+	struct xbitmap		used_extents;
+	struct xfs_scrub	*sc;
+	__be32			*agfl_bno;
+	xfs_agblock_t		flcount;
+	unsigned int		fl_off;
+};
+
+/* Fill the AGFL with whatever blocks are in this extent. */
+static int
+xrep_agfl_fill(
+	uint64_t		start,
+	uint64_t		len,
+	void			*priv)
+{
+	struct xrep_agfl_fill	*af = priv;
+	struct xfs_scrub	*sc = af->sc;
+	xfs_fsblock_t		fsbno = start;
+
+	while (fsbno < start + len && af->fl_off < af->flcount)
+		af->agfl_bno[af->fl_off++] =
+				cpu_to_be32(XFS_FSB_TO_AGBNO(sc->mp, fsbno++));
+
+	trace_xrep_agfl_insert(sc->mp, sc->sa.agno,
+			XFS_FSB_TO_AGBNO(sc->mp, start), len);
+
+	xbitmap_set(&af->used_extents, start, fsbno - 1);
+
+	if (af->fl_off == af->flcount)
+		return -ECANCELED;
+
+	return 0;
+}
+
 /* Write out a totally new AGFL. */
 STATIC void
 xrep_agfl_init_header(
@@ -568,13 +602,12 @@ xrep_agfl_init_header(
 	struct xbitmap		*agfl_extents,
 	xfs_agblock_t		flcount)
 {
+	struct xrep_agfl_fill	af = {
+		.sc		= sc,
+		.flcount	= flcount,
+	};
 	struct xfs_mount	*mp = sc->mp;
-	__be32			*agfl_bno;
-	struct xbitmap_range	*br;
-	struct xbitmap_range	*n;
 	struct xfs_agfl		*agfl;
-	xfs_agblock_t		agbno;
-	unsigned int		fl_off;
 
 	ASSERT(flcount <= xfs_agfl_size(mp));
 
@@ -593,35 +626,15 @@ xrep_agfl_init_header(
 	 * blocks than fit in the AGFL, they will be freed in a subsequent
 	 * step.
 	 */
-	fl_off = 0;
-	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agfl_bp);
-	for_each_xbitmap_extent(br, n, agfl_extents) {
-		agbno = XFS_FSB_TO_AGBNO(mp, br->start);
-
-		trace_xrep_agfl_insert(mp, sc->sa.agno, agbno, br->len);
-
-		while (br->len > 0 && fl_off < flcount) {
-			agfl_bno[fl_off] = cpu_to_be32(agbno);
-			fl_off++;
-			agbno++;
-
-			/*
-			 * We've now used br->start by putting it in the AGFL,
-			 * so bump br so that we don't reap the block later.
-			 */
-			br->start++;
-			br->len--;
-		}
-
-		if (br->len)
-			break;
-		list_del(&br->list);
-		kmem_free(br);
-	}
+	xbitmap_init(&af.used_extents);
+	af.agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agfl_bp),
+	xbitmap_iter_set(agfl_extents, xrep_agfl_fill, &af);
+	xbitmap_disunion(agfl_extents, &af.used_extents);
 
 	/* Write new AGFL to disk. */
 	xfs_trans_buf_set_type(sc->tp, agfl_bp, XFS_BLFT_AGFL_BUF);
 	xfs_trans_log_buf(sc->tp, agfl_bp, 0, BBTOB(agfl_bp->b_length) - 1);
+	xbitmap_destroy(&af.used_extents);
 }
 
 /* Repair the AGFL. */
diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c
index 8b704d7b5855..1041f17f6bb6 100644
--- a/fs/xfs/scrub/bitmap.c
+++ b/fs/xfs/scrub/bitmap.c
@@ -12,6 +12,9 @@
 #include "xfs_btree.h"
 #include "scrub/bitmap.h"
 
+#define for_each_xbitmap_extent(bex, n, bitmap) \
+	list_for_each_entry_safe((bex), (n), &(bitmap)->list, list)
+
 /*
  * Set a range of this bitmap.  Caller must ensure the range is not set.
  *
@@ -311,3 +314,59 @@ xbitmap_hweight(
 
 	return ret;
 }
+
+/* Call a function for every run of set bits in this bitmap. */
+int
+xbitmap_iter_set(
+	struct xbitmap		*bitmap,
+	xbitmap_walk_run_fn	fn,
+	void			*priv)
+{
+	struct xbitmap_range	*bex, *n;
+	int			error;
+
+	for_each_xbitmap_extent(bex, n, bitmap) {
+		error = fn(bex->start, bex->len, priv);
+		if (error)
+			break;
+	}
+
+	return error;
+}
+
+struct xbitmap_walk_bits {
+	xbitmap_walk_bit_fn	fn;
+	void			*priv;
+};
+
+/* Walk all the bits in a run. */
+static int
+xbitmap_walk_bits_in_run(
+	uint64_t			start,
+	uint64_t			len,
+	void				*priv)
+{
+	struct xbitmap_walk_bits	*wb = priv;
+	uint64_t			i;
+	int				error;
+
+	for (i = start; i < start + len; i++) {
+		error = wb->fn(i, wb->priv);
+		if (error)
+			break;
+	}
+
+	return error;
+}
+
+/* Call a function for every set bit in this bitmap. */
+int
+xbitmap_iter_set_bits(
+	struct xbitmap			*bitmap,
+	xbitmap_walk_bit_fn		fn,
+	void				*priv)
+{
+	struct xbitmap_walk_bits	wb = {.fn = fn, .priv = priv};
+
+	return xbitmap_iter_set(bitmap, xbitmap_walk_bits_in_run, &wb);
+}
diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h
index 900646b72de1..27fde5b4a753 100644
--- a/fs/xfs/scrub/bitmap.h
+++ b/fs/xfs/scrub/bitmap.h
@@ -19,13 +19,6 @@ struct xbitmap {
 void xbitmap_init(struct xbitmap *bitmap);
 void xbitmap_destroy(struct xbitmap *bitmap);
 
-#define for_each_xbitmap_extent(bex, n, bitmap) \
-	list_for_each_entry_safe((bex), (n), &(bitmap)->list, list)
-
-#define for_each_xbitmap_block(b, bex, n, bitmap) \
-	list_for_each_entry_safe((bex), (n), &(bitmap)->list, list) \
-		for ((b) = (bex)->start; (b) < (bex)->start + (bex)->len; (b)++)
-
 int xbitmap_set(struct xbitmap *bitmap, uint64_t start, uint64_t len);
 int xbitmap_disunion(struct xbitmap *bitmap, struct xbitmap *sub);
 int xbitmap_set_btcur_path(struct xbitmap *bitmap,
@@ -34,4 +27,19 @@ int xbitmap_set_btblocks(struct xbitmap *bitmap,
 		struct xfs_btree_cur *cur);
 uint64_t xbitmap_hweight(struct xbitmap *bitmap);
 
+/*
+ * Return codes for the bitmap iterator functions are 0 to continue iterating,
+ * and non-zero to stop iterating.  Any non-zero value will be passed up to the
+ * iteration caller.  The special value -ECANCELED can be used to stop
+ * iteration, because neither bitmap iterator ever generates that error code on
+ * its own.
+ */
+typedef int (*xbitmap_walk_run_fn)(uint64_t start, uint64_t len, void *priv);
+int xbitmap_iter_set(struct xbitmap *bitmap, xbitmap_walk_run_fn fn,
+		void *priv);
+
+typedef int (*xbitmap_walk_bit_fn)(uint64_t bit, void *priv);
+int xbitmap_iter_set_bits(struct xbitmap *bitmap, xbitmap_walk_bit_fn fn,
+		void *priv);
+
 #endif	/* __XFS_SCRUB_BITMAP_H__ */
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index d41da4c44f10..588bc054db5c 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -507,15 +507,21 @@ xrep_reap_invalidate_block(
 	xfs_trans_binval(sc->tp, bp);
 }
 
+struct xrep_reap_block {
+	struct xfs_scrub		*sc;
+	const struct xfs_owner_info	*oinfo;
+	enum xfs_ag_resv_type		resv;
+	unsigned int			deferred;
+};
+
 /* Dispose of a single block. */
 STATIC int
 xrep_reap_block(
-	struct xfs_scrub		*sc,
-	xfs_fsblock_t			fsbno,
-	const struct xfs_owner_info	*oinfo,
-	enum xfs_ag_resv_type		resv,
-	unsigned int			*deferred)
+	uint64_t			fsbno,
+	void				*priv)
 {
+	struct xrep_reap_block		*rb = priv;
+	struct xfs_scrub		*sc = rb->sc;
 	struct xfs_btree_cur		*cur;
 	struct xfs_buf			*agf_bp = NULL;
 	xfs_agnumber_t			agno;
@@ -527,6 +533,10 @@ xrep_reap_block(
 	agno = XFS_FSB_TO_AGNO(sc->mp, fsbno);
 	agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno);
 
+	ASSERT(sc->ip != NULL || agno == sc->sa.agno);
+
+	trace_xrep_dispose_btree_extent(sc->mp, agno, agbno, 1);
+
 	/*
 	 * If we are repairing per-inode metadata, we need to read in the AGF
 	 * buffer.  Otherwise, we're repairing a per-AG structure, so reuse
@@ -544,7 +554,8 @@ xrep_reap_block(
 	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, agno);
 
 	/* Can we find any other rmappings? */
-	error = xfs_rmap_has_other_keys(cur, agbno, 1, oinfo, &has_other_rmap);
+	error = xfs_rmap_has_other_keys(cur, agbno, 1, rb->oinfo,
+			&has_other_rmap);
 	xfs_btree_del_cursor(cur, error);
 	if (error)
 		goto out_free;
@@ -563,8 +574,9 @@ xrep_reap_block(
 	 * to run xfs_repair.
 	 */
 	if (has_other_rmap) {
-		error = xfs_rmap_free(sc->tp, agf_bp, agno, agbno, 1, oinfo);
-	} else if (resv == XFS_AG_RESV_AGFL) {
+		error = xfs_rmap_free(sc->tp, agf_bp, agno, agbno, 1,
+				rb->oinfo);
+	} else if (rb->resv == XFS_AG_RESV_AGFL) {
 		xrep_reap_invalidate_block(sc, fsbno);
 		error = xrep_put_freelist(sc, agbno);
 	} else {
@@ -576,16 +588,16 @@ xrep_reap_block(
 		 * reservation.
 		 */
 		xrep_reap_invalidate_block(sc, fsbno);
-		__xfs_bmap_add_free(sc->tp, fsbno, 1, oinfo, true);
-		(*deferred)++;
-		need_roll = *deferred > 100;
+		__xfs_bmap_add_free(sc->tp, fsbno, 1, rb->oinfo, true);
+		rb->deferred++;
+		need_roll = rb->deferred > 100;
 	}
 	if (agf_bp != sc->sa.agf_bp)
 		xfs_trans_brelse(sc->tp, agf_bp);
 	if (error || !need_roll)
 		return error;
 
-	*deferred = 0;
+	rb->deferred = 0;
 	if (sc->ip)
 		return xfs_trans_roll_inode(&sc->tp, sc->ip);
 	return xrep_roll_ag_trans(sc);
@@ -604,27 +616,17 @@ xrep_reap_extents(
 	const struct xfs_owner_info	*oinfo,
 	enum xfs_ag_resv_type		type)
 {
-	struct xbitmap_range		*bmr;
-	struct xbitmap_range		*n;
-	xfs_fsblock_t			fsbno;
-	unsigned int			deferred = 0;
+	struct xrep_reap_block		rb = {
+		.sc			= sc,
+		.oinfo			= oinfo,
+		.resv			= type,
+	};
 	int				error = 0;
 
 	ASSERT(xfs_sb_version_hasrmapbt(&sc->mp->m_sb));
 
-	for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
-		ASSERT(sc->ip != NULL ||
-		       XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.agno);
-		trace_xrep_dispose_btree_extent(sc->mp,
-				XFS_FSB_TO_AGNO(sc->mp, fsbno),
-				XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1);
-
-		error = xrep_reap_block(sc, fsbno, oinfo, type, &deferred);
-		if (error)
-			break;
-	}
-
-	if (error || deferred == 0)
+	error = xbitmap_iter_set_bits(bitmap, xrep_reap_block, &rb);
+	if (error || rb.deferred == 0)
 		return error;
 
 	if (sc->ip)


^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 4/4] xfs: convert xbitmap to interval tree
  2019-10-09 16:48 [PATCH 0/4] xfs: rework online repair incore bitmap Darrick J. Wong
                   ` (2 preceding siblings ...)
  2019-10-09 16:49 ` [PATCH 3/4] xfs: remove the for_each_xbitmap_ helpers Darrick J. Wong
@ 2019-10-09 16:49 ` Darrick J. Wong
  2019-10-22 13:38   ` Brian Foster
  3 siblings, 1 reply; 11+ messages in thread
From: Darrick J. Wong @ 2019-10-09 16:49 UTC (permalink / raw)
  To: darrick.wong; +Cc: linux-xfs

From: Darrick J. Wong <darrick.wong@oracle.com>

Convert the xbitmap code to use interval trees instead of linked lists.
This reduces the amount of coding required to handle the disunion
operation and in the future will make it easier to set bits in arbitrary
order yet later be able to extract maximally sized extents, which we'll
need for rebuilding certain structures.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
---
 fs/xfs/Kconfig                 |    1 
 fs/xfs/scrub/agheader_repair.c |    4 -
 fs/xfs/scrub/bitmap.c          |  292 +++++++++++++++++-----------------------
 fs/xfs/scrub/bitmap.h          |   13 +-
 4 files changed, 135 insertions(+), 175 deletions(-)


diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index e685299eb3d2..ba939d258fa5 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -89,6 +89,7 @@ config XFS_ONLINE_REPAIR
 	bool "XFS online metadata repair support"
 	default n
 	depends on XFS_FS && XFS_ONLINE_SCRUB
+	select INTERVAL_TREE
 	help
 	  If you say Y here you will be able to repair metadata on a
 	  mounted XFS filesystem.  This feature is intended to reduce
diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index 145e9d359d2f..f0cf205d3e73 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -516,10 +516,8 @@ xrep_agfl_collect_blocks(
 	 * Drop the freesp meta blocks that are in use by btrees.
 	 * The remaining blocks /should/ be AGFL blocks.
 	 */
-	error = xbitmap_disunion(agfl_extents, &ra.agmetablocks);
+	xbitmap_disunion(agfl_extents, &ra.agmetablocks);
 	xbitmap_destroy(&ra.agmetablocks);
-	if (error)
-		return error;
 
 	/*
 	 * Calculate the new AGFL size.  If we found more blocks than fit in
diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c
index 1041f17f6bb6..e1da103bce78 100644
--- a/fs/xfs/scrub/bitmap.c
+++ b/fs/xfs/scrub/bitmap.c
@@ -12,30 +12,105 @@
 #include "xfs_btree.h"
 #include "scrub/bitmap.h"
 
-#define for_each_xbitmap_extent(bex, n, bitmap) \
-	list_for_each_entry_safe((bex), (n), &(bitmap)->list, list)
+#define for_each_xbitmap_extent(itn, n, bitmap) \
+	rbtree_postorder_for_each_entry_safe((itn), (n), \
+			&(bitmap)->root.rb_root, rb)
 
-/*
- * Set a range of this bitmap.  Caller must ensure the range is not set.
- *
- * This is the logical equivalent of bitmap |= mask(start, len).
- */
+/* Clear a range of this bitmap. */
+static void
+__xbitmap_clear(
+	struct xbitmap			*bitmap,
+	uint64_t			start,
+	uint64_t			last)
+{
+	struct interval_tree_node	*itn;
+
+	while ((itn = interval_tree_iter_first(&bitmap->root, start, last))) {
+		if (itn->start < start) {
+			/* overlaps with the left side of the clearing range */
+			interval_tree_remove(itn, &bitmap->root);
+			itn->last = start - 1;
+			interval_tree_insert(itn, &bitmap->root);
+		} else if (itn->last > last) {
+			/* overlaps with the right side of the clearing range */
+			interval_tree_remove(itn, &bitmap->root);
+			itn->start = last + 1;
+			interval_tree_insert(itn, &bitmap->root);
+			break;
+		} else {
+			/* in the middle of the clearing range */
+			interval_tree_remove(itn, &bitmap->root);
+			kmem_free(itn);
+		}
+	}
+}
+
+/* Clear a range of this bitmap. */
+void
+xbitmap_clear(
+	struct xbitmap			*bitmap,
+	uint64_t			start,
+	uint64_t			len)
+{
+	__xbitmap_clear(bitmap, start, start + len - 1);
+}
+
+/* Set a range of this bitmap. */
 int
 xbitmap_set(
-	struct xbitmap		*bitmap,
-	uint64_t		start,
-	uint64_t		len)
+	struct xbitmap			*bitmap,
+	uint64_t			start,
+	uint64_t			len)
 {
-	struct xbitmap_range	*bmr;
+	struct interval_tree_node	*left;
+	struct interval_tree_node	*right;
+	uint64_t			last = start + len - 1;
 
-	bmr = kmem_alloc(sizeof(struct xbitmap_range), KM_MAYFAIL);
-	if (!bmr)
-		return -ENOMEM;
+	/* Is this whole range already set? */
+	left = interval_tree_iter_first(&bitmap->root, start, last);
+	if (left && left->start <= start && left->last >= last)
+		return 0;
 
-	INIT_LIST_HEAD(&bmr->list);
-	bmr->start = start;
-	bmr->len = len;
-	list_add_tail(&bmr->list, &bitmap->list);
+	/* Clear out everything in the range we want to set. */
+	xbitmap_clear(bitmap, start, len);
+
+	/* Do we have a left-adjacent extent? */
+	left = interval_tree_iter_first(&bitmap->root, start - 1, start - 1);
+	if (left && left->last + 1 != start)
+		left = NULL;
+
+	/* Do we have a right-adjacent extent? */
+	right = interval_tree_iter_first(&bitmap->root, last + 1, last + 1);
+	if (right && right->start != last + 1)
+		right = NULL;
+
+	if (left && right) {
+		/* combine left and right adjacent extent */
+		interval_tree_remove(left, &bitmap->root);
+		interval_tree_remove(right, &bitmap->root);
+		left->last = right->last;
+		interval_tree_insert(left, &bitmap->root);
+		kmem_free(right);
+	} else if (left) {
+		/* combine with left extent */
+		interval_tree_remove(left, &bitmap->root);
+		left->last = last;
+		interval_tree_insert(left, &bitmap->root);
+	} else if (right) {
+		/* combine with right extent */
+		interval_tree_remove(right, &bitmap->root);
+		right->start = start;
+		interval_tree_insert(right, &bitmap->root);
+	} else {
+		/* add an extent */
+		left = kmem_alloc(sizeof(struct interval_tree_node),
+				KM_MAYFAIL);
+		if (!left)
+			return -ENOMEM;
+		left->start = start;
+		left->last = last;
+		interval_tree_insert(left, &bitmap->root);
+	}
 
 	return 0;
 }
@@ -43,14 +118,13 @@ xbitmap_set(
 /* Free everything related to this bitmap. */
 void
 xbitmap_destroy(
-	struct xbitmap		*bitmap)
+	struct xbitmap			*bitmap)
 {
-	struct xbitmap_range	*bmr;
-	struct xbitmap_range	*n;
+	struct interval_tree_node	*itn, *p;
 
-	for_each_xbitmap_extent(bmr, n, bitmap) {
-		list_del(&bmr->list);
-		kmem_free(bmr);
+	for_each_xbitmap_extent(itn, p, bitmap) {
+		interval_tree_remove(itn, &bitmap->root);
+		kfree(itn);
 	}
 }
 
@@ -59,27 +133,7 @@ void
 xbitmap_init(
 	struct xbitmap		*bitmap)
 {
-	INIT_LIST_HEAD(&bitmap->list);
-}
-
-/* Compare two btree extents. */
-static int
-xbitmap_range_cmp(
-	void			*priv,
-	struct list_head	*a,
-	struct list_head	*b)
-{
-	struct xbitmap_range	*ap;
-	struct xbitmap_range	*bp;
-
-	ap = container_of(a, struct xbitmap_range, list);
-	bp = container_of(b, struct xbitmap_range, list);
-
-	if (ap->start > bp->start)
-		return 1;
-	if (ap->start < bp->start)
-		return -1;
-	return 0;
+	bitmap->root = RB_ROOT_CACHED;
 }
 
 /*
@@ -96,118 +150,19 @@ xbitmap_range_cmp(
  *
  * This is the logical equivalent of bitmap &= ~sub.
  */
-#define LEFT_ALIGNED	(1 << 0)
-#define RIGHT_ALIGNED	(1 << 1)
-int
+void
 xbitmap_disunion(
-	struct xbitmap		*bitmap,
-	struct xbitmap		*sub)
+	struct xbitmap			*bitmap,
+	struct xbitmap			*sub)
 {
-	struct list_head	*lp;
-	struct xbitmap_range	*br;
-	struct xbitmap_range	*new_br;
-	struct xbitmap_range	*sub_br;
-	uint64_t		sub_start;
-	uint64_t		sub_len;
-	int			state;
-	int			error = 0;
-
-	if (list_empty(&bitmap->list) || list_empty(&sub->list))
-		return 0;
-	ASSERT(!list_empty(&sub->list));
-
-	list_sort(NULL, &bitmap->list, xbitmap_range_cmp);
-	list_sort(NULL, &sub->list, xbitmap_range_cmp);
-
-	/*
-	 * Now that we've sorted both lists, we iterate bitmap once, rolling
-	 * forward through sub and/or bitmap as necessary until we find an
-	 * overlap or reach the end of either list.  We do not reset lp to the
-	 * head of bitmap nor do we reset sub_br to the head of sub.  The
-	 * list traversal is similar to merge sort, but we're deleting
-	 * instead.  In this manner we avoid O(n^2) operations.
-	 */
-	sub_br = list_first_entry(&sub->list, struct xbitmap_range,
-			list);
-	lp = bitmap->list.next;
-	while (lp != &bitmap->list) {
-		br = list_entry(lp, struct xbitmap_range, list);
-
-		/*
-		 * Advance sub_br and/or br until we find a pair that
-		 * intersect or we run out of extents.
-		 */
-		while (sub_br->start + sub_br->len <= br->start) {
-			if (list_is_last(&sub_br->list, &sub->list))
-				goto out;
-			sub_br = list_next_entry(sub_br, list);
-		}
-		if (sub_br->start >= br->start + br->len) {
-			lp = lp->next;
-			continue;
-		}
+	struct interval_tree_node	*itn, *n;
 
-		/* trim sub_br to fit the extent we have */
-		sub_start = sub_br->start;
-		sub_len = sub_br->len;
-		if (sub_br->start < br->start) {
-			sub_len -= br->start - sub_br->start;
-			sub_start = br->start;
-		}
-		if (sub_len > br->len)
-			sub_len = br->len;
-
-		state = 0;
-		if (sub_start == br->start)
-			state |= LEFT_ALIGNED;
-		if (sub_start + sub_len == br->start + br->len)
-			state |= RIGHT_ALIGNED;
-		switch (state) {
-		case LEFT_ALIGNED:
-			/* Coincides with only the left. */
-			br->start += sub_len;
-			br->len -= sub_len;
-			break;
-		case RIGHT_ALIGNED:
-			/* Coincides with only the right. */
-			br->len -= sub_len;
-			lp = lp->next;
-			break;
-		case LEFT_ALIGNED | RIGHT_ALIGNED:
-			/* Total overlap, just delete ex. */
-			lp = lp->next;
-			list_del(&br->list);
-			kmem_free(br);
-			break;
-		case 0:
-			/*
-			 * Deleting from the middle: add the new right extent
-			 * and then shrink the left extent.
-			 */
-			new_br = kmem_alloc(sizeof(struct xbitmap_range),
-					KM_MAYFAIL);
-			if (!new_br) {
-				error = -ENOMEM;
-				goto out;
-			}
-			INIT_LIST_HEAD(&new_br->list);
-			new_br->start = sub_start + sub_len;
-			new_br->len = br->start + br->len - new_br->start;
-			list_add(&new_br->list, &br->list);
-			br->len = sub_start - br->start;
-			lp = lp->next;
-			break;
-		default:
-			ASSERT(0);
-			break;
-		}
-	}
+	if (xbitmap_empty(bitmap) || xbitmap_empty(sub))
+		return;
 
-out:
-	return error;
+	for_each_xbitmap_extent(itn, n, sub)
+		__xbitmap_clear(bitmap, itn->start, itn->last);
 }
-#undef LEFT_ALIGNED
-#undef RIGHT_ALIGNED
 
 /*
  * Record all btree blocks seen while iterating all records of a btree.
@@ -303,14 +258,13 @@ xbitmap_set_btblocks(
 /* How many bits are set in this bitmap? */
 uint64_t
 xbitmap_hweight(
-	struct xbitmap		*bitmap)
+	struct xbitmap			*bitmap)
 {
-	struct xbitmap_range	*bmr;
-	struct xbitmap_range	*n;
-	uint64_t		ret = 0;
+	struct interval_tree_node	*itn, *n;
+	uint64_t			ret = 0;
 
-	for_each_xbitmap_extent(bmr, n, bitmap)
-		ret += bmr->len;
+	for_each_xbitmap_extent(itn, n, bitmap)
+		ret += itn->last - itn->start + 1;
 
 	return ret;
 }
@@ -318,15 +272,15 @@ xbitmap_hweight(
 /* Call a function for every run of set bits in this bitmap. */
 int
 xbitmap_iter_set(
-	struct xbitmap		*bitmap,
-	xbitmap_walk_run_fn	fn,
-	void			*priv)
+	struct xbitmap			*bitmap,
+	xbitmap_walk_run_fn		fn,
+	void				*priv)
 {
-	struct xbitmap_range	*bex, *n;
-	int			error;
+	struct interval_tree_node	*itn, *n;
+	int				error;
 
-	for_each_xbitmap_extent(bex, n, bitmap) {
-		error = fn(bex->start, bex->len, priv);
+	for_each_xbitmap_extent(itn, n, bitmap) {
+		error = fn(itn->start, itn->last - itn->start + 1, priv);
 		if (error)
 			break;
 	}
@@ -370,3 +324,11 @@ xbitmap_iter_set_bits(
 
 	return xbitmap_iter_set(bitmap, xbitmap_walk_bits_in_run, &wb);
 }
+
+/* Does this bitmap have no bits set at all? */
+bool
+xbitmap_empty(
+	struct xbitmap		*bitmap)
+{
+	return bitmap->root.rb_root.rb_node == NULL;
+}
diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h
index 27fde5b4a753..6be596e60ac8 100644
--- a/fs/xfs/scrub/bitmap.h
+++ b/fs/xfs/scrub/bitmap.h
@@ -6,21 +6,18 @@
 #ifndef __XFS_SCRUB_BITMAP_H__
 #define __XFS_SCRUB_BITMAP_H__
 
-struct xbitmap_range {
-	struct list_head	list;
-	uint64_t		start;
-	uint64_t		len;
-};
+#include <linux/interval_tree.h>
 
 struct xbitmap {
-	struct list_head	list;
+	struct rb_root_cached	root;
 };
 
 void xbitmap_init(struct xbitmap *bitmap);
 void xbitmap_destroy(struct xbitmap *bitmap);
 
+void xbitmap_clear(struct xbitmap *bitmap, uint64_t start, uint64_t len);
 int xbitmap_set(struct xbitmap *bitmap, uint64_t start, uint64_t len);
-int xbitmap_disunion(struct xbitmap *bitmap, struct xbitmap *sub);
+void xbitmap_disunion(struct xbitmap *bitmap, struct xbitmap *sub);
 int xbitmap_set_btcur_path(struct xbitmap *bitmap,
 		struct xfs_btree_cur *cur);
 int xbitmap_set_btblocks(struct xbitmap *bitmap,
@@ -42,4 +39,6 @@ typedef int (*xbitmap_walk_bit_fn)(uint64_t bit, void *priv);
 int xbitmap_iter_set_bits(struct xbitmap *bitmap, xbitmap_walk_bit_fn fn,
 		void *priv);
 
+bool xbitmap_empty(struct xbitmap *bitmap);
+
 #endif	/* __XFS_SCRUB_BITMAP_H__ */


^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH 1/4] xfs: rename xfs_bitmap to xbitmap
  2019-10-09 16:49 ` [PATCH 1/4] xfs: rename xfs_bitmap to xbitmap Darrick J. Wong
@ 2019-10-21 14:34   ` Brian Foster
  0 siblings, 0 replies; 11+ messages in thread
From: Brian Foster @ 2019-10-21 14:34 UTC (permalink / raw)
  To: Darrick J. Wong; +Cc: linux-xfs

On Wed, Oct 09, 2019 at 09:49:02AM -0700, Darrick J. Wong wrote:
> From: Darrick J. Wong <darrick.wong@oracle.com>
> 
> Shorten the name of xfs_bitmap to xbitmap since the scrub bitmap has
> nothing to do with the libxfs bitmap.
> 
> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
> ---

Reviewed-by: Brian Foster <bfoster@redhat.com>

>  fs/xfs/scrub/agheader_repair.c |   42 ++++++++++++-----------
>  fs/xfs/scrub/bitmap.c          |   72 ++++++++++++++++++++--------------------
>  fs/xfs/scrub/bitmap.h          |   22 ++++++------
>  fs/xfs/scrub/repair.c          |    8 ++--
>  fs/xfs/scrub/repair.h          |    4 +-
>  5 files changed, 74 insertions(+), 74 deletions(-)
> 
> 
> diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
> index 8fcd43040c96..9fbb6035f4e2 100644
> --- a/fs/xfs/scrub/agheader_repair.c
> +++ b/fs/xfs/scrub/agheader_repair.c
> @@ -429,10 +429,10 @@ xrep_agf(
>  
>  struct xrep_agfl {
>  	/* Bitmap of other OWN_AG metadata blocks. */
> -	struct xfs_bitmap	agmetablocks;
> +	struct xbitmap		agmetablocks;
>  
>  	/* Bitmap of free space. */
> -	struct xfs_bitmap	*freesp;
> +	struct xbitmap		*freesp;
>  
>  	struct xfs_scrub	*sc;
>  };
> @@ -455,12 +455,12 @@ xrep_agfl_walk_rmap(
>  	if (rec->rm_owner == XFS_RMAP_OWN_AG) {
>  		fsb = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
>  				rec->rm_startblock);
> -		error = xfs_bitmap_set(ra->freesp, fsb, rec->rm_blockcount);
> +		error = xbitmap_set(ra->freesp, fsb, rec->rm_blockcount);
>  		if (error)
>  			return error;
>  	}
>  
> -	return xfs_bitmap_set_btcur_path(&ra->agmetablocks, cur);
> +	return xbitmap_set_btcur_path(&ra->agmetablocks, cur);
>  }
>  
>  /*
> @@ -476,19 +476,19 @@ STATIC int
>  xrep_agfl_collect_blocks(
>  	struct xfs_scrub	*sc,
>  	struct xfs_buf		*agf_bp,
> -	struct xfs_bitmap	*agfl_extents,
> +	struct xbitmap		*agfl_extents,
>  	xfs_agblock_t		*flcount)
>  {
>  	struct xrep_agfl	ra;
>  	struct xfs_mount	*mp = sc->mp;
>  	struct xfs_btree_cur	*cur;
> -	struct xfs_bitmap_range	*br;
> -	struct xfs_bitmap_range	*n;
> +	struct xbitmap_range	*br;
> +	struct xbitmap_range	*n;
>  	int			error;
>  
>  	ra.sc = sc;
>  	ra.freesp = agfl_extents;
> -	xfs_bitmap_init(&ra.agmetablocks);
> +	xbitmap_init(&ra.agmetablocks);
>  
>  	/* Find all space used by the free space btrees & rmapbt. */
>  	cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
> @@ -500,7 +500,7 @@ xrep_agfl_collect_blocks(
>  	/* Find all blocks currently being used by the bnobt. */
>  	cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
>  			XFS_BTNUM_BNO);
> -	error = xfs_bitmap_set_btblocks(&ra.agmetablocks, cur);
> +	error = xbitmap_set_btblocks(&ra.agmetablocks, cur);
>  	if (error)
>  		goto err;
>  	xfs_btree_del_cursor(cur, error);
> @@ -508,7 +508,7 @@ xrep_agfl_collect_blocks(
>  	/* Find all blocks currently being used by the cntbt. */
>  	cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
>  			XFS_BTNUM_CNT);
> -	error = xfs_bitmap_set_btblocks(&ra.agmetablocks, cur);
> +	error = xbitmap_set_btblocks(&ra.agmetablocks, cur);
>  	if (error)
>  		goto err;
>  
> @@ -518,8 +518,8 @@ xrep_agfl_collect_blocks(
>  	 * Drop the freesp meta blocks that are in use by btrees.
>  	 * The remaining blocks /should/ be AGFL blocks.
>  	 */
> -	error = xfs_bitmap_disunion(agfl_extents, &ra.agmetablocks);
> -	xfs_bitmap_destroy(&ra.agmetablocks);
> +	error = xbitmap_disunion(agfl_extents, &ra.agmetablocks);
> +	xbitmap_destroy(&ra.agmetablocks);
>  	if (error)
>  		return error;
>  
> @@ -528,7 +528,7 @@ xrep_agfl_collect_blocks(
>  	 * the AGFL we'll free them later.
>  	 */
>  	*flcount = 0;
> -	for_each_xfs_bitmap_extent(br, n, agfl_extents) {
> +	for_each_xbitmap_extent(br, n, agfl_extents) {
>  		*flcount += br->len;
>  		if (*flcount > xfs_agfl_size(mp))
>  			break;
> @@ -538,7 +538,7 @@ xrep_agfl_collect_blocks(
>  	return 0;
>  
>  err:
> -	xfs_bitmap_destroy(&ra.agmetablocks);
> +	xbitmap_destroy(&ra.agmetablocks);
>  	xfs_btree_del_cursor(cur, error);
>  	return error;
>  }
> @@ -573,13 +573,13 @@ STATIC void
>  xrep_agfl_init_header(
>  	struct xfs_scrub	*sc,
>  	struct xfs_buf		*agfl_bp,
> -	struct xfs_bitmap	*agfl_extents,
> +	struct xbitmap		*agfl_extents,
>  	xfs_agblock_t		flcount)
>  {
>  	struct xfs_mount	*mp = sc->mp;
>  	__be32			*agfl_bno;
> -	struct xfs_bitmap_range	*br;
> -	struct xfs_bitmap_range	*n;
> +	struct xbitmap_range	*br;
> +	struct xbitmap_range	*n;
>  	struct xfs_agfl		*agfl;
>  	xfs_agblock_t		agbno;
>  	unsigned int		fl_off;
> @@ -603,7 +603,7 @@ xrep_agfl_init_header(
>  	 */
>  	fl_off = 0;
>  	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agfl_bp);
> -	for_each_xfs_bitmap_extent(br, n, agfl_extents) {
> +	for_each_xbitmap_extent(br, n, agfl_extents) {
>  		agbno = XFS_FSB_TO_AGBNO(mp, br->start);
>  
>  		trace_xrep_agfl_insert(mp, sc->sa.agno, agbno, br->len);
> @@ -637,7 +637,7 @@ int
>  xrep_agfl(
>  	struct xfs_scrub	*sc)
>  {
> -	struct xfs_bitmap	agfl_extents;
> +	struct xbitmap		agfl_extents;
>  	struct xfs_mount	*mp = sc->mp;
>  	struct xfs_buf		*agf_bp;
>  	struct xfs_buf		*agfl_bp;
> @@ -649,7 +649,7 @@ xrep_agfl(
>  		return -EOPNOTSUPP;
>  
>  	xchk_perag_get(sc->mp, &sc->sa);
> -	xfs_bitmap_init(&agfl_extents);
> +	xbitmap_init(&agfl_extents);
>  
>  	/*
>  	 * Read the AGF so that we can query the rmapbt.  We hope that there's
> @@ -701,7 +701,7 @@ xrep_agfl(
>  	error = xrep_reap_extents(sc, &agfl_extents, &XFS_RMAP_OINFO_AG,
>  			XFS_AG_RESV_AGFL);
>  err:
> -	xfs_bitmap_destroy(&agfl_extents);
> +	xbitmap_destroy(&agfl_extents);
>  	return error;
>  }
>  
> diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c
> index 3d47d111be5a..5b07b46c89c9 100644
> --- a/fs/xfs/scrub/bitmap.c
> +++ b/fs/xfs/scrub/bitmap.c
> @@ -18,14 +18,14 @@
>   * This is the logical equivalent of bitmap |= mask(start, len).
>   */
>  int
> -xfs_bitmap_set(
> -	struct xfs_bitmap	*bitmap,
> +xbitmap_set(
> +	struct xbitmap		*bitmap,
>  	uint64_t		start,
>  	uint64_t		len)
>  {
> -	struct xfs_bitmap_range	*bmr;
> +	struct xbitmap_range	*bmr;
>  
> -	bmr = kmem_alloc(sizeof(struct xfs_bitmap_range), KM_MAYFAIL);
> +	bmr = kmem_alloc(sizeof(struct xbitmap_range), KM_MAYFAIL);
>  	if (!bmr)
>  		return -ENOMEM;
>  
> @@ -39,13 +39,13 @@ xfs_bitmap_set(
>  
>  /* Free everything related to this bitmap. */
>  void
> -xfs_bitmap_destroy(
> -	struct xfs_bitmap	*bitmap)
> +xbitmap_destroy(
> +	struct xbitmap		*bitmap)
>  {
> -	struct xfs_bitmap_range	*bmr;
> -	struct xfs_bitmap_range	*n;
> +	struct xbitmap_range	*bmr;
> +	struct xbitmap_range	*n;
>  
> -	for_each_xfs_bitmap_extent(bmr, n, bitmap) {
> +	for_each_xbitmap_extent(bmr, n, bitmap) {
>  		list_del(&bmr->list);
>  		kmem_free(bmr);
>  	}
> @@ -53,24 +53,24 @@ xfs_bitmap_destroy(
>  
>  /* Set up a per-AG block bitmap. */
>  void
> -xfs_bitmap_init(
> -	struct xfs_bitmap	*bitmap)
> +xbitmap_init(
> +	struct xbitmap		*bitmap)
>  {
>  	INIT_LIST_HEAD(&bitmap->list);
>  }
>  
>  /* Compare two btree extents. */
>  static int
> -xfs_bitmap_range_cmp(
> +xbitmap_range_cmp(
>  	void			*priv,
>  	struct list_head	*a,
>  	struct list_head	*b)
>  {
> -	struct xfs_bitmap_range	*ap;
> -	struct xfs_bitmap_range	*bp;
> +	struct xbitmap_range	*ap;
> +	struct xbitmap_range	*bp;
>  
> -	ap = container_of(a, struct xfs_bitmap_range, list);
> -	bp = container_of(b, struct xfs_bitmap_range, list);
> +	ap = container_of(a, struct xbitmap_range, list);
> +	bp = container_of(b, struct xbitmap_range, list);
>  
>  	if (ap->start > bp->start)
>  		return 1;
> @@ -96,14 +96,14 @@ xfs_bitmap_range_cmp(
>  #define LEFT_ALIGNED	(1 << 0)
>  #define RIGHT_ALIGNED	(1 << 1)
>  int
> -xfs_bitmap_disunion(
> -	struct xfs_bitmap	*bitmap,
> -	struct xfs_bitmap	*sub)
> +xbitmap_disunion(
> +	struct xbitmap		*bitmap,
> +	struct xbitmap		*sub)
>  {
>  	struct list_head	*lp;
> -	struct xfs_bitmap_range	*br;
> -	struct xfs_bitmap_range	*new_br;
> -	struct xfs_bitmap_range	*sub_br;
> +	struct xbitmap_range	*br;
> +	struct xbitmap_range	*new_br;
> +	struct xbitmap_range	*sub_br;
>  	uint64_t		sub_start;
>  	uint64_t		sub_len;
>  	int			state;
> @@ -113,8 +113,8 @@ xfs_bitmap_disunion(
>  		return 0;
>  	ASSERT(!list_empty(&sub->list));
>  
> -	list_sort(NULL, &bitmap->list, xfs_bitmap_range_cmp);
> -	list_sort(NULL, &sub->list, xfs_bitmap_range_cmp);
> +	list_sort(NULL, &bitmap->list, xbitmap_range_cmp);
> +	list_sort(NULL, &sub->list, xbitmap_range_cmp);
>  
>  	/*
>  	 * Now that we've sorted both lists, we iterate bitmap once, rolling
> @@ -124,11 +124,11 @@ xfs_bitmap_disunion(
>  	 * list traversal is similar to merge sort, but we're deleting
>  	 * instead.  In this manner we avoid O(n^2) operations.
>  	 */
> -	sub_br = list_first_entry(&sub->list, struct xfs_bitmap_range,
> +	sub_br = list_first_entry(&sub->list, struct xbitmap_range,
>  			list);
>  	lp = bitmap->list.next;
>  	while (lp != &bitmap->list) {
> -		br = list_entry(lp, struct xfs_bitmap_range, list);
> +		br = list_entry(lp, struct xbitmap_range, list);
>  
>  		/*
>  		 * Advance sub_br and/or br until we find a pair that
> @@ -181,7 +181,7 @@ xfs_bitmap_disunion(
>  			 * Deleting from the middle: add the new right extent
>  			 * and then shrink the left extent.
>  			 */
> -			new_br = kmem_alloc(sizeof(struct xfs_bitmap_range),
> +			new_br = kmem_alloc(sizeof(struct xbitmap_range),
>  					KM_MAYFAIL);
>  			if (!new_br) {
>  				error = -ENOMEM;
> @@ -247,8 +247,8 @@ xfs_bitmap_disunion(
>   * blocks going from the leaf towards the root.
>   */
>  int
> -xfs_bitmap_set_btcur_path(
> -	struct xfs_bitmap	*bitmap,
> +xbitmap_set_btcur_path(
> +	struct xbitmap		*bitmap,
>  	struct xfs_btree_cur	*cur)
>  {
>  	struct xfs_buf		*bp;
> @@ -261,7 +261,7 @@ xfs_bitmap_set_btcur_path(
>  		if (!bp)
>  			continue;
>  		fsb = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
> -		error = xfs_bitmap_set(bitmap, fsb, 1);
> +		error = xbitmap_set(bitmap, fsb, 1);
>  		if (error)
>  			return error;
>  	}
> @@ -271,12 +271,12 @@ xfs_bitmap_set_btcur_path(
>  
>  /* Collect a btree's block in the bitmap. */
>  STATIC int
> -xfs_bitmap_collect_btblock(
> +xbitmap_collect_btblock(
>  	struct xfs_btree_cur	*cur,
>  	int			level,
>  	void			*priv)
>  {
> -	struct xfs_bitmap	*bitmap = priv;
> +	struct xbitmap		*bitmap = priv;
>  	struct xfs_buf		*bp;
>  	xfs_fsblock_t		fsbno;
>  
> @@ -285,14 +285,14 @@ xfs_bitmap_collect_btblock(
>  		return 0;
>  
>  	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
> -	return xfs_bitmap_set(bitmap, fsbno, 1);
> +	return xbitmap_set(bitmap, fsbno, 1);
>  }
>  
>  /* Walk the btree and mark the bitmap wherever a btree block is found. */
>  int
> -xfs_bitmap_set_btblocks(
> -	struct xfs_bitmap	*bitmap,
> +xbitmap_set_btblocks(
> +	struct xbitmap		*bitmap,
>  	struct xfs_btree_cur	*cur)
>  {
> -	return xfs_btree_visit_blocks(cur, xfs_bitmap_collect_btblock, bitmap);
> +	return xfs_btree_visit_blocks(cur, xbitmap_collect_btblock, bitmap);
>  }
> diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h
> index ae8ecbce6fa6..8db4017ac78e 100644
> --- a/fs/xfs/scrub/bitmap.h
> +++ b/fs/xfs/scrub/bitmap.h
> @@ -6,31 +6,31 @@
>  #ifndef __XFS_SCRUB_BITMAP_H__
>  #define __XFS_SCRUB_BITMAP_H__
>  
> -struct xfs_bitmap_range {
> +struct xbitmap_range {
>  	struct list_head	list;
>  	uint64_t		start;
>  	uint64_t		len;
>  };
>  
> -struct xfs_bitmap {
> +struct xbitmap {
>  	struct list_head	list;
>  };
>  
> -void xfs_bitmap_init(struct xfs_bitmap *bitmap);
> -void xfs_bitmap_destroy(struct xfs_bitmap *bitmap);
> +void xbitmap_init(struct xbitmap *bitmap);
> +void xbitmap_destroy(struct xbitmap *bitmap);
>  
> -#define for_each_xfs_bitmap_extent(bex, n, bitmap) \
> +#define for_each_xbitmap_extent(bex, n, bitmap) \
>  	list_for_each_entry_safe((bex), (n), &(bitmap)->list, list)
>  
> -#define for_each_xfs_bitmap_block(b, bex, n, bitmap) \
> +#define for_each_xbitmap_block(b, bex, n, bitmap) \
>  	list_for_each_entry_safe((bex), (n), &(bitmap)->list, list) \
> -		for ((b) = bex->start; (b) < bex->start + bex->len; (b)++)
> +		for ((b) = (bex)->start; (b) < (bex)->start + (bex)->len; (b)++)
>  
> -int xfs_bitmap_set(struct xfs_bitmap *bitmap, uint64_t start, uint64_t len);
> -int xfs_bitmap_disunion(struct xfs_bitmap *bitmap, struct xfs_bitmap *sub);
> -int xfs_bitmap_set_btcur_path(struct xfs_bitmap *bitmap,
> +int xbitmap_set(struct xbitmap *bitmap, uint64_t start, uint64_t len);
> +int xbitmap_disunion(struct xbitmap *bitmap, struct xbitmap *sub);
> +int xbitmap_set_btcur_path(struct xbitmap *bitmap,
>  		struct xfs_btree_cur *cur);
> -int xfs_bitmap_set_btblocks(struct xfs_bitmap *bitmap,
> +int xbitmap_set_btblocks(struct xbitmap *bitmap,
>  		struct xfs_btree_cur *cur);
>  
>  #endif	/* __XFS_SCRUB_BITMAP_H__ */
> diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
> index 8349694f985d..d41da4c44f10 100644
> --- a/fs/xfs/scrub/repair.c
> +++ b/fs/xfs/scrub/repair.c
> @@ -600,19 +600,19 @@ xrep_reap_block(
>  int
>  xrep_reap_extents(
>  	struct xfs_scrub		*sc,
> -	struct xfs_bitmap		*bitmap,
> +	struct xbitmap			*bitmap,
>  	const struct xfs_owner_info	*oinfo,
>  	enum xfs_ag_resv_type		type)
>  {
> -	struct xfs_bitmap_range		*bmr;
> -	struct xfs_bitmap_range		*n;
> +	struct xbitmap_range		*bmr;
> +	struct xbitmap_range		*n;
>  	xfs_fsblock_t			fsbno;
>  	unsigned int			deferred = 0;
>  	int				error = 0;
>  
>  	ASSERT(xfs_sb_version_hasrmapbt(&sc->mp->m_sb));
>  
> -	for_each_xfs_bitmap_block(fsbno, bmr, n, bitmap) {
> +	for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
>  		ASSERT(sc->ip != NULL ||
>  		       XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.agno);
>  		trace_xrep_dispose_btree_extent(sc->mp,
> diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h
> index eab41928990f..479cfe38065e 100644
> --- a/fs/xfs/scrub/repair.h
> +++ b/fs/xfs/scrub/repair.h
> @@ -28,10 +28,10 @@ int xrep_init_btblock(struct xfs_scrub *sc, xfs_fsblock_t fsb,
>  		struct xfs_buf **bpp, xfs_btnum_t btnum,
>  		const struct xfs_buf_ops *ops);
>  
> -struct xfs_bitmap;
> +struct xbitmap;
>  
>  int xrep_fix_freelist(struct xfs_scrub *sc, bool can_shrink);
> -int xrep_reap_extents(struct xfs_scrub *sc, struct xfs_bitmap *exlist,
> +int xrep_reap_extents(struct xfs_scrub *sc, struct xbitmap *exlist,
>  		const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type);
>  
>  struct xrep_find_ag_btree {
> 


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 2/4] xfs: replace open-coded bitmap weight logic
  2019-10-09 16:49 ` [PATCH 2/4] xfs: replace open-coded bitmap weight logic Darrick J. Wong
@ 2019-10-21 14:34   ` Brian Foster
  0 siblings, 0 replies; 11+ messages in thread
From: Brian Foster @ 2019-10-21 14:34 UTC (permalink / raw)
  To: Darrick J. Wong; +Cc: linux-xfs

On Wed, Oct 09, 2019 at 09:49:12AM -0700, Darrick J. Wong wrote:
> From: Darrick J. Wong <darrick.wong@oracle.com>
> 
> Add a xbitmap_hweight helper function so that we can get rid of the
> open-coded loop.
> 
> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
> ---

Reviewed-by: Brian Foster <bfoster@redhat.com>

>  fs/xfs/scrub/agheader_repair.c |   12 ++----------
>  fs/xfs/scrub/bitmap.c          |   15 +++++++++++++++
>  fs/xfs/scrub/bitmap.h          |    1 +
>  3 files changed, 18 insertions(+), 10 deletions(-)
> 
> 
> diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
> index 9fbb6035f4e2..f35596cc26fb 100644
> --- a/fs/xfs/scrub/agheader_repair.c
> +++ b/fs/xfs/scrub/agheader_repair.c
> @@ -482,8 +482,6 @@ xrep_agfl_collect_blocks(
>  	struct xrep_agfl	ra;
>  	struct xfs_mount	*mp = sc->mp;
>  	struct xfs_btree_cur	*cur;
> -	struct xbitmap_range	*br;
> -	struct xbitmap_range	*n;
>  	int			error;
>  
>  	ra.sc = sc;
> @@ -527,14 +525,8 @@ xrep_agfl_collect_blocks(
>  	 * Calculate the new AGFL size.  If we found more blocks than fit in
>  	 * the AGFL we'll free them later.
>  	 */
> -	*flcount = 0;
> -	for_each_xbitmap_extent(br, n, agfl_extents) {
> -		*flcount += br->len;
> -		if (*flcount > xfs_agfl_size(mp))
> -			break;
> -	}
> -	if (*flcount > xfs_agfl_size(mp))
> -		*flcount = xfs_agfl_size(mp);
> +	*flcount = min_t(uint64_t, xbitmap_hweight(agfl_extents),
> +			 xfs_agfl_size(mp));
>  	return 0;
>  
>  err:
> diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c
> index 5b07b46c89c9..8b704d7b5855 100644
> --- a/fs/xfs/scrub/bitmap.c
> +++ b/fs/xfs/scrub/bitmap.c
> @@ -296,3 +296,18 @@ xbitmap_set_btblocks(
>  {
>  	return xfs_btree_visit_blocks(cur, xbitmap_collect_btblock, bitmap);
>  }
> +
> +/* How many bits are set in this bitmap? */
> +uint64_t
> +xbitmap_hweight(
> +	struct xbitmap		*bitmap)
> +{
> +	struct xbitmap_range	*bmr;
> +	struct xbitmap_range	*n;
> +	uint64_t		ret = 0;
> +
> +	for_each_xbitmap_extent(bmr, n, bitmap)
> +		ret += bmr->len;
> +
> +	return ret;
> +}
> diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h
> index 8db4017ac78e..900646b72de1 100644
> --- a/fs/xfs/scrub/bitmap.h
> +++ b/fs/xfs/scrub/bitmap.h
> @@ -32,5 +32,6 @@ int xbitmap_set_btcur_path(struct xbitmap *bitmap,
>  		struct xfs_btree_cur *cur);
>  int xbitmap_set_btblocks(struct xbitmap *bitmap,
>  		struct xfs_btree_cur *cur);
> +uint64_t xbitmap_hweight(struct xbitmap *bitmap);
>  
>  #endif	/* __XFS_SCRUB_BITMAP_H__ */
> 


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 3/4] xfs: remove the for_each_xbitmap_ helpers
  2019-10-09 16:49 ` [PATCH 3/4] xfs: remove the for_each_xbitmap_ helpers Darrick J. Wong
@ 2019-10-22 13:35   ` Brian Foster
  2019-10-22 16:56     ` Darrick J. Wong
  0 siblings, 1 reply; 11+ messages in thread
From: Brian Foster @ 2019-10-22 13:35 UTC (permalink / raw)
  To: Darrick J. Wong; +Cc: linux-xfs

On Wed, Oct 09, 2019 at 09:49:22AM -0700, Darrick J. Wong wrote:
> From: Darrick J. Wong <darrick.wong@oracle.com>
> 
> Remove the for_each_xbitmap_ macros in favor of proper iterator
> functions.  We'll soon be switching this data structure over to an
> interval tree implementation, which means that we can't allow callers to
> modify the bitmap during iteration without telling us.
> 
> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
> ---
>  fs/xfs/scrub/agheader_repair.c |   73 ++++++++++++++++++++++++----------------
>  fs/xfs/scrub/bitmap.c          |   59 ++++++++++++++++++++++++++++++++
>  fs/xfs/scrub/bitmap.h          |   22 ++++++++----
>  fs/xfs/scrub/repair.c          |   60 +++++++++++++++++----------------
>  4 files changed, 148 insertions(+), 66 deletions(-)
> 
> 
...
> diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h
> index 900646b72de1..27fde5b4a753 100644
> --- a/fs/xfs/scrub/bitmap.h
> +++ b/fs/xfs/scrub/bitmap.h
...
> @@ -34,4 +27,19 @@ int xbitmap_set_btblocks(struct xbitmap *bitmap,
>  		struct xfs_btree_cur *cur);
>  uint64_t xbitmap_hweight(struct xbitmap *bitmap);
>  
> +/*
> + * Return codes for the bitmap iterator functions are 0 to continue iterating,
> + * and non-zero to stop iterating.  Any non-zero value will be passed up to the
> + * iteration caller.  The special value -ECANCELED can be used to stop
> + * iteration, because neither bitmap iterator ever generates that error code on
> + * its own.
> + */
> +typedef int (*xbitmap_walk_run_fn)(uint64_t start, uint64_t len, void *priv);
> +int xbitmap_iter_set(struct xbitmap *bitmap, xbitmap_walk_run_fn fn,
> +		void *priv);
> +
> +typedef int (*xbitmap_walk_bit_fn)(uint64_t bit, void *priv);
> +int xbitmap_iter_set_bits(struct xbitmap *bitmap, xbitmap_walk_bit_fn fn,
> +		void *priv);
> +

Somewhat of a nit, but I read "set" as a verb in the above function
names which tends to confuse me over what these functions do (i.e.
iterate bits, not set bits). Could we call them something a bit more
neutral, like xbitmap[_bit]_iter() perhaps? That aside the rest of the
patch looks Ok to me.

Brian

>  #endif	/* __XFS_SCRUB_BITMAP_H__ */
> diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
> index d41da4c44f10..588bc054db5c 100644
> --- a/fs/xfs/scrub/repair.c
> +++ b/fs/xfs/scrub/repair.c
> @@ -507,15 +507,21 @@ xrep_reap_invalidate_block(
>  	xfs_trans_binval(sc->tp, bp);
>  }
>  
> +struct xrep_reap_block {
> +	struct xfs_scrub		*sc;
> +	const struct xfs_owner_info	*oinfo;
> +	enum xfs_ag_resv_type		resv;
> +	unsigned int			deferred;
> +};
> +
>  /* Dispose of a single block. */
>  STATIC int
>  xrep_reap_block(
> -	struct xfs_scrub		*sc,
> -	xfs_fsblock_t			fsbno,
> -	const struct xfs_owner_info	*oinfo,
> -	enum xfs_ag_resv_type		resv,
> -	unsigned int			*deferred)
> +	uint64_t			fsbno,
> +	void				*priv)
>  {
> +	struct xrep_reap_block		*rb = priv;
> +	struct xfs_scrub		*sc = rb->sc;
>  	struct xfs_btree_cur		*cur;
>  	struct xfs_buf			*agf_bp = NULL;
>  	xfs_agnumber_t			agno;
> @@ -527,6 +533,10 @@ xrep_reap_block(
>  	agno = XFS_FSB_TO_AGNO(sc->mp, fsbno);
>  	agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno);
>  
> +	ASSERT(sc->ip != NULL || agno == sc->sa.agno);
> +
> +	trace_xrep_dispose_btree_extent(sc->mp, agno, agbno, 1);
> +
>  	/*
>  	 * If we are repairing per-inode metadata, we need to read in the AGF
>  	 * buffer.  Otherwise, we're repairing a per-AG structure, so reuse
> @@ -544,7 +554,8 @@ xrep_reap_block(
>  	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, agno);
>  
>  	/* Can we find any other rmappings? */
> -	error = xfs_rmap_has_other_keys(cur, agbno, 1, oinfo, &has_other_rmap);
> +	error = xfs_rmap_has_other_keys(cur, agbno, 1, rb->oinfo,
> +			&has_other_rmap);
>  	xfs_btree_del_cursor(cur, error);
>  	if (error)
>  		goto out_free;
> @@ -563,8 +574,9 @@ xrep_reap_block(
>  	 * to run xfs_repair.
>  	 */
>  	if (has_other_rmap) {
> -		error = xfs_rmap_free(sc->tp, agf_bp, agno, agbno, 1, oinfo);
> -	} else if (resv == XFS_AG_RESV_AGFL) {
> +		error = xfs_rmap_free(sc->tp, agf_bp, agno, agbno, 1,
> +				rb->oinfo);
> +	} else if (rb->resv == XFS_AG_RESV_AGFL) {
>  		xrep_reap_invalidate_block(sc, fsbno);
>  		error = xrep_put_freelist(sc, agbno);
>  	} else {
> @@ -576,16 +588,16 @@ xrep_reap_block(
>  		 * reservation.
>  		 */
>  		xrep_reap_invalidate_block(sc, fsbno);
> -		__xfs_bmap_add_free(sc->tp, fsbno, 1, oinfo, true);
> -		(*deferred)++;
> -		need_roll = *deferred > 100;
> +		__xfs_bmap_add_free(sc->tp, fsbno, 1, rb->oinfo, true);
> +		rb->deferred++;
> +		need_roll = rb->deferred > 100;
>  	}
>  	if (agf_bp != sc->sa.agf_bp)
>  		xfs_trans_brelse(sc->tp, agf_bp);
>  	if (error || !need_roll)
>  		return error;
>  
> -	*deferred = 0;
> +	rb->deferred = 0;
>  	if (sc->ip)
>  		return xfs_trans_roll_inode(&sc->tp, sc->ip);
>  	return xrep_roll_ag_trans(sc);
> @@ -604,27 +616,17 @@ xrep_reap_extents(
>  	const struct xfs_owner_info	*oinfo,
>  	enum xfs_ag_resv_type		type)
>  {
> -	struct xbitmap_range		*bmr;
> -	struct xbitmap_range		*n;
> -	xfs_fsblock_t			fsbno;
> -	unsigned int			deferred = 0;
> +	struct xrep_reap_block		rb = {
> +		.sc			= sc,
> +		.oinfo			= oinfo,
> +		.resv			= type,
> +	};
>  	int				error = 0;
>  
>  	ASSERT(xfs_sb_version_hasrmapbt(&sc->mp->m_sb));
>  
> -	for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
> -		ASSERT(sc->ip != NULL ||
> -		       XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.agno);
> -		trace_xrep_dispose_btree_extent(sc->mp,
> -				XFS_FSB_TO_AGNO(sc->mp, fsbno),
> -				XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1);
> -
> -		error = xrep_reap_block(sc, fsbno, oinfo, type, &deferred);
> -		if (error)
> -			break;
> -	}
> -
> -	if (error || deferred == 0)
> +	error = xbitmap_iter_set_bits(bitmap, xrep_reap_block, &rb);
> +	if (error || rb.deferred == 0)
>  		return error;
>  
>  	if (sc->ip)
> 


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 4/4] xfs: convert xbitmap to interval tree
  2019-10-09 16:49 ` [PATCH 4/4] xfs: convert xbitmap to interval tree Darrick J. Wong
@ 2019-10-22 13:38   ` Brian Foster
  2019-10-22 17:59     ` Darrick J. Wong
  0 siblings, 1 reply; 11+ messages in thread
From: Brian Foster @ 2019-10-22 13:38 UTC (permalink / raw)
  To: Darrick J. Wong; +Cc: linux-xfs

On Wed, Oct 09, 2019 at 09:49:30AM -0700, Darrick J. Wong wrote:
> From: Darrick J. Wong <darrick.wong@oracle.com>
> 
> Convert the xbitmap code to use interval trees instead of linked lists.
> This reduces the amount of coding required to handle the disunion
> operation and in the future will make it easier to set bits in arbitrary
> order yet later be able to extract maximally sized extents, which we'll
> need for rebuilding certain structures.
> 
> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
> ---

Looks mostly straightforward provided my lack of knowledge on interval
trees. A few random comments..

>  fs/xfs/Kconfig                 |    1 
>  fs/xfs/scrub/agheader_repair.c |    4 -
>  fs/xfs/scrub/bitmap.c          |  292 +++++++++++++++++-----------------------
>  fs/xfs/scrub/bitmap.h          |   13 +-
>  4 files changed, 135 insertions(+), 175 deletions(-)
> 
> 
...
> diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c
> index 1041f17f6bb6..e1da103bce78 100644
> --- a/fs/xfs/scrub/bitmap.c
> +++ b/fs/xfs/scrub/bitmap.c
> @@ -12,30 +12,105 @@
>  #include "xfs_btree.h"
>  #include "scrub/bitmap.h"
>  
> -#define for_each_xbitmap_extent(bex, n, bitmap) \
> -	list_for_each_entry_safe((bex), (n), &(bitmap)->list, list)
> +#define for_each_xbitmap_extent(itn, n, bitmap) \
> +	rbtree_postorder_for_each_entry_safe((itn), (n), \
> +			&(bitmap)->root.rb_root, rb)
>  

I'm not familiar with the interval tree, but the header for this rbtree_
macro mentions it is unsafe with respect to rbtree_erase(). Is that a
concern for any of the users that might call interval_tree_remove()? It
looks like that calls down into rb_erase_augmented(), but it's not clear
to me if that's a problem..

> -/*
> - * Set a range of this bitmap.  Caller must ensure the range is not set.
> - *
> - * This is the logical equivalent of bitmap |= mask(start, len).
> - */
> +/* Clear a range of this bitmap. */
> +static void
> +__xbitmap_clear(
> +	struct xbitmap			*bitmap,
> +	uint64_t			start,
> +	uint64_t			last)
> +{
> +	struct interval_tree_node	*itn;
> +
> +	while ((itn = interval_tree_iter_first(&bitmap->root, start, last))) {
> +		if (itn->start < start) {
> +			/* overlaps with the left side of the clearing range */
> +			interval_tree_remove(itn, &bitmap->root);
> +			itn->last = start - 1;
> +			interval_tree_insert(itn, &bitmap->root);
> +		} else if (itn->last > last) {
> +			/* overlaps with the right side of the clearing range */
> +			interval_tree_remove(itn, &bitmap->root);
> +			itn->start = last + 1;
> +			interval_tree_insert(itn, &bitmap->root);
> +			break;
> +		} else {
> +			/* in the middle of the clearing range */
> +			interval_tree_remove(itn, &bitmap->root);
> +			kmem_free(itn);
> +		}
> +	}
> +}
> +
> +/* Clear a range of this bitmap. */
> +void
> +xbitmap_clear(
> +	struct xbitmap			*bitmap,
> +	uint64_t			start,
> +	uint64_t			len)
> +{
> +	__xbitmap_clear(bitmap, start, start + len - 1);
> +}

It seems unnecessary to split the functions like this just to preserve
the interface. Could we have the other __xbitmap_clear() caller just
calculate the len itself and call xbitmap_clear() instead?

> +
> +/* Set a range of this bitmap. */
>  int
>  xbitmap_set(
> -	struct xbitmap		*bitmap,
> -	uint64_t		start,
> -	uint64_t		len)
> +	struct xbitmap			*bitmap,
> +	uint64_t			start,
> +	uint64_t			len)
>  {
> -	struct xbitmap_range	*bmr;
> +	struct interval_tree_node	*left;
> +	struct interval_tree_node	*right;
> +	uint64_t			last = start + len - 1;
>  
> -	bmr = kmem_alloc(sizeof(struct xbitmap_range), KM_MAYFAIL);
> -	if (!bmr)
> -		return -ENOMEM;
> +	/* Is this whole range already set? */
> +	left = interval_tree_iter_first(&bitmap->root, start, last);
> +	if (left && left->start <= start && left->last >= last)
> +		return 0;
>  
> -	INIT_LIST_HEAD(&bmr->list);
> -	bmr->start = start;
> -	bmr->len = len;
> -	list_add_tail(&bmr->list, &bitmap->list);
> +	/* Clear out everything in the range we want to set. */
> +	xbitmap_clear(bitmap, start, len);
> +
> +	/* Do we have a left-adjacent extent? */
> +	left = interval_tree_iter_first(&bitmap->root, start - 1, start - 1);
> +	if (left && left->last + 1 != start)
> +		left = NULL;
> +
> +	/* Do we have a right-adjacent extent? */
> +	right = interval_tree_iter_first(&bitmap->root, last + 1, last + 1);
> +	if (right && right->start != last + 1)
> +		right = NULL;

If we just cleared the range to set above, shouldn't these left/right
checks always return an adjacent extent or NULL? It seems harmless,
FWIW, but I'm curious if the logic is necessary.

Brian

> +
> +	if (left && right) {
> +		/* combine left and right adjacent extent */
> +		interval_tree_remove(left, &bitmap->root);
> +		interval_tree_remove(right, &bitmap->root);
> +		left->last = right->last;
> +		interval_tree_insert(left, &bitmap->root);
> +		kmem_free(right);
> +	} else if (left) {
> +		/* combine with left extent */
> +		interval_tree_remove(left, &bitmap->root);
> +		left->last = last;
> +		interval_tree_insert(left, &bitmap->root);
> +	} else if (right) {
> +		/* combine with right extent */
> +		interval_tree_remove(right, &bitmap->root);
> +		right->start = start;
> +		interval_tree_insert(right, &bitmap->root);
> +	} else {
> +		/* add an extent */
> +		left = kmem_alloc(sizeof(struct interval_tree_node),
> +				KM_MAYFAIL);
> +		if (!left)
> +			return -ENOMEM;
> +		left->start = start;
> +		left->last = last;
> +		interval_tree_insert(left, &bitmap->root);
> +	}
>  
>  	return 0;
>  }
> @@ -43,14 +118,13 @@ xbitmap_set(
>  /* Free everything related to this bitmap. */
>  void
>  xbitmap_destroy(
> -	struct xbitmap		*bitmap)
> +	struct xbitmap			*bitmap)
>  {
> -	struct xbitmap_range	*bmr;
> -	struct xbitmap_range	*n;
> +	struct interval_tree_node	*itn, *p;
>  
> -	for_each_xbitmap_extent(bmr, n, bitmap) {
> -		list_del(&bmr->list);
> -		kmem_free(bmr);
> +	for_each_xbitmap_extent(itn, p, bitmap) {
> +		interval_tree_remove(itn, &bitmap->root);
> +		kfree(itn);
>  	}
>  }
>  
> @@ -59,27 +133,7 @@ void
>  xbitmap_init(
>  	struct xbitmap		*bitmap)
>  {
> -	INIT_LIST_HEAD(&bitmap->list);
> -}
> -
> -/* Compare two btree extents. */
> -static int
> -xbitmap_range_cmp(
> -	void			*priv,
> -	struct list_head	*a,
> -	struct list_head	*b)
> -{
> -	struct xbitmap_range	*ap;
> -	struct xbitmap_range	*bp;
> -
> -	ap = container_of(a, struct xbitmap_range, list);
> -	bp = container_of(b, struct xbitmap_range, list);
> -
> -	if (ap->start > bp->start)
> -		return 1;
> -	if (ap->start < bp->start)
> -		return -1;
> -	return 0;
> +	bitmap->root = RB_ROOT_CACHED;
>  }
>  
>  /*
> @@ -96,118 +150,19 @@ xbitmap_range_cmp(
>   *
>   * This is the logical equivalent of bitmap &= ~sub.
>   */
> -#define LEFT_ALIGNED	(1 << 0)
> -#define RIGHT_ALIGNED	(1 << 1)
> -int
> +void
>  xbitmap_disunion(
> -	struct xbitmap		*bitmap,
> -	struct xbitmap		*sub)
> +	struct xbitmap			*bitmap,
> +	struct xbitmap			*sub)
>  {
> -	struct list_head	*lp;
> -	struct xbitmap_range	*br;
> -	struct xbitmap_range	*new_br;
> -	struct xbitmap_range	*sub_br;
> -	uint64_t		sub_start;
> -	uint64_t		sub_len;
> -	int			state;
> -	int			error = 0;
> -
> -	if (list_empty(&bitmap->list) || list_empty(&sub->list))
> -		return 0;
> -	ASSERT(!list_empty(&sub->list));
> -
> -	list_sort(NULL, &bitmap->list, xbitmap_range_cmp);
> -	list_sort(NULL, &sub->list, xbitmap_range_cmp);
> -
> -	/*
> -	 * Now that we've sorted both lists, we iterate bitmap once, rolling
> -	 * forward through sub and/or bitmap as necessary until we find an
> -	 * overlap or reach the end of either list.  We do not reset lp to the
> -	 * head of bitmap nor do we reset sub_br to the head of sub.  The
> -	 * list traversal is similar to merge sort, but we're deleting
> -	 * instead.  In this manner we avoid O(n^2) operations.
> -	 */
> -	sub_br = list_first_entry(&sub->list, struct xbitmap_range,
> -			list);
> -	lp = bitmap->list.next;
> -	while (lp != &bitmap->list) {
> -		br = list_entry(lp, struct xbitmap_range, list);
> -
> -		/*
> -		 * Advance sub_br and/or br until we find a pair that
> -		 * intersect or we run out of extents.
> -		 */
> -		while (sub_br->start + sub_br->len <= br->start) {
> -			if (list_is_last(&sub_br->list, &sub->list))
> -				goto out;
> -			sub_br = list_next_entry(sub_br, list);
> -		}
> -		if (sub_br->start >= br->start + br->len) {
> -			lp = lp->next;
> -			continue;
> -		}
> +	struct interval_tree_node	*itn, *n;
>  
> -		/* trim sub_br to fit the extent we have */
> -		sub_start = sub_br->start;
> -		sub_len = sub_br->len;
> -		if (sub_br->start < br->start) {
> -			sub_len -= br->start - sub_br->start;
> -			sub_start = br->start;
> -		}
> -		if (sub_len > br->len)
> -			sub_len = br->len;
> -
> -		state = 0;
> -		if (sub_start == br->start)
> -			state |= LEFT_ALIGNED;
> -		if (sub_start + sub_len == br->start + br->len)
> -			state |= RIGHT_ALIGNED;
> -		switch (state) {
> -		case LEFT_ALIGNED:
> -			/* Coincides with only the left. */
> -			br->start += sub_len;
> -			br->len -= sub_len;
> -			break;
> -		case RIGHT_ALIGNED:
> -			/* Coincides with only the right. */
> -			br->len -= sub_len;
> -			lp = lp->next;
> -			break;
> -		case LEFT_ALIGNED | RIGHT_ALIGNED:
> -			/* Total overlap, just delete ex. */
> -			lp = lp->next;
> -			list_del(&br->list);
> -			kmem_free(br);
> -			break;
> -		case 0:
> -			/*
> -			 * Deleting from the middle: add the new right extent
> -			 * and then shrink the left extent.
> -			 */
> -			new_br = kmem_alloc(sizeof(struct xbitmap_range),
> -					KM_MAYFAIL);
> -			if (!new_br) {
> -				error = -ENOMEM;
> -				goto out;
> -			}
> -			INIT_LIST_HEAD(&new_br->list);
> -			new_br->start = sub_start + sub_len;
> -			new_br->len = br->start + br->len - new_br->start;
> -			list_add(&new_br->list, &br->list);
> -			br->len = sub_start - br->start;
> -			lp = lp->next;
> -			break;
> -		default:
> -			ASSERT(0);
> -			break;
> -		}
> -	}
> +	if (xbitmap_empty(bitmap) || xbitmap_empty(sub))
> +		return;
>  
> -out:
> -	return error;
> +	for_each_xbitmap_extent(itn, n, sub)
> +		__xbitmap_clear(bitmap, itn->start, itn->last);
>  }
> -#undef LEFT_ALIGNED
> -#undef RIGHT_ALIGNED
>  
>  /*
>   * Record all btree blocks seen while iterating all records of a btree.
> @@ -303,14 +258,13 @@ xbitmap_set_btblocks(
>  /* How many bits are set in this bitmap? */
>  uint64_t
>  xbitmap_hweight(
> -	struct xbitmap		*bitmap)
> +	struct xbitmap			*bitmap)
>  {
> -	struct xbitmap_range	*bmr;
> -	struct xbitmap_range	*n;
> -	uint64_t		ret = 0;
> +	struct interval_tree_node	*itn, *n;
> +	uint64_t			ret = 0;
>  
> -	for_each_xbitmap_extent(bmr, n, bitmap)
> -		ret += bmr->len;
> +	for_each_xbitmap_extent(itn, n, bitmap)
> +		ret += itn->last - itn->start + 1;
>  
>  	return ret;
>  }
> @@ -318,15 +272,15 @@ xbitmap_hweight(
>  /* Call a function for every run of set bits in this bitmap. */
>  int
>  xbitmap_iter_set(
> -	struct xbitmap		*bitmap,
> -	xbitmap_walk_run_fn	fn,
> -	void			*priv)
> +	struct xbitmap			*bitmap,
> +	xbitmap_walk_run_fn		fn,
> +	void				*priv)
>  {
> -	struct xbitmap_range	*bex, *n;
> -	int			error;
> +	struct interval_tree_node	*itn, *n;
> +	int				error;
>  
> -	for_each_xbitmap_extent(bex, n, bitmap) {
> -		error = fn(bex->start, bex->len, priv);
> +	for_each_xbitmap_extent(itn, n, bitmap) {
> +		error = fn(itn->start, itn->last - itn->start + 1, priv);
>  		if (error)
>  			break;
>  	}
> @@ -370,3 +324,11 @@ xbitmap_iter_set_bits(
>  
>  	return xbitmap_iter_set(bitmap, xbitmap_walk_bits_in_run, &wb);
>  }
> +
> +/* Does this bitmap have no bits set at all? */
> +bool
> +xbitmap_empty(
> +	struct xbitmap		*bitmap)
> +{
> +	return bitmap->root.rb_root.rb_node == NULL;
> +}
> diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h
> index 27fde5b4a753..6be596e60ac8 100644
> --- a/fs/xfs/scrub/bitmap.h
> +++ b/fs/xfs/scrub/bitmap.h
> @@ -6,21 +6,18 @@
>  #ifndef __XFS_SCRUB_BITMAP_H__
>  #define __XFS_SCRUB_BITMAP_H__
>  
> -struct xbitmap_range {
> -	struct list_head	list;
> -	uint64_t		start;
> -	uint64_t		len;
> -};
> +#include <linux/interval_tree.h>
>  
>  struct xbitmap {
> -	struct list_head	list;
> +	struct rb_root_cached	root;
>  };
>  
>  void xbitmap_init(struct xbitmap *bitmap);
>  void xbitmap_destroy(struct xbitmap *bitmap);
>  
> +void xbitmap_clear(struct xbitmap *bitmap, uint64_t start, uint64_t len);
>  int xbitmap_set(struct xbitmap *bitmap, uint64_t start, uint64_t len);
> -int xbitmap_disunion(struct xbitmap *bitmap, struct xbitmap *sub);
> +void xbitmap_disunion(struct xbitmap *bitmap, struct xbitmap *sub);
>  int xbitmap_set_btcur_path(struct xbitmap *bitmap,
>  		struct xfs_btree_cur *cur);
>  int xbitmap_set_btblocks(struct xbitmap *bitmap,
> @@ -42,4 +39,6 @@ typedef int (*xbitmap_walk_bit_fn)(uint64_t bit, void *priv);
>  int xbitmap_iter_set_bits(struct xbitmap *bitmap, xbitmap_walk_bit_fn fn,
>  		void *priv);
>  
> +bool xbitmap_empty(struct xbitmap *bitmap);
> +
>  #endif	/* __XFS_SCRUB_BITMAP_H__ */
> 


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 3/4] xfs: remove the for_each_xbitmap_ helpers
  2019-10-22 13:35   ` Brian Foster
@ 2019-10-22 16:56     ` Darrick J. Wong
  0 siblings, 0 replies; 11+ messages in thread
From: Darrick J. Wong @ 2019-10-22 16:56 UTC (permalink / raw)
  To: Brian Foster; +Cc: linux-xfs

On Tue, Oct 22, 2019 at 09:35:18AM -0400, Brian Foster wrote:
> On Wed, Oct 09, 2019 at 09:49:22AM -0700, Darrick J. Wong wrote:
> > From: Darrick J. Wong <darrick.wong@oracle.com>
> > 
> > Remove the for_each_xbitmap_ macros in favor of proper iterator
> > functions.  We'll soon be switching this data structure over to an
> > interval tree implementation, which means that we can't allow callers to
> > modify the bitmap during iteration without telling us.
> > 
> > Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
> > ---
> >  fs/xfs/scrub/agheader_repair.c |   73 ++++++++++++++++++++++++----------------
> >  fs/xfs/scrub/bitmap.c          |   59 ++++++++++++++++++++++++++++++++
> >  fs/xfs/scrub/bitmap.h          |   22 ++++++++----
> >  fs/xfs/scrub/repair.c          |   60 +++++++++++++++++----------------
> >  4 files changed, 148 insertions(+), 66 deletions(-)
> > 
> > 
> ...
> > diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h
> > index 900646b72de1..27fde5b4a753 100644
> > --- a/fs/xfs/scrub/bitmap.h
> > +++ b/fs/xfs/scrub/bitmap.h
> ...
> > @@ -34,4 +27,19 @@ int xbitmap_set_btblocks(struct xbitmap *bitmap,
> >  		struct xfs_btree_cur *cur);
> >  uint64_t xbitmap_hweight(struct xbitmap *bitmap);
> >  
> > +/*
> > + * Return codes for the bitmap iterator functions are 0 to continue iterating,
> > + * and non-zero to stop iterating.  Any non-zero value will be passed up to the
> > + * iteration caller.  The special value -ECANCELED can be used to stop
> > + * iteration, because neither bitmap iterator ever generates that error code on
> > + * its own.
> > + */
> > +typedef int (*xbitmap_walk_run_fn)(uint64_t start, uint64_t len, void *priv);
> > +int xbitmap_iter_set(struct xbitmap *bitmap, xbitmap_walk_run_fn fn,
> > +		void *priv);
> > +
> > +typedef int (*xbitmap_walk_bit_fn)(uint64_t bit, void *priv);
> > +int xbitmap_iter_set_bits(struct xbitmap *bitmap, xbitmap_walk_bit_fn fn,
> > +		void *priv);
> > +
> 
> Somewhat of a nit, but I read "set" as a verb in the above function
> names which tends to confuse me over what these functions do (i.e.
> iterate bits, not set bits). Could we call them something a bit more
> neutral, like xbitmap[_bit]_iter() perhaps? That aside the rest of the
> patch looks Ok to me.

Ok, will rename them.

--D

> Brian
> 
> >  #endif	/* __XFS_SCRUB_BITMAP_H__ */
> > diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
> > index d41da4c44f10..588bc054db5c 100644
> > --- a/fs/xfs/scrub/repair.c
> > +++ b/fs/xfs/scrub/repair.c
> > @@ -507,15 +507,21 @@ xrep_reap_invalidate_block(
> >  	xfs_trans_binval(sc->tp, bp);
> >  }
> >  
> > +struct xrep_reap_block {
> > +	struct xfs_scrub		*sc;
> > +	const struct xfs_owner_info	*oinfo;
> > +	enum xfs_ag_resv_type		resv;
> > +	unsigned int			deferred;
> > +};
> > +
> >  /* Dispose of a single block. */
> >  STATIC int
> >  xrep_reap_block(
> > -	struct xfs_scrub		*sc,
> > -	xfs_fsblock_t			fsbno,
> > -	const struct xfs_owner_info	*oinfo,
> > -	enum xfs_ag_resv_type		resv,
> > -	unsigned int			*deferred)
> > +	uint64_t			fsbno,
> > +	void				*priv)
> >  {
> > +	struct xrep_reap_block		*rb = priv;
> > +	struct xfs_scrub		*sc = rb->sc;
> >  	struct xfs_btree_cur		*cur;
> >  	struct xfs_buf			*agf_bp = NULL;
> >  	xfs_agnumber_t			agno;
> > @@ -527,6 +533,10 @@ xrep_reap_block(
> >  	agno = XFS_FSB_TO_AGNO(sc->mp, fsbno);
> >  	agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno);
> >  
> > +	ASSERT(sc->ip != NULL || agno == sc->sa.agno);
> > +
> > +	trace_xrep_dispose_btree_extent(sc->mp, agno, agbno, 1);
> > +
> >  	/*
> >  	 * If we are repairing per-inode metadata, we need to read in the AGF
> >  	 * buffer.  Otherwise, we're repairing a per-AG structure, so reuse
> > @@ -544,7 +554,8 @@ xrep_reap_block(
> >  	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, agno);
> >  
> >  	/* Can we find any other rmappings? */
> > -	error = xfs_rmap_has_other_keys(cur, agbno, 1, oinfo, &has_other_rmap);
> > +	error = xfs_rmap_has_other_keys(cur, agbno, 1, rb->oinfo,
> > +			&has_other_rmap);
> >  	xfs_btree_del_cursor(cur, error);
> >  	if (error)
> >  		goto out_free;
> > @@ -563,8 +574,9 @@ xrep_reap_block(
> >  	 * to run xfs_repair.
> >  	 */
> >  	if (has_other_rmap) {
> > -		error = xfs_rmap_free(sc->tp, agf_bp, agno, agbno, 1, oinfo);
> > -	} else if (resv == XFS_AG_RESV_AGFL) {
> > +		error = xfs_rmap_free(sc->tp, agf_bp, agno, agbno, 1,
> > +				rb->oinfo);
> > +	} else if (rb->resv == XFS_AG_RESV_AGFL) {
> >  		xrep_reap_invalidate_block(sc, fsbno);
> >  		error = xrep_put_freelist(sc, agbno);
> >  	} else {
> > @@ -576,16 +588,16 @@ xrep_reap_block(
> >  		 * reservation.
> >  		 */
> >  		xrep_reap_invalidate_block(sc, fsbno);
> > -		__xfs_bmap_add_free(sc->tp, fsbno, 1, oinfo, true);
> > -		(*deferred)++;
> > -		need_roll = *deferred > 100;
> > +		__xfs_bmap_add_free(sc->tp, fsbno, 1, rb->oinfo, true);
> > +		rb->deferred++;
> > +		need_roll = rb->deferred > 100;
> >  	}
> >  	if (agf_bp != sc->sa.agf_bp)
> >  		xfs_trans_brelse(sc->tp, agf_bp);
> >  	if (error || !need_roll)
> >  		return error;
> >  
> > -	*deferred = 0;
> > +	rb->deferred = 0;
> >  	if (sc->ip)
> >  		return xfs_trans_roll_inode(&sc->tp, sc->ip);
> >  	return xrep_roll_ag_trans(sc);
> > @@ -604,27 +616,17 @@ xrep_reap_extents(
> >  	const struct xfs_owner_info	*oinfo,
> >  	enum xfs_ag_resv_type		type)
> >  {
> > -	struct xbitmap_range		*bmr;
> > -	struct xbitmap_range		*n;
> > -	xfs_fsblock_t			fsbno;
> > -	unsigned int			deferred = 0;
> > +	struct xrep_reap_block		rb = {
> > +		.sc			= sc,
> > +		.oinfo			= oinfo,
> > +		.resv			= type,
> > +	};
> >  	int				error = 0;
> >  
> >  	ASSERT(xfs_sb_version_hasrmapbt(&sc->mp->m_sb));
> >  
> > -	for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
> > -		ASSERT(sc->ip != NULL ||
> > -		       XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.agno);
> > -		trace_xrep_dispose_btree_extent(sc->mp,
> > -				XFS_FSB_TO_AGNO(sc->mp, fsbno),
> > -				XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1);
> > -
> > -		error = xrep_reap_block(sc, fsbno, oinfo, type, &deferred);
> > -		if (error)
> > -			break;
> > -	}
> > -
> > -	if (error || deferred == 0)
> > +	error = xbitmap_iter_set_bits(bitmap, xrep_reap_block, &rb);
> > +	if (error || rb.deferred == 0)
> >  		return error;
> >  
> >  	if (sc->ip)
> > 
> 

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 4/4] xfs: convert xbitmap to interval tree
  2019-10-22 13:38   ` Brian Foster
@ 2019-10-22 17:59     ` Darrick J. Wong
  0 siblings, 0 replies; 11+ messages in thread
From: Darrick J. Wong @ 2019-10-22 17:59 UTC (permalink / raw)
  To: Brian Foster; +Cc: linux-xfs

On Tue, Oct 22, 2019 at 09:38:47AM -0400, Brian Foster wrote:
> On Wed, Oct 09, 2019 at 09:49:30AM -0700, Darrick J. Wong wrote:
> > From: Darrick J. Wong <darrick.wong@oracle.com>
> > 
> > Convert the xbitmap code to use interval trees instead of linked lists.
> > This reduces the amount of coding required to handle the disunion
> > operation and in the future will make it easier to set bits in arbitrary
> > order yet later be able to extract maximally sized extents, which we'll
> > need for rebuilding certain structures.
> > 
> > Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
> > ---
> 
> Looks mostly straightforward provided my lack of knowledge on interval
> trees. A few random comments..
> 
> >  fs/xfs/Kconfig                 |    1 
> >  fs/xfs/scrub/agheader_repair.c |    4 -
> >  fs/xfs/scrub/bitmap.c          |  292 +++++++++++++++++-----------------------
> >  fs/xfs/scrub/bitmap.h          |   13 +-
> >  4 files changed, 135 insertions(+), 175 deletions(-)
> > 
> > 
> ...
> > diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c
> > index 1041f17f6bb6..e1da103bce78 100644
> > --- a/fs/xfs/scrub/bitmap.c
> > +++ b/fs/xfs/scrub/bitmap.c
> > @@ -12,30 +12,105 @@
> >  #include "xfs_btree.h"
> >  #include "scrub/bitmap.h"
> >  
> > -#define for_each_xbitmap_extent(bex, n, bitmap) \
> > -	list_for_each_entry_safe((bex), (n), &(bitmap)->list, list)
> > +#define for_each_xbitmap_extent(itn, n, bitmap) \
> > +	rbtree_postorder_for_each_entry_safe((itn), (n), \
> > +			&(bitmap)->root.rb_root, rb)
> >  
> 
> I'm not familiar with the interval tree, but the header for this rbtree_
> macro mentions it is unsafe with respect to rbtree_erase(). Is that a
> concern for any of the users that might call interval_tree_remove()? It
> looks like that calls down into rb_erase_augmented(), but it's not clear
> to me if that's a problem..

It's not clear to me either, but the xbitmap iteration functions have
the implicit requirement that you can't edit the xbitmap while walking
it, so I'll add that to the documentation.

Note that none of the callers in my dev tree do that; the agheader
repair code uses disunion to get around that limitation.

As for xbitmap_destroy... yeah, I think that should use a
interval_tree_iter_first loop and not the for_* macro, since a tree
rebalance could upset things.

> > -/*
> > - * Set a range of this bitmap.  Caller must ensure the range is not set.
> > - *
> > - * This is the logical equivalent of bitmap |= mask(start, len).
> > - */
> > +/* Clear a range of this bitmap. */
> > +static void
> > +__xbitmap_clear(
> > +	struct xbitmap			*bitmap,
> > +	uint64_t			start,
> > +	uint64_t			last)
> > +{
> > +	struct interval_tree_node	*itn;
> > +
> > +	while ((itn = interval_tree_iter_first(&bitmap->root, start, last))) {
> > +		if (itn->start < start) {
> > +			/* overlaps with the left side of the clearing range */
> > +			interval_tree_remove(itn, &bitmap->root);
> > +			itn->last = start - 1;
> > +			interval_tree_insert(itn, &bitmap->root);
> > +		} else if (itn->last > last) {
> > +			/* overlaps with the right side of the clearing range */
> > +			interval_tree_remove(itn, &bitmap->root);
> > +			itn->start = last + 1;
> > +			interval_tree_insert(itn, &bitmap->root);
> > +			break;
> > +		} else {
> > +			/* in the middle of the clearing range */
> > +			interval_tree_remove(itn, &bitmap->root);
> > +			kmem_free(itn);
> > +		}
> > +	}
> > +}
> > +
> > +/* Clear a range of this bitmap. */
> > +void
> > +xbitmap_clear(
> > +	struct xbitmap			*bitmap,
> > +	uint64_t			start,
> > +	uint64_t			len)
> > +{
> > +	__xbitmap_clear(bitmap, start, start + len - 1);
> > +}
> 
> It seems unnecessary to split the functions like this just to preserve
> the interface. Could we have the other __xbitmap_clear() caller just
> calculate the len itself and call xbitmap_clear() instead?

Ok.

> > +
> > +/* Set a range of this bitmap. */
> >  int
> >  xbitmap_set(
> > -	struct xbitmap		*bitmap,
> > -	uint64_t		start,
> > -	uint64_t		len)
> > +	struct xbitmap			*bitmap,
> > +	uint64_t			start,
> > +	uint64_t			len)
> >  {
> > -	struct xbitmap_range	*bmr;
> > +	struct interval_tree_node	*left;
> > +	struct interval_tree_node	*right;
> > +	uint64_t			last = start + len - 1;
> >  
> > -	bmr = kmem_alloc(sizeof(struct xbitmap_range), KM_MAYFAIL);
> > -	if (!bmr)
> > -		return -ENOMEM;
> > +	/* Is this whole range already set? */
> > +	left = interval_tree_iter_first(&bitmap->root, start, last);
> > +	if (left && left->start <= start && left->last >= last)
> > +		return 0;
> >  
> > -	INIT_LIST_HEAD(&bmr->list);
> > -	bmr->start = start;
> > -	bmr->len = len;
> > -	list_add_tail(&bmr->list, &bitmap->list);
> > +	/* Clear out everything in the range we want to set. */
> > +	xbitmap_clear(bitmap, start, len);
> > +
> > +	/* Do we have a left-adjacent extent? */
> > +	left = interval_tree_iter_first(&bitmap->root, start - 1, start - 1);
> > +	if (left && left->last + 1 != start)
> > +		left = NULL;
> > +
> > +	/* Do we have a right-adjacent extent? */
> > +	right = interval_tree_iter_first(&bitmap->root, last + 1, last + 1);
> > +	if (right && right->start != last + 1)
> > +		right = NULL;
> 
> If we just cleared the range to set above, shouldn't these left/right
> checks always return an adjacent extent or NULL? It seems harmless,
> FWIW, but I'm curious if the logic is necessary.

They should be; I'll turn them into ASSERTs.  Thanks for reading this
series!

--D

> Brian
> 
> > +
> > +	if (left && right) {
> > +		/* combine left and right adjacent extent */
> > +		interval_tree_remove(left, &bitmap->root);
> > +		interval_tree_remove(right, &bitmap->root);
> > +		left->last = right->last;
> > +		interval_tree_insert(left, &bitmap->root);
> > +		kmem_free(right);
> > +	} else if (left) {
> > +		/* combine with left extent */
> > +		interval_tree_remove(left, &bitmap->root);
> > +		left->last = last;
> > +		interval_tree_insert(left, &bitmap->root);
> > +	} else if (right) {
> > +		/* combine with right extent */
> > +		interval_tree_remove(right, &bitmap->root);
> > +		right->start = start;
> > +		interval_tree_insert(right, &bitmap->root);
> > +	} else {
> > +		/* add an extent */
> > +		left = kmem_alloc(sizeof(struct interval_tree_node),
> > +				KM_MAYFAIL);
> > +		if (!left)
> > +			return -ENOMEM;
> > +		left->start = start;
> > +		left->last = last;
> > +		interval_tree_insert(left, &bitmap->root);
> > +	}
> >  
> >  	return 0;
> >  }
> > @@ -43,14 +118,13 @@ xbitmap_set(
> >  /* Free everything related to this bitmap. */
> >  void
> >  xbitmap_destroy(
> > -	struct xbitmap		*bitmap)
> > +	struct xbitmap			*bitmap)
> >  {
> > -	struct xbitmap_range	*bmr;
> > -	struct xbitmap_range	*n;
> > +	struct interval_tree_node	*itn, *p;
> >  
> > -	for_each_xbitmap_extent(bmr, n, bitmap) {
> > -		list_del(&bmr->list);
> > -		kmem_free(bmr);
> > +	for_each_xbitmap_extent(itn, p, bitmap) {
> > +		interval_tree_remove(itn, &bitmap->root);
> > +		kfree(itn);
> >  	}
> >  }
> >  
> > @@ -59,27 +133,7 @@ void
> >  xbitmap_init(
> >  	struct xbitmap		*bitmap)
> >  {
> > -	INIT_LIST_HEAD(&bitmap->list);
> > -}
> > -
> > -/* Compare two btree extents. */
> > -static int
> > -xbitmap_range_cmp(
> > -	void			*priv,
> > -	struct list_head	*a,
> > -	struct list_head	*b)
> > -{
> > -	struct xbitmap_range	*ap;
> > -	struct xbitmap_range	*bp;
> > -
> > -	ap = container_of(a, struct xbitmap_range, list);
> > -	bp = container_of(b, struct xbitmap_range, list);
> > -
> > -	if (ap->start > bp->start)
> > -		return 1;
> > -	if (ap->start < bp->start)
> > -		return -1;
> > -	return 0;
> > +	bitmap->root = RB_ROOT_CACHED;
> >  }
> >  
> >  /*
> > @@ -96,118 +150,19 @@ xbitmap_range_cmp(
> >   *
> >   * This is the logical equivalent of bitmap &= ~sub.
> >   */
> > -#define LEFT_ALIGNED	(1 << 0)
> > -#define RIGHT_ALIGNED	(1 << 1)
> > -int
> > +void
> >  xbitmap_disunion(
> > -	struct xbitmap		*bitmap,
> > -	struct xbitmap		*sub)
> > +	struct xbitmap			*bitmap,
> > +	struct xbitmap			*sub)
> >  {
> > -	struct list_head	*lp;
> > -	struct xbitmap_range	*br;
> > -	struct xbitmap_range	*new_br;
> > -	struct xbitmap_range	*sub_br;
> > -	uint64_t		sub_start;
> > -	uint64_t		sub_len;
> > -	int			state;
> > -	int			error = 0;
> > -
> > -	if (list_empty(&bitmap->list) || list_empty(&sub->list))
> > -		return 0;
> > -	ASSERT(!list_empty(&sub->list));
> > -
> > -	list_sort(NULL, &bitmap->list, xbitmap_range_cmp);
> > -	list_sort(NULL, &sub->list, xbitmap_range_cmp);
> > -
> > -	/*
> > -	 * Now that we've sorted both lists, we iterate bitmap once, rolling
> > -	 * forward through sub and/or bitmap as necessary until we find an
> > -	 * overlap or reach the end of either list.  We do not reset lp to the
> > -	 * head of bitmap nor do we reset sub_br to the head of sub.  The
> > -	 * list traversal is similar to merge sort, but we're deleting
> > -	 * instead.  In this manner we avoid O(n^2) operations.
> > -	 */
> > -	sub_br = list_first_entry(&sub->list, struct xbitmap_range,
> > -			list);
> > -	lp = bitmap->list.next;
> > -	while (lp != &bitmap->list) {
> > -		br = list_entry(lp, struct xbitmap_range, list);
> > -
> > -		/*
> > -		 * Advance sub_br and/or br until we find a pair that
> > -		 * intersect or we run out of extents.
> > -		 */
> > -		while (sub_br->start + sub_br->len <= br->start) {
> > -			if (list_is_last(&sub_br->list, &sub->list))
> > -				goto out;
> > -			sub_br = list_next_entry(sub_br, list);
> > -		}
> > -		if (sub_br->start >= br->start + br->len) {
> > -			lp = lp->next;
> > -			continue;
> > -		}
> > +	struct interval_tree_node	*itn, *n;
> >  
> > -		/* trim sub_br to fit the extent we have */
> > -		sub_start = sub_br->start;
> > -		sub_len = sub_br->len;
> > -		if (sub_br->start < br->start) {
> > -			sub_len -= br->start - sub_br->start;
> > -			sub_start = br->start;
> > -		}
> > -		if (sub_len > br->len)
> > -			sub_len = br->len;
> > -
> > -		state = 0;
> > -		if (sub_start == br->start)
> > -			state |= LEFT_ALIGNED;
> > -		if (sub_start + sub_len == br->start + br->len)
> > -			state |= RIGHT_ALIGNED;
> > -		switch (state) {
> > -		case LEFT_ALIGNED:
> > -			/* Coincides with only the left. */
> > -			br->start += sub_len;
> > -			br->len -= sub_len;
> > -			break;
> > -		case RIGHT_ALIGNED:
> > -			/* Coincides with only the right. */
> > -			br->len -= sub_len;
> > -			lp = lp->next;
> > -			break;
> > -		case LEFT_ALIGNED | RIGHT_ALIGNED:
> > -			/* Total overlap, just delete ex. */
> > -			lp = lp->next;
> > -			list_del(&br->list);
> > -			kmem_free(br);
> > -			break;
> > -		case 0:
> > -			/*
> > -			 * Deleting from the middle: add the new right extent
> > -			 * and then shrink the left extent.
> > -			 */
> > -			new_br = kmem_alloc(sizeof(struct xbitmap_range),
> > -					KM_MAYFAIL);
> > -			if (!new_br) {
> > -				error = -ENOMEM;
> > -				goto out;
> > -			}
> > -			INIT_LIST_HEAD(&new_br->list);
> > -			new_br->start = sub_start + sub_len;
> > -			new_br->len = br->start + br->len - new_br->start;
> > -			list_add(&new_br->list, &br->list);
> > -			br->len = sub_start - br->start;
> > -			lp = lp->next;
> > -			break;
> > -		default:
> > -			ASSERT(0);
> > -			break;
> > -		}
> > -	}
> > +	if (xbitmap_empty(bitmap) || xbitmap_empty(sub))
> > +		return;
> >  
> > -out:
> > -	return error;
> > +	for_each_xbitmap_extent(itn, n, sub)
> > +		__xbitmap_clear(bitmap, itn->start, itn->last);
> >  }
> > -#undef LEFT_ALIGNED
> > -#undef RIGHT_ALIGNED
> >  
> >  /*
> >   * Record all btree blocks seen while iterating all records of a btree.
> > @@ -303,14 +258,13 @@ xbitmap_set_btblocks(
> >  /* How many bits are set in this bitmap? */
> >  uint64_t
> >  xbitmap_hweight(
> > -	struct xbitmap		*bitmap)
> > +	struct xbitmap			*bitmap)
> >  {
> > -	struct xbitmap_range	*bmr;
> > -	struct xbitmap_range	*n;
> > -	uint64_t		ret = 0;
> > +	struct interval_tree_node	*itn, *n;
> > +	uint64_t			ret = 0;
> >  
> > -	for_each_xbitmap_extent(bmr, n, bitmap)
> > -		ret += bmr->len;
> > +	for_each_xbitmap_extent(itn, n, bitmap)
> > +		ret += itn->last - itn->start + 1;
> >  
> >  	return ret;
> >  }
> > @@ -318,15 +272,15 @@ xbitmap_hweight(
> >  /* Call a function for every run of set bits in this bitmap. */
> >  int
> >  xbitmap_iter_set(
> > -	struct xbitmap		*bitmap,
> > -	xbitmap_walk_run_fn	fn,
> > -	void			*priv)
> > +	struct xbitmap			*bitmap,
> > +	xbitmap_walk_run_fn		fn,
> > +	void				*priv)
> >  {
> > -	struct xbitmap_range	*bex, *n;
> > -	int			error;
> > +	struct interval_tree_node	*itn, *n;
> > +	int				error;
> >  
> > -	for_each_xbitmap_extent(bex, n, bitmap) {
> > -		error = fn(bex->start, bex->len, priv);
> > +	for_each_xbitmap_extent(itn, n, bitmap) {
> > +		error = fn(itn->start, itn->last - itn->start + 1, priv);
> >  		if (error)
> >  			break;
> >  	}
> > @@ -370,3 +324,11 @@ xbitmap_iter_set_bits(
> >  
> >  	return xbitmap_iter_set(bitmap, xbitmap_walk_bits_in_run, &wb);
> >  }
> > +
> > +/* Does this bitmap have no bits set at all? */
> > +bool
> > +xbitmap_empty(
> > +	struct xbitmap		*bitmap)
> > +{
> > +	return bitmap->root.rb_root.rb_node == NULL;
> > +}
> > diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h
> > index 27fde5b4a753..6be596e60ac8 100644
> > --- a/fs/xfs/scrub/bitmap.h
> > +++ b/fs/xfs/scrub/bitmap.h
> > @@ -6,21 +6,18 @@
> >  #ifndef __XFS_SCRUB_BITMAP_H__
> >  #define __XFS_SCRUB_BITMAP_H__
> >  
> > -struct xbitmap_range {
> > -	struct list_head	list;
> > -	uint64_t		start;
> > -	uint64_t		len;
> > -};
> > +#include <linux/interval_tree.h>
> >  
> >  struct xbitmap {
> > -	struct list_head	list;
> > +	struct rb_root_cached	root;
> >  };
> >  
> >  void xbitmap_init(struct xbitmap *bitmap);
> >  void xbitmap_destroy(struct xbitmap *bitmap);
> >  
> > +void xbitmap_clear(struct xbitmap *bitmap, uint64_t start, uint64_t len);
> >  int xbitmap_set(struct xbitmap *bitmap, uint64_t start, uint64_t len);
> > -int xbitmap_disunion(struct xbitmap *bitmap, struct xbitmap *sub);
> > +void xbitmap_disunion(struct xbitmap *bitmap, struct xbitmap *sub);
> >  int xbitmap_set_btcur_path(struct xbitmap *bitmap,
> >  		struct xfs_btree_cur *cur);
> >  int xbitmap_set_btblocks(struct xbitmap *bitmap,
> > @@ -42,4 +39,6 @@ typedef int (*xbitmap_walk_bit_fn)(uint64_t bit, void *priv);
> >  int xbitmap_iter_set_bits(struct xbitmap *bitmap, xbitmap_walk_bit_fn fn,
> >  		void *priv);
> >  
> > +bool xbitmap_empty(struct xbitmap *bitmap);
> > +
> >  #endif	/* __XFS_SCRUB_BITMAP_H__ */
> > 
> 

^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2019-10-22 17:59 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-10-09 16:48 [PATCH 0/4] xfs: rework online repair incore bitmap Darrick J. Wong
2019-10-09 16:49 ` [PATCH 1/4] xfs: rename xfs_bitmap to xbitmap Darrick J. Wong
2019-10-21 14:34   ` Brian Foster
2019-10-09 16:49 ` [PATCH 2/4] xfs: replace open-coded bitmap weight logic Darrick J. Wong
2019-10-21 14:34   ` Brian Foster
2019-10-09 16:49 ` [PATCH 3/4] xfs: remove the for_each_xbitmap_ helpers Darrick J. Wong
2019-10-22 13:35   ` Brian Foster
2019-10-22 16:56     ` Darrick J. Wong
2019-10-09 16:49 ` [PATCH 4/4] xfs: convert xbitmap to interval tree Darrick J. Wong
2019-10-22 13:38   ` Brian Foster
2019-10-22 17:59     ` Darrick J. Wong

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.