From: "Darrick J. Wong" <darrick.wong@oracle.com>
To: david@fromorbit.com, darrick.wong@oracle.com
Cc: linux-fsdevel@vger.kernel.org, vishal.l.verma@intel.com,
	bfoster@redhat.com, xfs@oss.sgi.com,
	Dave Chinner <dchinner@redhat.com>
Subject: [PATCH 30/47] xfs: add rmap btree operations
Date: Wed, 20 Jul 2016 21:59:23 -0700
Message-ID: <146907716318.25461.166766599068910333.stgit@birch.djwong.org>
In-Reply-To: <146907695530.25461.3225785294902719773.stgit@birch.djwong.org>

From: Dave Chinner <dchinner@redhat.com>

Implement the generic btree operations needed to manipulate rmap
btree blocks. This is very similar to the per-ag freespace btree
implementation, and uses the AGFL for allocation and freeing of
blocks.

Adapt the rmap btree to store owner offsets within each rmap record,
and to handle the primary key being redefined as the tuple
[agblk, owner, offset].  The expansion of the primary key is crucial
to allowing multiple owners per extent.
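
To make the expanded key concrete: the ordering enforced by the new
key_diff and inorder callbacks below is a plain lexicographic compare
on [startblock, owner, offset].  A standalone sketch of that ordering
(not part of the patch; the struct only mirrors the in-core
xfs_rmap_irec fields used by the new code):

#include <stdint.h>

/* Minimal stand-in for the in-core rmap key fields. */
struct rmap_key {
	uint32_t	rm_startblock;	/* AG block number */
	uint64_t	rm_owner;	/* inode number or metadata owner */
	uint64_t	rm_offset;	/* offset within the owner */
};

/* Compare keys in [startblock, owner, offset] order; returns <0, 0, >0. */
static int64_t
rmap_key_cmp(const struct rmap_key *a, const struct rmap_key *b)
{
	if (a->rm_startblock != b->rm_startblock)
		return (int64_t)a->rm_startblock - (int64_t)b->rm_startblock;
	if (a->rm_owner != b->rm_owner)
		return a->rm_owner > b->rm_owner ? 1 : -1;
	if (a->rm_offset != b->rm_offset)
		return a->rm_offset > b->rm_offset ? 1 : -1;
	return 0;
}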

v2: Don't double-invalidate a freed btree block, and fix a major
logic error in the inorder functions.

[darrick: adapt the btree ops to deal with offsets]
[darrick: remove init_rec_from_key]
[darrick: move unwritten bit to rm_offset]

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
---
 fs/xfs/libxfs/xfs_btree.h      |    1 
 fs/xfs/libxfs/xfs_rmap.c       |   96 ++++++++++++++
 fs/xfs/libxfs/xfs_rmap.h       |    9 +
 fs/xfs/libxfs/xfs_rmap_btree.c |  267 ++++++++++++++++++++++++++++++++++++++++
 fs/xfs/xfs_trace.h             |    3 
 5 files changed, 376 insertions(+)


diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index 2a478b9..2695480 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -241,6 +241,7 @@ union xfs_btree_irec {
 	struct xfs_alloc_rec_incore	a;
 	struct xfs_bmbt_irec		b;
 	struct xfs_inobt_rec_incore	i;
+	struct xfs_rmap_irec		r;
 };
 
 /*
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index b522bfc..ce29d6b 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -36,6 +36,102 @@
 #include "xfs_error.h"
 #include "xfs_extent_busy.h"
 
+/*
+ * Lookup the first record less than or equal to [bno, len, owner, offset]
+ * in the btree given by cur.
+ */
+int
+xfs_rmap_lookup_le(
+	struct xfs_btree_cur	*cur,
+	xfs_agblock_t		bno,
+	xfs_extlen_t		len,
+	uint64_t		owner,
+	uint64_t		offset,
+	unsigned int		flags,
+	int			*stat)
+{
+	cur->bc_rec.r.rm_startblock = bno;
+	cur->bc_rec.r.rm_blockcount = len;
+	cur->bc_rec.r.rm_owner = owner;
+	cur->bc_rec.r.rm_offset = offset;
+	cur->bc_rec.r.rm_flags = flags;
+	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
+}
+
+/*
+ * Lookup the record exactly matching [bno, len, owner, offset]
+ * in the btree given by cur.
+ */
+int
+xfs_rmap_lookup_eq(
+	struct xfs_btree_cur	*cur,
+	xfs_agblock_t		bno,
+	xfs_extlen_t		len,
+	uint64_t		owner,
+	uint64_t		offset,
+	unsigned int		flags,
+	int			*stat)
+{
+	cur->bc_rec.r.rm_startblock = bno;
+	cur->bc_rec.r.rm_blockcount = len;
+	cur->bc_rec.r.rm_owner = owner;
+	cur->bc_rec.r.rm_offset = offset;
+	cur->bc_rec.r.rm_flags = flags;
+	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
+}
+
+/*
+ * Update the record referred to by cur to the value given
+ * by [bno, len, owner, offset].
+ * This either works (return 0) or gets an EFSCORRUPTED error.
+ */
+STATIC int
+xfs_rmap_update(
+	struct xfs_btree_cur	*cur,
+	struct xfs_rmap_irec	*irec)
+{
+	union xfs_btree_rec	rec;
+
+	rec.rmap.rm_startblock = cpu_to_be32(irec->rm_startblock);
+	rec.rmap.rm_blockcount = cpu_to_be32(irec->rm_blockcount);
+	rec.rmap.rm_owner = cpu_to_be64(irec->rm_owner);
+	rec.rmap.rm_offset = cpu_to_be64(
+			xfs_rmap_irec_offset_pack(irec));
+	return xfs_btree_update(cur, &rec);
+}
+
+static int
+xfs_rmap_btrec_to_irec(
+	union xfs_btree_rec	*rec,
+	struct xfs_rmap_irec	*irec)
+{
+	irec->rm_flags = 0;
+	irec->rm_startblock = be32_to_cpu(rec->rmap.rm_startblock);
+	irec->rm_blockcount = be32_to_cpu(rec->rmap.rm_blockcount);
+	irec->rm_owner = be64_to_cpu(rec->rmap.rm_owner);
+	return xfs_rmap_irec_offset_unpack(be64_to_cpu(rec->rmap.rm_offset),
+			irec);
+}
+
+/*
+ * Get the data from the pointed-to record.
+ */
+int
+xfs_rmap_get_rec(
+	struct xfs_btree_cur	*cur,
+	struct xfs_rmap_irec	*irec,
+	int			*stat)
+{
+	union xfs_btree_rec	*rec;
+	int			error;
+
+	error = xfs_btree_get_rec(cur, &rec, stat);
+	if (error || !*stat)
+		return error;
+
+	return xfs_rmap_btrec_to_irec(rec, irec);
+}
+
 int
 xfs_rmap_free(
 	struct xfs_trans	*tp,
diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h
index e7a6704..aa39a2a 100644
--- a/fs/xfs/libxfs/xfs_rmap.h
+++ b/fs/xfs/libxfs/xfs_rmap.h
@@ -142,4 +142,13 @@ int xfs_rmap_free(struct xfs_trans *tp, struct xfs_buf *agbp,
 		  xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
 		  struct xfs_owner_info *oinfo);
 
+int xfs_rmap_lookup_le(struct xfs_btree_cur *cur, xfs_agblock_t bno,
+		xfs_extlen_t len, uint64_t owner, uint64_t offset,
+		unsigned int flags, int *stat);
+int xfs_rmap_lookup_eq(struct xfs_btree_cur *cur, xfs_agblock_t bno,
+		xfs_extlen_t len, uint64_t owner, uint64_t offset,
+		unsigned int flags, int *stat);
+int xfs_rmap_get_rec(struct xfs_btree_cur *cur, struct xfs_rmap_irec *irec,
+		int *stat);
+
 #endif	/* __XFS_RMAP_H__ */
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index a9ddc191..95cb964 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -29,12 +29,38 @@
 #include "xfs_trans.h"
 #include "xfs_alloc.h"
 #include "xfs_btree.h"
+#include "xfs_rmap.h"
 #include "xfs_rmap_btree.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
 #include "xfs_error.h"
 #include "xfs_extent_busy.h"
 
+/*
+ * Reverse map btree.
+ *
+ * This is a per-ag tree used to track the owner(s) of a given extent. With
+ * reflink it is possible for there to be multiple owners, which is a departure
+ * from classic XFS. Owner records for data extents are inserted when the
+ * extent is mapped and removed when an extent is unmapped.  Owner records for
+ * all other block types (i.e. metadata) are inserted when an extent is
+ * allocated and removed when an extent is freed. There can only be one owner
+ * of a metadata extent, usually an inode or some other metadata structure like
+ * an AG btree.
+ *
+ * The rmap btree is part of the free space management, so blocks for the tree
+ * are sourced from the agfl. Hence we need transaction reservation support for
+ * this tree so that the freelist is always large enough. This also impacts on
+ * the minimum space we need to leave free in the AG.
+ *
+ * The tree is ordered by [ag block, owner, offset]. This is a large key size,
+ * but it is the only way to enforce unique keys when a block can be owned by
+ * multiple files at any offset. There's no need to order/search by extent
+ * size for online updating/management of the tree. It is intended that most
+ * reverse lookups will be to find the owner(s) of a particular block, or to
+ * try to recover tree and file data from corrupt primary metadata.
+ */
+
 static struct xfs_btree_cur *
 xfs_rmapbt_dup_cursor(
 	struct xfs_btree_cur	*cur)
@@ -43,6 +69,172 @@ xfs_rmapbt_dup_cursor(
 			cur->bc_private.a.agbp, cur->bc_private.a.agno);
 }
 
+STATIC void
+xfs_rmapbt_set_root(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_ptr	*ptr,
+	int			inc)
+{
+	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
+	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
+	int			btnum = cur->bc_btnum;
+	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);
+
+	ASSERT(ptr->s != 0);
+
+	agf->agf_roots[btnum] = ptr->s;
+	be32_add_cpu(&agf->agf_levels[btnum], inc);
+	pag->pagf_levels[btnum] += inc;
+	xfs_perag_put(pag);
+
+	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
+}
+
+STATIC int
+xfs_rmapbt_alloc_block(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_ptr	*start,
+	union xfs_btree_ptr	*new,
+	int			*stat)
+{
+	int			error;
+	xfs_agblock_t		bno;
+
+	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+
+	/* Allocate the new block from the freelist. If we can't, give up.  */
+	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
+				       &bno, 1);
+	if (error) {
+		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+		return error;
+	}
+
+	trace_xfs_rmapbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
+			bno, 1);
+	if (bno == NULLAGBLOCK) {
+		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+		*stat = 0;
+		return 0;
+	}
+
+	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1,
+			false);
+
+	xfs_trans_agbtree_delta(cur->bc_tp, 1);
+	new->s = cpu_to_be32(bno);
+
+	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+	*stat = 1;
+	return 0;
+}
+
+STATIC int
+xfs_rmapbt_free_block(
+	struct xfs_btree_cur	*cur,
+	struct xfs_buf		*bp)
+{
+	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
+	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	xfs_agblock_t		bno;
+	int			error;
+
+	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
+	trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
+			bno, 1);
+	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
+	if (error)
+		return error;
+
+	xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
+			      XFS_EXTENT_BUSY_SKIP_DISCARD);
+	xfs_trans_agbtree_delta(cur->bc_tp, -1);
+
+	return 0;
+}
+
+STATIC int
+xfs_rmapbt_get_minrecs(
+	struct xfs_btree_cur	*cur,
+	int			level)
+{
+	return cur->bc_mp->m_rmap_mnr[level != 0];
+}
+
+STATIC int
+xfs_rmapbt_get_maxrecs(
+	struct xfs_btree_cur	*cur,
+	int			level)
+{
+	return cur->bc_mp->m_rmap_mxr[level != 0];
+}
+
+STATIC void
+xfs_rmapbt_init_key_from_rec(
+	union xfs_btree_key	*key,
+	union xfs_btree_rec	*rec)
+{
+	key->rmap.rm_startblock = rec->rmap.rm_startblock;
+	key->rmap.rm_owner = rec->rmap.rm_owner;
+	key->rmap.rm_offset = rec->rmap.rm_offset;
+}
+
+STATIC void
+xfs_rmapbt_init_rec_from_cur(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_rec	*rec)
+{
+	rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock);
+	rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount);
+	rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner);
+	rec->rmap.rm_offset = cpu_to_be64(
+			xfs_rmap_irec_offset_pack(&cur->bc_rec.r));
+}
+
+STATIC void
+xfs_rmapbt_init_ptr_from_cur(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_ptr	*ptr)
+{
+	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+
+	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
+	ASSERT(agf->agf_roots[cur->bc_btnum] != 0);
+
+	ptr->s = agf->agf_roots[cur->bc_btnum];
+}
+
+STATIC __int64_t
+xfs_rmapbt_key_diff(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_key	*key)
+{
+	struct xfs_rmap_irec	*rec = &cur->bc_rec.r;
+	struct xfs_rmap_key	*kp = &key->rmap;
+	__u64			x, y;
+	__int64_t		d;
+
+	d = (__int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
+	if (d)
+		return d;
+
+	x = be64_to_cpu(kp->rm_owner);
+	y = rec->rm_owner;
+	if (x > y)
+		return 1;
+	else if (y > x)
+		return -1;
+
+	x = XFS_RMAP_OFF(be64_to_cpu(kp->rm_offset));
+	y = rec->rm_offset;
+	if (x > y)
+		return 1;
+	else if (y > x)
+		return -1;
+	return 0;
+}
+
 static bool
 xfs_rmapbt_verify(
 	struct xfs_buf		*bp)
@@ -117,12 +309,87 @@ const struct xfs_buf_ops xfs_rmapbt_buf_ops = {
 	.verify_write		= xfs_rmapbt_write_verify,
 };
 
+#if defined(DEBUG) || defined(XFS_WARN)
+STATIC int
+xfs_rmapbt_keys_inorder(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_key	*k1,
+	union xfs_btree_key	*k2)
+{
+	__uint32_t		x;
+	__uint32_t		y;
+	__uint64_t		a;
+	__uint64_t		b;
+
+	x = be32_to_cpu(k1->rmap.rm_startblock);
+	y = be32_to_cpu(k2->rmap.rm_startblock);
+	if (x < y)
+		return 1;
+	else if (x > y)
+		return 0;
+	a = be64_to_cpu(k1->rmap.rm_owner);
+	b = be64_to_cpu(k2->rmap.rm_owner);
+	if (a < b)
+		return 1;
+	else if (a > b)
+		return 0;
+	a = XFS_RMAP_OFF(be64_to_cpu(k1->rmap.rm_offset));
+	b = XFS_RMAP_OFF(be64_to_cpu(k2->rmap.rm_offset));
+	if (a <= b)
+		return 1;
+	return 0;
+}
+
+STATIC int
+xfs_rmapbt_recs_inorder(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_rec	*r1,
+	union xfs_btree_rec	*r2)
+{
+	__uint32_t		x;
+	__uint32_t		y;
+	__uint64_t		a;
+	__uint64_t		b;
+
+	x = be32_to_cpu(r1->rmap.rm_startblock);
+	y = be32_to_cpu(r2->rmap.rm_startblock);
+	if (x < y)
+		return 1;
+	else if (x > y)
+		return 0;
+	a = be64_to_cpu(r1->rmap.rm_owner);
+	b = be64_to_cpu(r2->rmap.rm_owner);
+	if (a < b)
+		return 1;
+	else if (a > b)
+		return 0;
+	a = XFS_RMAP_OFF(be64_to_cpu(r1->rmap.rm_offset));
+	b = XFS_RMAP_OFF(be64_to_cpu(r2->rmap.rm_offset));
+	if (a <= b)
+		return 1;
+	return 0;
+}
+#endif	/* DEBUG */
+
 static const struct xfs_btree_ops xfs_rmapbt_ops = {
 	.rec_len		= sizeof(struct xfs_rmap_rec),
 	.key_len		= 2 * sizeof(struct xfs_rmap_key),
 
 	.dup_cursor		= xfs_rmapbt_dup_cursor,
+	.set_root		= xfs_rmapbt_set_root,
+	.alloc_block		= xfs_rmapbt_alloc_block,
+	.free_block		= xfs_rmapbt_free_block,
+	.get_minrecs		= xfs_rmapbt_get_minrecs,
+	.get_maxrecs		= xfs_rmapbt_get_maxrecs,
+	.init_key_from_rec	= xfs_rmapbt_init_key_from_rec,
+	.init_rec_from_cur	= xfs_rmapbt_init_rec_from_cur,
+	.init_ptr_from_cur	= xfs_rmapbt_init_ptr_from_cur,
+	.key_diff		= xfs_rmapbt_key_diff,
 	.buf_ops		= &xfs_rmapbt_buf_ops,
+#if defined(DEBUG) || defined(XFS_WARN)
+	.keys_inorder		= xfs_rmapbt_keys_inorder,
+	.recs_inorder		= xfs_rmapbt_recs_inorder,
+#endif
 
 	.get_leaf_keys		= xfs_btree_get_leaf_keys_overlapped,
 	.get_node_keys		= xfs_btree_get_node_keys_overlapped,
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 4c3418b..e69912a 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -2502,6 +2502,9 @@ DEFINE_RMAP_EVENT(xfs_rmap_map);
 DEFINE_RMAP_EVENT(xfs_rmap_map_done);
 DEFINE_AG_ERROR_EVENT(xfs_rmap_map_error);
 
+DEFINE_BUSY_EVENT(xfs_rmapbt_alloc_block);
+DEFINE_BUSY_EVENT(xfs_rmapbt_free_block);
+
 #endif /* _TRACE_XFS_H */
 
 #undef TRACE_INCLUDE_PATH
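
For context, later patches in the series consume the new helpers in a
lookup-then-read pattern.  A rough sketch of such a caller follows; the
xfs_rmap_find_owner() wrapper is hypothetical and not part of this
series, only the xfs_rmap_lookup_le() and xfs_rmap_get_rec() signatures
above are real:

/* Sketch: find the reverse mapping covering @bno, if one exists. */
STATIC int
xfs_rmap_find_owner(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	struct xfs_rmap_irec	*irec,
	int			*found)
{
	int			i;
	int			error;

	*found = 0;

	/* Position the cursor at the last record starting at or before bno. */
	error = xfs_rmap_lookup_le(cur, bno, 0, 0, 0, 0, &i);
	if (error || !i)
		return error;

	/* Pull the record back out in in-core form. */
	error = xfs_rmap_get_rec(cur, irec, &i);
	if (error || !i)
		return error;

	/* The record is only a hit if it actually covers bno. */
	if (irec->rm_startblock <= bno &&
	    irec->rm_startblock + irec->rm_blockcount > bno)
		*found = 1;
	return 0;
}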

