linux-xfs.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 0/2] xfs: fix unecessary percpu counter overhead
@ 2020-05-19 21:48 Dave Chinner
  2020-05-19 21:48 ` [PATCH 1/2] xfs: gut error handling in xfs_trans_unreserve_and_mod_sb() Dave Chinner
  2020-05-19 21:48 ` [PATCH 2/2] xfs: reduce free inode accounting overhead Dave Chinner
  0 siblings, 2 replies; 13+ messages in thread
From: Dave Chinner @ 2020-05-19 21:48 UTC (permalink / raw)
  To: linux-xfs

Hi folks,

This is a resend of a patch from months ago that can be found here:

https://lore.kernel.org/linux-xfs/20191121004437.9633-1-david@fromorbit.com/

I've split it into two patches and cleaned it up further and
retested it, and all is good now.

Essentially it solves the problem of production systems taking
percpu_counter_sum() overhead in a hot path when the sum is only
used for debug purposes and not actually compiled in to production
kernels. As a further cleanup of this code, the error handling
never returns errors at all to the caller, so it's only
for debug purposes. Given that the error handling logic is wrong and
we throw it away on debug kernels anyway, just get rid of all of it.

Cheers,

Dave.


^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 1/2] xfs: gut error handling in xfs_trans_unreserve_and_mod_sb()
  2020-05-19 21:48 [PATCH 0/2] xfs: fix unecessary percpu counter overhead Dave Chinner
@ 2020-05-19 21:48 ` Dave Chinner
  2020-05-20  6:53   ` Christoph Hellwig
  2020-05-20  7:33   ` [PATCH 1/2 V2] " Dave Chinner
  2020-05-19 21:48 ` [PATCH 2/2] xfs: reduce free inode accounting overhead Dave Chinner
  1 sibling, 2 replies; 13+ messages in thread
From: Dave Chinner @ 2020-05-19 21:48 UTC (permalink / raw)
  To: linux-xfs

From: Dave Chinner <dchinner@redhat.com>

The error handling in xfs_trans_unreserve_and_mod_sb() is largely
incorrect - rolling back the changes in the transaction if only one
counter underruns makes all the other counters incorrect. We still
allow the change to proceed and commit the transaction, except
now we have multiple incorrect counters instead of a single
underflow.

Further, we don't actually report the error to the caller, so this
is completely silent except on debug kernels that will assert on
failure before we even get to the rollback code.  Hence this error
handling is broken, untested, and largely unnecessary complexity.

Just remove it.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
---
 fs/xfs/xfs_trans.c | 170 +++++++--------------------------------------
 1 file changed, 27 insertions(+), 143 deletions(-)

diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 28b983ff8b113..4522ceaaf57ba 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -534,57 +534,9 @@ xfs_trans_apply_sb_deltas(
 				  sizeof(sbp->sb_frextents) - 1);
 }
 
-STATIC int
-xfs_sb_mod8(
-	uint8_t			*field,
-	int8_t			delta)
-{
-	int8_t			counter = *field;
-
-	counter += delta;
-	if (counter < 0) {
-		ASSERT(0);
-		return -EINVAL;
-	}
-	*field = counter;
-	return 0;
-}
-
-STATIC int
-xfs_sb_mod32(
-	uint32_t		*field,
-	int32_t			delta)
-{
-	int32_t			counter = *field;
-
-	counter += delta;
-	if (counter < 0) {
-		ASSERT(0);
-		return -EINVAL;
-	}
-	*field = counter;
-	return 0;
-}
-
-STATIC int
-xfs_sb_mod64(
-	uint64_t		*field,
-	int64_t			delta)
-{
-	int64_t			counter = *field;
-
-	counter += delta;
-	if (counter < 0) {
-		ASSERT(0);
-		return -EINVAL;
-	}
-	*field = counter;
-	return 0;
-}
-
 /*
- * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
- * and apply superblock counter changes to the in-core superblock.  The
+ * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations and
+ * apply superblock counter changes to the in-core superblock.  The
  * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
  * applied to the in-core superblock.  The idea is that that has already been
  * done.
@@ -629,20 +581,17 @@ xfs_trans_unreserve_and_mod_sb(
 	/* apply the per-cpu counters */
 	if (blkdelta) {
 		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
-		if (error)
-			goto out;
+		ASSERT(!error);
 	}
 
 	if (idelta) {
 		error = xfs_mod_icount(mp, idelta);
-		if (error)
-			goto out_undo_fdblocks;
+		ASSERT(!error);
 	}
 
 	if (ifreedelta) {
 		error = xfs_mod_ifree(mp, ifreedelta);
-		if (error)
-			goto out_undo_icount;
+		ASSERT(!error);
 	}
 
 	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
@@ -650,95 +599,30 @@ xfs_trans_unreserve_and_mod_sb(
 
 	/* apply remaining deltas */
 	spin_lock(&mp->m_sb_lock);
-	if (rtxdelta) {
-		error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
-		if (error)
-			goto out_undo_ifree;
-	}
-
-	if (tp->t_dblocks_delta != 0) {
-		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
-		if (error)
-			goto out_undo_frextents;
-	}
-	if (tp->t_agcount_delta != 0) {
-		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
-		if (error)
-			goto out_undo_dblocks;
-	}
-	if (tp->t_imaxpct_delta != 0) {
-		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
-		if (error)
-			goto out_undo_agcount;
-	}
-	if (tp->t_rextsize_delta != 0) {
-		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
-				     tp->t_rextsize_delta);
-		if (error)
-			goto out_undo_imaxpct;
-	}
-	if (tp->t_rbmblocks_delta != 0) {
-		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
-				     tp->t_rbmblocks_delta);
-		if (error)
-			goto out_undo_rextsize;
-	}
-	if (tp->t_rblocks_delta != 0) {
-		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
-		if (error)
-			goto out_undo_rbmblocks;
-	}
-	if (tp->t_rextents_delta != 0) {
-		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
-				     tp->t_rextents_delta);
-		if (error)
-			goto out_undo_rblocks;
-	}
-	if (tp->t_rextslog_delta != 0) {
-		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
-				     tp->t_rextslog_delta);
-		if (error)
-			goto out_undo_rextents;
-	}
+	mp->m_sb.sb_frextents += rtxdelta;
+	mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
+	mp->m_sb.sb_agcount += tp->t_agcount_delta;
+	mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
+	mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
+	mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
+	mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
+	mp->m_sb.sb_rextents += tp->t_rextents_delta;
+	mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
 	spin_unlock(&mp->m_sb_lock);
-	return;
 
-out_undo_rextents:
-	if (tp->t_rextents_delta)
-		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
-out_undo_rblocks:
-	if (tp->t_rblocks_delta)
-		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
-out_undo_rbmblocks:
-	if (tp->t_rbmblocks_delta)
-		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
-out_undo_rextsize:
-	if (tp->t_rextsize_delta)
-		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
-out_undo_imaxpct:
-	if (tp->t_rextsize_delta)
-		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
-out_undo_agcount:
-	if (tp->t_agcount_delta)
-		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
-out_undo_dblocks:
-	if (tp->t_dblocks_delta)
-		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
-out_undo_frextents:
-	if (rtxdelta)
-		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
-out_undo_ifree:
-	spin_unlock(&mp->m_sb_lock);
-	if (ifreedelta)
-		xfs_mod_ifree(mp, -ifreedelta);
-out_undo_icount:
-	if (idelta)
-		xfs_mod_icount(mp, -idelta);
-out_undo_fdblocks:
-	if (blkdelta)
-		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
-out:
-	ASSERT(error == 0);
+	/*
+	 * Debug checks outside of the spinlock so they don't lock up the
+	 * machine if they fail.
+	 */
+	ASSERT(&mp->m_sb.sb_frextents >= 0);
+	ASSERT(&mp->m_sb.sb_dblocks >= 0);
+	ASSERT(&mp->m_sb.sb_agcount >= 0);
+	ASSERT(&mp->m_sb.sb_imax_pct >= 0);
+	ASSERT(&mp->m_sb.sb_rextsize >= 0);
+	ASSERT(&mp->m_sb.sb_rbmblocks >= 0);
+	ASSERT(&mp->m_sb.sb_rblocks >= 0);
+	ASSERT(&mp->m_sb.sb_rextents >= 0);
+	ASSERT(&mp->m_sb.sb_rextslog >= 0);
 	return;
 }
 
-- 
2.26.2.761.g0e0b3e54be


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH 2/2] xfs: reduce free inode accounting overhead
  2020-05-19 21:48 [PATCH 0/2] xfs: fix unecessary percpu counter overhead Dave Chinner
  2020-05-19 21:48 ` [PATCH 1/2] xfs: gut error handling in xfs_trans_unreserve_and_mod_sb() Dave Chinner
@ 2020-05-19 21:48 ` Dave Chinner
  2020-05-20  6:56   ` Christoph Hellwig
  2020-05-20 20:43   ` Darrick J. Wong
  1 sibling, 2 replies; 13+ messages in thread
From: Dave Chinner @ 2020-05-19 21:48 UTC (permalink / raw)
  To: linux-xfs

From: Dave Chinner <dchinner@redhat.com>

Shaokun Zhang reported that XFs was using substantial CPU time in
percpu_counter_sum() when running a single threaded benchmark on
a high CPU count (128p) machine from xfs_mod_ifree(). The issue
is that the filesystem is empty when the benchmark runs, so inode
allocation is running with a very low inode free count.

With the percpu counter batching, this means comparisons when the
counter is less than 128 * 256 = 32768 use the slow path of adding
up all the counters across the CPUs, and this is expensive on high
CPU count machines.

The summing in xfs_mod_ifree() is only used to fire an assert if an
underrun occurs. The error is ignored by the higher level code.
Hence this is really just debug code and we don't need to run it
on production kernels, nor do we need such debug checks to return
error values just to trigger an assert.

Finally, xfs_mod_icount/xfs_mod_ifree are only called from
xfs_trans_unreserve_and_mod_sb(), so get rid of them and just
directly call the percpu_counter_add/percpu_counter_compare
functions. The compare functions are now run only on debug builds as
they are internal to ASSERT() checks and so only compiled in when
ASSERTs are active (CONFIG_XFS_DEBUG=y or CONFIG_XFS_WARN=y).

Reported-by: Shaokun Zhang <zhangshaokun@hisilicon.com>
Signed-off-by: Dave Chinner <dchinner@redhat.com>
---
 fs/xfs/xfs_mount.c | 33 ---------------------------------
 fs/xfs/xfs_mount.h |  2 --
 fs/xfs/xfs_trans.c | 17 +++++++++++++----
 3 files changed, 13 insertions(+), 39 deletions(-)

diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index bb91f04266b9a..d5dcf98698600 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1189,39 +1189,6 @@ xfs_log_sbcount(xfs_mount_t *mp)
 	return xfs_sync_sb(mp, true);
 }
 
-/*
- * Deltas for the inode count are +/-64, hence we use a large batch size
- * of 128 so we don't need to take the counter lock on every update.
- */
-#define XFS_ICOUNT_BATCH	128
-int
-xfs_mod_icount(
-	struct xfs_mount	*mp,
-	int64_t			delta)
-{
-	percpu_counter_add_batch(&mp->m_icount, delta, XFS_ICOUNT_BATCH);
-	if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) {
-		ASSERT(0);
-		percpu_counter_add(&mp->m_icount, -delta);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-int
-xfs_mod_ifree(
-	struct xfs_mount	*mp,
-	int64_t			delta)
-{
-	percpu_counter_add(&mp->m_ifree, delta);
-	if (percpu_counter_compare(&mp->m_ifree, 0) < 0) {
-		ASSERT(0);
-		percpu_counter_add(&mp->m_ifree, -delta);
-		return -EINVAL;
-	}
-	return 0;
-}
-
 /*
  * Deltas for the block count can vary from 1 to very large, but lock contention
  * only occurs on frequent small block count updates such as in the delayed
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index aba5a15792792..4835581f3eb00 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -392,8 +392,6 @@ extern int	xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount,
 				     xfs_agnumber_t *maxagi);
 extern void	xfs_unmountfs(xfs_mount_t *);
 
-extern int	xfs_mod_icount(struct xfs_mount *mp, int64_t delta);
-extern int	xfs_mod_ifree(struct xfs_mount *mp, int64_t delta);
 extern int	xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
 				 bool reserved);
 extern int	xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 4522ceaaf57ba..b055a5ab53465 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -545,7 +545,12 @@ xfs_trans_apply_sb_deltas(
  * used block counts are not updated in the on disk superblock. In this case,
  * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
  * still need to update the incore superblock with the changes.
+ *
+ * Deltas for the inode count are +/-64, hence we use a large batch size of 128
+ * so we don't need to take the counter lock on every update.
  */
+#define XFS_ICOUNT_BATCH	128
+
 void
 xfs_trans_unreserve_and_mod_sb(
 	struct xfs_trans	*tp)
@@ -585,13 +590,17 @@ xfs_trans_unreserve_and_mod_sb(
 	}
 
 	if (idelta) {
-		error = xfs_mod_icount(mp, idelta);
-		ASSERT(!error);
+		percpu_counter_add_batch(&mp->m_icount, idelta,
+					 XFS_ICOUNT_BATCH);
+		if (idelta < 0)
+			ASSERT(__percpu_counter_compare(&mp->m_icount, 0,
+							XFS_ICOUNT_BATCH) >= 0);
 	}
 
 	if (ifreedelta) {
-		error = xfs_mod_ifree(mp, ifreedelta);
-		ASSERT(!error);
+		percpu_counter_add(&mp->m_ifree, ifreedelta);
+		if (ifreedelta < 0)
+			ASSERT(percpu_counter_compare(&mp->m_ifree, 0) >= 0);
 	}
 
 	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
-- 
2.26.2.761.g0e0b3e54be


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/2] xfs: gut error handling in xfs_trans_unreserve_and_mod_sb()
  2020-05-19 21:48 ` [PATCH 1/2] xfs: gut error handling in xfs_trans_unreserve_and_mod_sb() Dave Chinner
@ 2020-05-20  6:53   ` Christoph Hellwig
  2020-05-20  7:03     ` Dave Chinner
  2020-05-20  7:33   ` [PATCH 1/2 V2] " Dave Chinner
  1 sibling, 1 reply; 13+ messages in thread
From: Christoph Hellwig @ 2020-05-20  6:53 UTC (permalink / raw)
  To: Dave Chinner; +Cc: linux-xfs

On Wed, May 20, 2020 at 07:48:39AM +1000, Dave Chinner wrote:
> +	/*
> +	 * Debug checks outside of the spinlock so they don't lock up the
> +	 * machine if they fail.
> +	 */
> +	ASSERT(&mp->m_sb.sb_frextents >= 0);
> +	ASSERT(&mp->m_sb.sb_dblocks >= 0);
> +	ASSERT(&mp->m_sb.sb_agcount >= 0);

To stick to the theme of broken error handling I don't think this
does what you think as this takes the address of each field, which
will aways be >= 0.  I like the idea of the patch, though.

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 2/2] xfs: reduce free inode accounting overhead
  2020-05-19 21:48 ` [PATCH 2/2] xfs: reduce free inode accounting overhead Dave Chinner
@ 2020-05-20  6:56   ` Christoph Hellwig
  2020-05-20 20:43   ` Darrick J. Wong
  1 sibling, 0 replies; 13+ messages in thread
From: Christoph Hellwig @ 2020-05-20  6:56 UTC (permalink / raw)
  To: Dave Chinner; +Cc: linux-xfs

On Wed, May 20, 2020 at 07:48:40AM +1000, Dave Chinner wrote:
> From: Dave Chinner <dchinner@redhat.com>
> 
> Shaokun Zhang reported that XFs was using substantial CPU time in

s/XFs/XFS/

>  	if (idelta) {
> -		error = xfs_mod_icount(mp, idelta);
> -		ASSERT(!error);
> +		percpu_counter_add_batch(&mp->m_icount, idelta,
> +					 XFS_ICOUNT_BATCH);
> +		if (idelta < 0)
> +			ASSERT(__percpu_counter_compare(&mp->m_icount, 0,
> +							XFS_ICOUNT_BATCH) >= 0);
>  	}
>  
>  	if (ifreedelta) {
> -		error = xfs_mod_ifree(mp, ifreedelta);
> -		ASSERT(!error);
> +		percpu_counter_add(&mp->m_ifree, ifreedelta);
> +		if (ifreedelta < 0)
> +			ASSERT(percpu_counter_compare(&mp->m_ifree, 0) >= 0);

I'd be tempted to just remove the ASSERTS entirely, as they are still
pretty heavy handed for debug kernels.

Otherwise looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/2] xfs: gut error handling in xfs_trans_unreserve_and_mod_sb()
  2020-05-20  6:53   ` Christoph Hellwig
@ 2020-05-20  7:03     ` Dave Chinner
  0 siblings, 0 replies; 13+ messages in thread
From: Dave Chinner @ 2020-05-20  7:03 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-xfs

On Tue, May 19, 2020 at 11:53:34PM -0700, Christoph Hellwig wrote:
> On Wed, May 20, 2020 at 07:48:39AM +1000, Dave Chinner wrote:
> > +	/*
> > +	 * Debug checks outside of the spinlock so they don't lock up the
> > +	 * machine if they fail.
> > +	 */
> > +	ASSERT(&mp->m_sb.sb_frextents >= 0);
> > +	ASSERT(&mp->m_sb.sb_dblocks >= 0);
> > +	ASSERT(&mp->m_sb.sb_agcount >= 0);
> 
> To stick to the theme of broken error handling I don't think this
> does what you think as this takes the address of each field, which
> will aways be >= 0.  I like the idea of the patch, though.

Ah, search and replace fail. I'll fix it.

Cheers,

Dave.
-- 
Dave Chinner
david@fromorbit.com

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 1/2 V2] xfs: gut error handling in xfs_trans_unreserve_and_mod_sb()
  2020-05-19 21:48 ` [PATCH 1/2] xfs: gut error handling in xfs_trans_unreserve_and_mod_sb() Dave Chinner
  2020-05-20  6:53   ` Christoph Hellwig
@ 2020-05-20  7:33   ` Dave Chinner
  2020-05-20  7:48     ` Christoph Hellwig
  1 sibling, 1 reply; 13+ messages in thread
From: Dave Chinner @ 2020-05-20  7:33 UTC (permalink / raw)
  To: linux-xfs

xfs: gut error handling in xfs_trans_unreserve_and_mod_sb()

From: Dave Chinner <dchinner@redhat.com>

The error handling in xfs_trans_unreserve_and_mod_sb() is largely
incorrect - rolling back the changes in the transaction if only one
counter underruns makes all the other counters incorrect. We still
allow the change to proceed and commit the transaction, except
now we have multiple incorrect counters instead of a single
underflow.

Further, we don't actually report the error to the caller, so this
is completely silent except on debug kernels that will assert on
failure before we even get to the rollback code.  Hence this error
handling is broken, untested, and largely unnecessary complexity.

Just remove it.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
---
 fs/xfs/xfs_trans.c | 170 +++++++++--------------------------------------------
 1 file changed, 27 insertions(+), 143 deletions(-)

diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 28b983ff8b113..4522ceaaf57ba 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -534,57 +534,9 @@ xfs_trans_apply_sb_deltas(
 				  sizeof(sbp->sb_frextents) - 1);
 }
 
-STATIC int
-xfs_sb_mod8(
-	uint8_t			*field,
-	int8_t			delta)
-{
-	int8_t			counter = *field;
-
-	counter += delta;
-	if (counter < 0) {
-		ASSERT(0);
-		return -EINVAL;
-	}
-	*field = counter;
-	return 0;
-}
-
-STATIC int
-xfs_sb_mod32(
-	uint32_t		*field,
-	int32_t			delta)
-{
-	int32_t			counter = *field;
-
-	counter += delta;
-	if (counter < 0) {
-		ASSERT(0);
-		return -EINVAL;
-	}
-	*field = counter;
-	return 0;
-}
-
-STATIC int
-xfs_sb_mod64(
-	uint64_t		*field,
-	int64_t			delta)
-{
-	int64_t			counter = *field;
-
-	counter += delta;
-	if (counter < 0) {
-		ASSERT(0);
-		return -EINVAL;
-	}
-	*field = counter;
-	return 0;
-}
-
 /*
- * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
- * and apply superblock counter changes to the in-core superblock.  The
+ * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations and
+ * apply superblock counter changes to the in-core superblock.  The
  * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
  * applied to the in-core superblock.  The idea is that that has already been
  * done.
@@ -629,20 +581,17 @@ xfs_trans_unreserve_and_mod_sb(
 	/* apply the per-cpu counters */
 	if (blkdelta) {
 		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
-		if (error)
-			goto out;
+		ASSERT(!error);
 	}
 
 	if (idelta) {
 		error = xfs_mod_icount(mp, idelta);
-		if (error)
-			goto out_undo_fdblocks;
+		ASSERT(!error);
 	}
 
 	if (ifreedelta) {
 		error = xfs_mod_ifree(mp, ifreedelta);
-		if (error)
-			goto out_undo_icount;
+		ASSERT(!error);
 	}
 
 	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
@@ -650,95 +599,30 @@ xfs_trans_unreserve_and_mod_sb(
 
 	/* apply remaining deltas */
 	spin_lock(&mp->m_sb_lock);
-	if (rtxdelta) {
-		error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
-		if (error)
-			goto out_undo_ifree;
-	}
-
-	if (tp->t_dblocks_delta != 0) {
-		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
-		if (error)
-			goto out_undo_frextents;
-	}
-	if (tp->t_agcount_delta != 0) {
-		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
-		if (error)
-			goto out_undo_dblocks;
-	}
-	if (tp->t_imaxpct_delta != 0) {
-		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
-		if (error)
-			goto out_undo_agcount;
-	}
-	if (tp->t_rextsize_delta != 0) {
-		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
-				     tp->t_rextsize_delta);
-		if (error)
-			goto out_undo_imaxpct;
-	}
-	if (tp->t_rbmblocks_delta != 0) {
-		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
-				     tp->t_rbmblocks_delta);
-		if (error)
-			goto out_undo_rextsize;
-	}
-	if (tp->t_rblocks_delta != 0) {
-		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
-		if (error)
-			goto out_undo_rbmblocks;
-	}
-	if (tp->t_rextents_delta != 0) {
-		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
-				     tp->t_rextents_delta);
-		if (error)
-			goto out_undo_rblocks;
-	}
-	if (tp->t_rextslog_delta != 0) {
-		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
-				     tp->t_rextslog_delta);
-		if (error)
-			goto out_undo_rextents;
-	}
+	mp->m_sb.sb_frextents += rtxdelta;
+	mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
+	mp->m_sb.sb_agcount += tp->t_agcount_delta;
+	mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
+	mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
+	mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
+	mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
+	mp->m_sb.sb_rextents += tp->t_rextents_delta;
+	mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
 	spin_unlock(&mp->m_sb_lock);
-	return;
 
-out_undo_rextents:
-	if (tp->t_rextents_delta)
-		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
-out_undo_rblocks:
-	if (tp->t_rblocks_delta)
-		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
-out_undo_rbmblocks:
-	if (tp->t_rbmblocks_delta)
-		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
-out_undo_rextsize:
-	if (tp->t_rextsize_delta)
-		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
-out_undo_imaxpct:
-	if (tp->t_rextsize_delta)
-		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
-out_undo_agcount:
-	if (tp->t_agcount_delta)
-		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
-out_undo_dblocks:
-	if (tp->t_dblocks_delta)
-		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
-out_undo_frextents:
-	if (rtxdelta)
-		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
-out_undo_ifree:
-	spin_unlock(&mp->m_sb_lock);
-	if (ifreedelta)
-		xfs_mod_ifree(mp, -ifreedelta);
-out_undo_icount:
-	if (idelta)
-		xfs_mod_icount(mp, -idelta);
-out_undo_fdblocks:
-	if (blkdelta)
-		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
-out:
-	ASSERT(error == 0);
+	/*
+	 * Debug checks outside of the spinlock so they don't lock up the
+	 * machine if they fail.
+	 */
+	ASSERT(mp->m_sb.sb_frextents >= 0);
+	ASSERT(mp->m_sb.sb_dblocks >= 0);
+	ASSERT(mp->m_sb.sb_agcount >= 0);
+	ASSERT(mp->m_sb.sb_imax_pct >= 0);
+	ASSERT(mp->m_sb.sb_rextsize >= 0);
+	ASSERT(mp->m_sb.sb_rbmblocks >= 0);
+	ASSERT(mp->m_sb.sb_rblocks >= 0);
+	ASSERT(mp->m_sb.sb_rextents >= 0);
+	ASSERT(mp->m_sb.sb_rextslog >= 0);
 	return;
 }
 

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/2 V2] xfs: gut error handling in xfs_trans_unreserve_and_mod_sb()
  2020-05-20  7:33   ` [PATCH 1/2 V2] " Dave Chinner
@ 2020-05-20  7:48     ` Christoph Hellwig
  2020-05-20 20:27       ` Darrick J. Wong
  0 siblings, 1 reply; 13+ messages in thread
From: Christoph Hellwig @ 2020-05-20  7:48 UTC (permalink / raw)
  To: Dave Chinner; +Cc: linux-xfs

On Wed, May 20, 2020 at 05:33:58PM +1000, Dave Chinner wrote:
> +	/*
> +	 * Debug checks outside of the spinlock so they don't lock up the
> +	 * machine if they fail.
> +	 */
> +	ASSERT(mp->m_sb.sb_frextents >= 0);
> +	ASSERT(mp->m_sb.sb_dblocks >= 0);
> +	ASSERT(mp->m_sb.sb_agcount >= 0);
> +	ASSERT(mp->m_sb.sb_imax_pct >= 0);
> +	ASSERT(mp->m_sb.sb_rextsize >= 0);
> +	ASSERT(mp->m_sb.sb_rbmblocks >= 0);
> +	ASSERT(mp->m_sb.sb_rblocks >= 0);
> +	ASSERT(mp->m_sb.sb_rextents >= 0);
> +	ASSERT(mp->m_sb.sb_rextslog >= 0);
>  	return;

No need for the return here at the end of the function.

Otherwise looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/2 V2] xfs: gut error handling in xfs_trans_unreserve_and_mod_sb()
  2020-05-20  7:48     ` Christoph Hellwig
@ 2020-05-20 20:27       ` Darrick J. Wong
  2020-05-20 21:55         ` Dave Chinner
  0 siblings, 1 reply; 13+ messages in thread
From: Darrick J. Wong @ 2020-05-20 20:27 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: Dave Chinner, linux-xfs

On Wed, May 20, 2020 at 12:48:05AM -0700, Christoph Hellwig wrote:
> On Wed, May 20, 2020 at 05:33:58PM +1000, Dave Chinner wrote:
> > +	/*
> > +	 * Debug checks outside of the spinlock so they don't lock up the
> > +	 * machine if they fail.
> > +	 */
> > +	ASSERT(mp->m_sb.sb_frextents >= 0);
> > +	ASSERT(mp->m_sb.sb_dblocks >= 0);
> > +	ASSERT(mp->m_sb.sb_agcount >= 0);
> > +	ASSERT(mp->m_sb.sb_imax_pct >= 0);
> > +	ASSERT(mp->m_sb.sb_rextsize >= 0);
> > +	ASSERT(mp->m_sb.sb_rbmblocks >= 0);
> > +	ASSERT(mp->m_sb.sb_rblocks >= 0);
> > +	ASSERT(mp->m_sb.sb_rextents >= 0);
> > +	ASSERT(mp->m_sb.sb_rextslog >= 0);

Except for imax_pct and rextslog, all of these are unsigned quantities,
right?  So the asserts will /never/ trigger?

--D

> >  	return;
> 
> No need for the return here at the end of the function.
> 
> Otherwise looks good:
> 
> Reviewed-by: Christoph Hellwig <hch@lst.de>

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 2/2] xfs: reduce free inode accounting overhead
  2020-05-19 21:48 ` [PATCH 2/2] xfs: reduce free inode accounting overhead Dave Chinner
  2020-05-20  6:56   ` Christoph Hellwig
@ 2020-05-20 20:43   ` Darrick J. Wong
  1 sibling, 0 replies; 13+ messages in thread
From: Darrick J. Wong @ 2020-05-20 20:43 UTC (permalink / raw)
  To: Dave Chinner; +Cc: linux-xfs

On Wed, May 20, 2020 at 07:48:40AM +1000, Dave Chinner wrote:
> From: Dave Chinner <dchinner@redhat.com>
> 
> Shaokun Zhang reported that XFs was using substantial CPU time in
> percpu_counter_sum() when running a single threaded benchmark on
> a high CPU count (128p) machine from xfs_mod_ifree(). The issue
> is that the filesystem is empty when the benchmark runs, so inode
> allocation is running with a very low inode free count.
> 
> With the percpu counter batching, this means comparisons when the
> counter is less than 128 * 256 = 32768 use the slow path of adding
> up all the counters across the CPUs, and this is expensive on high
> CPU count machines.
> 
> The summing in xfs_mod_ifree() is only used to fire an assert if an
> underrun occurs. The error is ignored by the higher level code.
> Hence this is really just debug code and we don't need to run it
> on production kernels, nor do we need such debug checks to return
> error values just to trigger an assert.
> 
> Finally, xfs_mod_icount/xfs_mod_ifree are only called from
> xfs_trans_unreserve_and_mod_sb(), so get rid of them and just
> directly call the percpu_counter_add/percpu_counter_compare
> functions. The compare functions are now run only on debug builds as
> they are internal to ASSERT() checks and so only compiled in when
> ASSERTs are active (CONFIG_XFS_DEBUG=y or CONFIG_XFS_WARN=y).
> 
> Reported-by: Shaokun Zhang <zhangshaokun@hisilicon.com>
> Signed-off-by: Dave Chinner <dchinner@redhat.com>

Seems like a reasonable substitution/ASSERT reduction to me,
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>

--D

> ---
>  fs/xfs/xfs_mount.c | 33 ---------------------------------
>  fs/xfs/xfs_mount.h |  2 --
>  fs/xfs/xfs_trans.c | 17 +++++++++++++----
>  3 files changed, 13 insertions(+), 39 deletions(-)
> 
> diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
> index bb91f04266b9a..d5dcf98698600 100644
> --- a/fs/xfs/xfs_mount.c
> +++ b/fs/xfs/xfs_mount.c
> @@ -1189,39 +1189,6 @@ xfs_log_sbcount(xfs_mount_t *mp)
>  	return xfs_sync_sb(mp, true);
>  }
>  
> -/*
> - * Deltas for the inode count are +/-64, hence we use a large batch size
> - * of 128 so we don't need to take the counter lock on every update.
> - */
> -#define XFS_ICOUNT_BATCH	128
> -int
> -xfs_mod_icount(
> -	struct xfs_mount	*mp,
> -	int64_t			delta)
> -{
> -	percpu_counter_add_batch(&mp->m_icount, delta, XFS_ICOUNT_BATCH);
> -	if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) {
> -		ASSERT(0);
> -		percpu_counter_add(&mp->m_icount, -delta);
> -		return -EINVAL;
> -	}
> -	return 0;
> -}
> -
> -int
> -xfs_mod_ifree(
> -	struct xfs_mount	*mp,
> -	int64_t			delta)
> -{
> -	percpu_counter_add(&mp->m_ifree, delta);
> -	if (percpu_counter_compare(&mp->m_ifree, 0) < 0) {
> -		ASSERT(0);
> -		percpu_counter_add(&mp->m_ifree, -delta);
> -		return -EINVAL;
> -	}
> -	return 0;
> -}
> -
>  /*
>   * Deltas for the block count can vary from 1 to very large, but lock contention
>   * only occurs on frequent small block count updates such as in the delayed
> diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
> index aba5a15792792..4835581f3eb00 100644
> --- a/fs/xfs/xfs_mount.h
> +++ b/fs/xfs/xfs_mount.h
> @@ -392,8 +392,6 @@ extern int	xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount,
>  				     xfs_agnumber_t *maxagi);
>  extern void	xfs_unmountfs(xfs_mount_t *);
>  
> -extern int	xfs_mod_icount(struct xfs_mount *mp, int64_t delta);
> -extern int	xfs_mod_ifree(struct xfs_mount *mp, int64_t delta);
>  extern int	xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
>  				 bool reserved);
>  extern int	xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);
> diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
> index 4522ceaaf57ba..b055a5ab53465 100644
> --- a/fs/xfs/xfs_trans.c
> +++ b/fs/xfs/xfs_trans.c
> @@ -545,7 +545,12 @@ xfs_trans_apply_sb_deltas(
>   * used block counts are not updated in the on disk superblock. In this case,
>   * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
>   * still need to update the incore superblock with the changes.
> + *
> + * Deltas for the inode count are +/-64, hence we use a large batch size of 128
> + * so we don't need to take the counter lock on every update.
>   */
> +#define XFS_ICOUNT_BATCH	128
> +
>  void
>  xfs_trans_unreserve_and_mod_sb(
>  	struct xfs_trans	*tp)
> @@ -585,13 +590,17 @@ xfs_trans_unreserve_and_mod_sb(
>  	}
>  
>  	if (idelta) {
> -		error = xfs_mod_icount(mp, idelta);
> -		ASSERT(!error);
> +		percpu_counter_add_batch(&mp->m_icount, idelta,
> +					 XFS_ICOUNT_BATCH);
> +		if (idelta < 0)
> +			ASSERT(__percpu_counter_compare(&mp->m_icount, 0,
> +							XFS_ICOUNT_BATCH) >= 0);
>  	}
>  
>  	if (ifreedelta) {
> -		error = xfs_mod_ifree(mp, ifreedelta);
> -		ASSERT(!error);
> +		percpu_counter_add(&mp->m_ifree, ifreedelta);
> +		if (ifreedelta < 0)
> +			ASSERT(percpu_counter_compare(&mp->m_ifree, 0) >= 0);
>  	}
>  
>  	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
> -- 
> 2.26.2.761.g0e0b3e54be
> 

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/2 V2] xfs: gut error handling in xfs_trans_unreserve_and_mod_sb()
  2020-05-20 20:27       ` Darrick J. Wong
@ 2020-05-20 21:55         ` Dave Chinner
  2020-05-20 22:28           ` Darrick J. Wong
  0 siblings, 1 reply; 13+ messages in thread
From: Dave Chinner @ 2020-05-20 21:55 UTC (permalink / raw)
  To: Darrick J. Wong; +Cc: Christoph Hellwig, linux-xfs

On Wed, May 20, 2020 at 01:27:02PM -0700, Darrick J. Wong wrote:
> On Wed, May 20, 2020 at 12:48:05AM -0700, Christoph Hellwig wrote:
> > On Wed, May 20, 2020 at 05:33:58PM +1000, Dave Chinner wrote:
> > > +	/*
> > > +	 * Debug checks outside of the spinlock so they don't lock up the
> > > +	 * machine if they fail.
> > > +	 */
> > > +	ASSERT(mp->m_sb.sb_frextents >= 0);
> > > +	ASSERT(mp->m_sb.sb_dblocks >= 0);
> > > +	ASSERT(mp->m_sb.sb_agcount >= 0);
> > > +	ASSERT(mp->m_sb.sb_imax_pct >= 0);
> > > +	ASSERT(mp->m_sb.sb_rextsize >= 0);
> > > +	ASSERT(mp->m_sb.sb_rbmblocks >= 0);
> > > +	ASSERT(mp->m_sb.sb_rblocks >= 0);
> > > +	ASSERT(mp->m_sb.sb_rextents >= 0);
> > > +	ASSERT(mp->m_sb.sb_rextslog >= 0);
> 
> Except for imax_pct and rextslog, all of these are unsigned quantities,
> right?  So the asserts will /never/ trigger?

In truth, I didn't look that far. I just assumed that because all
the xfs_sb_mod*() functions used signed math that they could all
underflow/overflow.  IOWs, the checking for overflow/underflow was
completely wrong in the first place.

Should I just remove the ASSERT()s entirely?

Cheers,

Dave.
-- 
Dave Chinner
david@fromorbit.com

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/2 V2] xfs: gut error handling in xfs_trans_unreserve_and_mod_sb()
  2020-05-20 21:55         ` Dave Chinner
@ 2020-05-20 22:28           ` Darrick J. Wong
  2020-05-20 22:37             ` Dave Chinner
  0 siblings, 1 reply; 13+ messages in thread
From: Darrick J. Wong @ 2020-05-20 22:28 UTC (permalink / raw)
  To: Dave Chinner; +Cc: Christoph Hellwig, linux-xfs

On Thu, May 21, 2020 at 07:55:30AM +1000, Dave Chinner wrote:
> On Wed, May 20, 2020 at 01:27:02PM -0700, Darrick J. Wong wrote:
> > On Wed, May 20, 2020 at 12:48:05AM -0700, Christoph Hellwig wrote:
> > > On Wed, May 20, 2020 at 05:33:58PM +1000, Dave Chinner wrote:
> > > > +	/*
> > > > +	 * Debug checks outside of the spinlock so they don't lock up the
> > > > +	 * machine if they fail.
> > > > +	 */
> > > > +	ASSERT(mp->m_sb.sb_frextents >= 0);
> > > > +	ASSERT(mp->m_sb.sb_dblocks >= 0);
> > > > +	ASSERT(mp->m_sb.sb_agcount >= 0);
> > > > +	ASSERT(mp->m_sb.sb_imax_pct >= 0);
> > > > +	ASSERT(mp->m_sb.sb_rextsize >= 0);
> > > > +	ASSERT(mp->m_sb.sb_rbmblocks >= 0);
> > > > +	ASSERT(mp->m_sb.sb_rblocks >= 0);
> > > > +	ASSERT(mp->m_sb.sb_rextents >= 0);
> > > > +	ASSERT(mp->m_sb.sb_rextslog >= 0);
> > 
> > Except for imax_pct and rextslog, all of these are unsigned quantities,
> > right?  So the asserts will /never/ trigger?
> 
> In truth, I didn't look that far. I just assumed that because all
> the xfs_sb_mod*() functions used signed math that they could all
> underflow/overflow.  IOWs, the checking for overflow/underflow was
> completely wrong in the first place.
> 
> Should I just remove the ASSERT()s entirely?

It causes a bunch of gcc 9.3 warnings, so yes please. :)

(Granted, I ripped out all the asserts except for the two I mentioned
above, so if nobody else have complaints then no need to resend.)

--D

> Cheers,
> 
> Dave.
> -- 
> Dave Chinner
> david@fromorbit.com

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/2 V2] xfs: gut error handling in xfs_trans_unreserve_and_mod_sb()
  2020-05-20 22:28           ` Darrick J. Wong
@ 2020-05-20 22:37             ` Dave Chinner
  0 siblings, 0 replies; 13+ messages in thread
From: Dave Chinner @ 2020-05-20 22:37 UTC (permalink / raw)
  To: Darrick J. Wong; +Cc: Christoph Hellwig, linux-xfs

On Wed, May 20, 2020 at 03:28:21PM -0700, Darrick J. Wong wrote:
> On Thu, May 21, 2020 at 07:55:30AM +1000, Dave Chinner wrote:
> > On Wed, May 20, 2020 at 01:27:02PM -0700, Darrick J. Wong wrote:
> > > On Wed, May 20, 2020 at 12:48:05AM -0700, Christoph Hellwig wrote:
> > > > On Wed, May 20, 2020 at 05:33:58PM +1000, Dave Chinner wrote:
> > > > > +	/*
> > > > > +	 * Debug checks outside of the spinlock so they don't lock up the
> > > > > +	 * machine if they fail.
> > > > > +	 */
> > > > > +	ASSERT(mp->m_sb.sb_frextents >= 0);
> > > > > +	ASSERT(mp->m_sb.sb_dblocks >= 0);
> > > > > +	ASSERT(mp->m_sb.sb_agcount >= 0);
> > > > > +	ASSERT(mp->m_sb.sb_imax_pct >= 0);
> > > > > +	ASSERT(mp->m_sb.sb_rextsize >= 0);
> > > > > +	ASSERT(mp->m_sb.sb_rbmblocks >= 0);
> > > > > +	ASSERT(mp->m_sb.sb_rblocks >= 0);
> > > > > +	ASSERT(mp->m_sb.sb_rextents >= 0);
> > > > > +	ASSERT(mp->m_sb.sb_rextslog >= 0);
> > > 
> > > Except for imax_pct and rextslog, all of these are unsigned quantities,
> > > right?  So the asserts will /never/ trigger?
> > 
> > In truth, I didn't look that far. I just assumed that because all
> > the xfs_sb_mod*() functions used signed math that they could all
> > underflow/overflow.  IOWs, the checking for overflow/underflow was
> > completely wrong in the first place.
> > 
> > Should I just remove the ASSERT()s entirely?
> 
> It causes a bunch of gcc 9.3 warnings, so yes please. :)
> 
> (Granted, I ripped out all the asserts except for the two I mentioned
> above, so if nobody else have complaints then no need to resend.)

Fine by me. FWIW, gcc 9.2 doesn't complain at all about these.

Cheers,

Dave.
-- 
Dave Chinner
david@fromorbit.com

^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2020-05-20 22:37 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-05-19 21:48 [PATCH 0/2] xfs: fix unnecessary percpu counter overhead Dave Chinner
2020-05-19 21:48 ` [PATCH 1/2] xfs: gut error handling in xfs_trans_unreserve_and_mod_sb() Dave Chinner
2020-05-20  6:53   ` Christoph Hellwig
2020-05-20  7:03     ` Dave Chinner
2020-05-20  7:33   ` [PATCH 1/2 V2] " Dave Chinner
2020-05-20  7:48     ` Christoph Hellwig
2020-05-20 20:27       ` Darrick J. Wong
2020-05-20 21:55         ` Dave Chinner
2020-05-20 22:28           ` Darrick J. Wong
2020-05-20 22:37             ` Dave Chinner
2020-05-19 21:48 ` [PATCH 2/2] xfs: reduce free inode accounting overhead Dave Chinner
2020-05-20  6:56   ` Christoph Hellwig
2020-05-20 20:43   ` Darrick J. Wong

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).