* more log cleanups
@ 2020-03-20  6:53 Christoph Hellwig
  2020-03-20  6:53 ` [PATCH 1/8] xfs: merge xlog_cil_push into xlog_cil_push_work Christoph Hellwig
                   ` (7 more replies)
  0 siblings, 8 replies; 11+ messages in thread
From: Christoph Hellwig @ 2020-03-20  6:53 UTC (permalink / raw)
  To: linux-xfs; +Cc: Dave Chinner

Hi all,

this series contains more uncontroversial patches from the
"cleanup log I/O error handling" series.

* [PATCH 1/8] xfs: merge xlog_cil_push into xlog_cil_push_work
  2020-03-20  6:53 more log cleanups Christoph Hellwig
@ 2020-03-20  6:53 ` Christoph Hellwig
  2020-03-20  6:53 ` [PATCH 2/8] xfs: factor out a xlog_wait_on_iclog helper Christoph Hellwig
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 11+ messages in thread
From: Christoph Hellwig @ 2020-03-20  6:53 UTC (permalink / raw)
  To: linux-xfs; +Cc: Dave Chinner, Dave Chinner, Brian Foster, Darrick J . Wong

xlog_cil_push is only called by xlog_cil_push_work, so merge the two
functions.
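
For readers less familiar with the workqueue idiom this relies on, here
is a stand-alone userspace sketch (invented names, not part of the
patch) of how an embedded work_struct lets the handler recover its
containing structure via container_of, which is what makes the separate
xlog_cil_push wrapper unnecessary:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };            /* stand-in only */

struct fake_cil {
        int                     sequence;
        struct work_struct      push_work;      /* embedded, like xc_push_work */
};

static void push_work_fn(struct work_struct *work)
{
        /* Recover the enclosing context directly from the work item. */
        struct fake_cil *cil = container_of(work, struct fake_cil, push_work);

        printf("pushing sequence %d\n", cil->sequence);
}

int main(void)
{
        struct fake_cil cil = { .sequence = 42 };

        push_work_fn(&cil.push_work);   /* what the workqueue core would do */
        return 0;
}

Since the push context is recoverable from the work item itself, the
work handler can do the push directly instead of bouncing through a
one-line wrapper.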

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
---
 fs/xfs/xfs_log_cil.c | 46 +++++++++++++++++---------------------------
 1 file changed, 18 insertions(+), 28 deletions(-)

diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 48435cf2aa16..9ef0f8b555a4 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -626,24 +626,26 @@ xlog_cil_process_committed(
 }
 
 /*
- * Push the Committed Item List to the log. If @push_seq flag is zero, then it
- * is a background flush and so we can chose to ignore it. Otherwise, if the
- * current sequence is the same as @push_seq we need to do a flush. If
- * @push_seq is less than the current sequence, then it has already been
+ * Push the Committed Item List to the log.
+ *
+ * If the current sequence is the same as xc_push_seq we need to do a flush. If
+ * xc_push_seq is less than the current sequence, then it has already been
  * flushed and we don't need to do anything - the caller will wait for it to
  * complete if necessary.
  *
- * @push_seq is a value rather than a flag because that allows us to do an
- * unlocked check of the sequence number for a match. Hence we can allows log
- * forces to run racily and not issue pushes for the same sequence twice. If we
- * get a race between multiple pushes for the same sequence they will block on
- * the first one and then abort, hence avoiding needless pushes.
+ * xc_push_seq is checked unlocked against the sequence number for a match.
+ * Hence we can allow log forces to run racily and not issue pushes for the
+ * same sequence twice.  If we get a race between multiple pushes for the same
+ * sequence they will block on the first one and then abort, hence avoiding
+ * needless pushes.
  */
-STATIC int
-xlog_cil_push(
-	struct xlog		*log)
+static void
+xlog_cil_push_work(
+	struct work_struct	*work)
 {
-	struct xfs_cil		*cil = log->l_cilp;
+	struct xfs_cil		*cil =
+		container_of(work, struct xfs_cil, xc_push_work);
+	struct xlog		*log = cil->xc_log;
 	struct xfs_log_vec	*lv;
 	struct xfs_cil_ctx	*ctx;
 	struct xfs_cil_ctx	*new_ctx;
@@ -657,9 +659,6 @@ xlog_cil_push(
 	xfs_lsn_t		commit_lsn;
 	xfs_lsn_t		push_seq;
 
-	if (!cil)
-		return 0;
-
 	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_NOFS);
 	new_ctx->ticket = xlog_cil_ticket_alloc(log);
 
@@ -867,28 +866,19 @@ xlog_cil_push(
 	spin_unlock(&cil->xc_push_lock);
 
 	/* release the hounds! */
-	return xfs_log_release_iclog(log->l_mp, commit_iclog);
+	xfs_log_release_iclog(log->l_mp, commit_iclog);
+	return;
 
 out_skip:
 	up_write(&cil->xc_ctx_lock);
 	xfs_log_ticket_put(new_ctx->ticket);
 	kmem_free(new_ctx);
-	return 0;
+	return;
 
 out_abort_free_ticket:
 	xfs_log_ticket_put(tic);
 out_abort:
 	xlog_cil_committed(ctx, true);
-	return -EIO;
-}
-
-static void
-xlog_cil_push_work(
-	struct work_struct	*work)
-{
-	struct xfs_cil		*cil = container_of(work, struct xfs_cil,
-							xc_push_work);
-	xlog_cil_push(cil->xc_log);
 }
 
 /*
-- 
2.25.1


* [PATCH 2/8] xfs: factor out a xlog_wait_on_iclog helper
  2020-03-20  6:53 more log cleanups Christoph Hellwig
  2020-03-20  6:53 ` [PATCH 1/8] xfs: merge xlog_cil_push into xlog_cil_push_work Christoph Hellwig
@ 2020-03-20  6:53 ` Christoph Hellwig
  2020-03-20  6:53 ` [PATCH 3/8] xfs: simplify the xfs_log_release_iclog calling convention Christoph Hellwig
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 11+ messages in thread
From: Christoph Hellwig @ 2020-03-20  6:53 UTC (permalink / raw)
  To: linux-xfs; +Cc: Dave Chinner, Brian Foster, Darrick J . Wong

Factor out the shared code to wait for a log force into a new helper.
This helper uses the XLOG_FORCED_SHUTDOWN check previously only used
by the unmount code instead of the equivalent iclog ioerror state used
by the other two functions.

There is a slight behavior change in that the force of the unmount
record is now accounted for in the log force statistics.
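
As a rough illustration of the shape of the factored-out wait logic,
here is a self-contained userspace sketch (invented names; a pthread
condition variable stands in for the icloglock/ic_force_wait pair, and
a plain bool stands in for XLOG_FORCED_SHUTDOWN):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum buf_state { BUF_ACTIVE, BUF_DIRTY, BUF_SYNCING };

struct fake_log {
        pthread_mutex_t lock;           /* stands in for l_icloglock */
        pthread_cond_t  force_wait;     /* stands in for ic_force_wait */
        enum buf_state  state;
        bool            shut_down;      /* stands in for XLOG_FORCED_SHUTDOWN */
};

/* Called with log->lock held; always drops it, like the real helper. */
static int wait_on_buf(struct fake_log *log)
{
        while (!log->shut_down &&
               log->state != BUF_ACTIVE && log->state != BUF_DIRTY)
                pthread_cond_wait(&log->force_wait, &log->lock);
        pthread_mutex_unlock(&log->lock);

        return log->shut_down ? -EIO : 0;
}

int main(void)
{
        struct fake_log log = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .force_wait = PTHREAD_COND_INITIALIZER,
                .state = BUF_DIRTY,     /* already written back, no sleep */
        };

        pthread_mutex_lock(&log.lock);
        printf("wait_on_buf() = %d\n", wait_on_buf(&log));
        return 0;
}

The point the commit message makes is only that the bail-out condition
in the shared helper is the log-wide shutdown check rather than the
per-iclog IOERROR state.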

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
---
 fs/xfs/xfs_log.c | 76 ++++++++++++++++++++----------------------------
 1 file changed, 31 insertions(+), 45 deletions(-)

diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 0986983ef6b5..955df2902c2c 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -859,6 +859,31 @@ xfs_log_mount_cancel(
 	xfs_log_unmount(mp);
 }
 
+/*
+ * Wait for the iclog to be written disk, or return an error if the log has been
+ * shut down.
+ */
+static int
+xlog_wait_on_iclog(
+	struct xlog_in_core	*iclog)
+		__releases(iclog->ic_log->l_icloglock)
+{
+	struct xlog		*log = iclog->ic_log;
+
+	if (!XLOG_FORCED_SHUTDOWN(log) &&
+	    iclog->ic_state != XLOG_STATE_ACTIVE &&
+	    iclog->ic_state != XLOG_STATE_DIRTY) {
+		XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
+		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
+	} else {
+		spin_unlock(&log->l_icloglock);
+	}
+
+	if (XLOG_FORCED_SHUTDOWN(log))
+		return -EIO;
+	return 0;
+}
+
 /*
  * Final log writes as part of unmount.
  *
@@ -926,18 +951,7 @@ xfs_log_write_unmount_record(
 	atomic_inc(&iclog->ic_refcnt);
 	xlog_state_want_sync(log, iclog);
 	error = xlog_state_release_iclog(log, iclog);
-	switch (iclog->ic_state) {
-	default:
-		if (!XLOG_FORCED_SHUTDOWN(log)) {
-			xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
-			break;
-		}
-		/* fall through */
-	case XLOG_STATE_ACTIVE:
-	case XLOG_STATE_DIRTY:
-		spin_unlock(&log->l_icloglock);
-		break;
-	}
+	xlog_wait_on_iclog(iclog);
 
 	if (tic) {
 		trace_xfs_log_umount_write(log, tic);
@@ -3230,9 +3244,6 @@ xfs_log_force(
 		 * previous iclog and go to sleep.
 		 */
 		iclog = iclog->ic_prev;
-		if (iclog->ic_state == XLOG_STATE_ACTIVE ||
-		    iclog->ic_state == XLOG_STATE_DIRTY)
-			goto out_unlock;
 	} else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
 		if (atomic_read(&iclog->ic_refcnt) == 0) {
 			/*
@@ -3248,8 +3259,7 @@ xfs_log_force(
 			if (xlog_state_release_iclog(log, iclog))
 				goto out_error;
 
-			if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn ||
-			    iclog->ic_state == XLOG_STATE_DIRTY)
+			if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
 				goto out_unlock;
 		} else {
 			/*
@@ -3269,17 +3279,8 @@ xfs_log_force(
 		;
 	}
 
-	if (!(flags & XFS_LOG_SYNC))
-		goto out_unlock;
-
-	if (iclog->ic_state == XLOG_STATE_IOERROR)
-		goto out_error;
-	XFS_STATS_INC(mp, xs_log_force_sleep);
-	xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
-	if (iclog->ic_state == XLOG_STATE_IOERROR)
-		return -EIO;
-	return 0;
-
+	if (flags & XFS_LOG_SYNC)
+		return xlog_wait_on_iclog(iclog);
 out_unlock:
 	spin_unlock(&log->l_icloglock);
 	return 0;
@@ -3310,9 +3311,6 @@ __xfs_log_force_lsn(
 			goto out_unlock;
 	}
 
-	if (iclog->ic_state == XLOG_STATE_DIRTY)
-		goto out_unlock;
-
 	if (iclog->ic_state == XLOG_STATE_ACTIVE) {
 		/*
 		 * We sleep here if we haven't already slept (e.g. this is the
@@ -3346,20 +3344,8 @@ __xfs_log_force_lsn(
 			*log_flushed = 1;
 	}
 
-	if (!(flags & XFS_LOG_SYNC) ||
-	    (iclog->ic_state == XLOG_STATE_ACTIVE ||
-	     iclog->ic_state == XLOG_STATE_DIRTY))
-		goto out_unlock;
-
-	if (iclog->ic_state == XLOG_STATE_IOERROR)
-		goto out_error;
-
-	XFS_STATS_INC(mp, xs_log_force_sleep);
-	xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
-	if (iclog->ic_state == XLOG_STATE_IOERROR)
-		return -EIO;
-	return 0;
-
+	if (flags & XFS_LOG_SYNC)
+		return xlog_wait_on_iclog(iclog);
 out_unlock:
 	spin_unlock(&log->l_icloglock);
 	return 0;
-- 
2.25.1


* [PATCH 3/8] xfs: simplify the xfs_log_release_iclog calling convention
  2020-03-20  6:53 more log cleanups Christoph Hellwig
  2020-03-20  6:53 ` [PATCH 1/8] xfs: merge xlog_cil_push into xlog_cil_push_work Christoph Hellwig
  2020-03-20  6:53 ` [PATCH 2/8] xfs: factor out a xlog_wait_on_iclog helper Christoph Hellwig
@ 2020-03-20  6:53 ` Christoph Hellwig
  2020-03-20  6:53 ` [PATCH 4/8] xfs: simplify log shutdown checking in xfs_log_release_iclog Christoph Hellwig
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 11+ messages in thread
From: Christoph Hellwig @ 2020-03-20  6:53 UTC (permalink / raw)
  To: linux-xfs; +Cc: Dave Chinner, Brian Foster, Darrick J . Wong

The only caller of xfs_log_release_iclog doesn't care about the return
value, so remove it.  Also don't bother passing the mount pointer,
given that we can trivially derive it from the iclog.
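
The second half of the change is just the common pattern of following a
back-pointer the object already carries instead of passing a redundant
argument; a trivial stand-alone sketch (invented names, not XFS code):

#include <stdio.h>

struct mount { const char *name; };

struct iclog {
        struct mount    *mp;            /* back-pointer, like ic_log->l_mp */
};

/* Before: void release(struct mount *mp, struct iclog *ic); */
static void release(struct iclog *ic)
{
        struct mount    *mp = ic->mp;   /* derived, not passed in */

        printf("releasing iclog on %s\n", mp->name);
}

int main(void)
{
        struct mount    m = { .name = "fake0" };
        struct iclog    ic = { .mp = &m };

        release(&ic);
        return 0;
}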

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
---
 fs/xfs/xfs_log.c     | 10 ++++------
 fs/xfs/xfs_log.h     |  3 +--
 fs/xfs/xfs_log_cil.c |  2 +-
 3 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 955df2902c2c..17ba92b115ea 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -597,12 +597,11 @@ xlog_state_release_iclog(
 	return 0;
 }
 
-int
+void
 xfs_log_release_iclog(
-	struct xfs_mount        *mp,
 	struct xlog_in_core	*iclog)
 {
-	struct xlog		*log = mp->m_log;
+	struct xlog		*log = iclog->ic_log;
 	bool			sync;
 
 	if (iclog->ic_state == XLOG_STATE_IOERROR)
@@ -618,10 +617,9 @@ xfs_log_release_iclog(
 		if (sync)
 			xlog_sync(log, iclog);
 	}
-	return 0;
+	return;
 error:
-	xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
-	return -EIO;
+	xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
 }
 
 /*
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 84e06805160f..b38602216c5a 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -121,8 +121,7 @@ void	xfs_log_mount_cancel(struct xfs_mount *);
 xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
 xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
 void	  xfs_log_space_wake(struct xfs_mount *mp);
-int	  xfs_log_release_iclog(struct xfs_mount *mp,
-			 struct xlog_in_core	 *iclog);
+void	  xfs_log_release_iclog(struct xlog_in_core *iclog);
 int	  xfs_log_reserve(struct xfs_mount *mp,
 			  int		   length,
 			  int		   count,
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 9ef0f8b555a4..278166811c80 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -866,7 +866,7 @@ xlog_cil_push_work(
 	spin_unlock(&cil->xc_push_lock);
 
 	/* release the hounds! */
-	xfs_log_release_iclog(log->l_mp, commit_iclog);
+	xfs_log_release_iclog(commit_iclog);
 	return;
 
 out_skip:
-- 
2.25.1


* [PATCH 4/8] xfs: simplify log shutdown checking in xfs_log_release_iclog
  2020-03-20  6:53 more log cleanups Christoph Hellwig
                   ` (2 preceding siblings ...)
  2020-03-20  6:53 ` [PATCH 3/8] xfs: simplify the xfs_log_release_iclog calling convention Christoph Hellwig
@ 2020-03-20  6:53 ` Christoph Hellwig
  2020-03-20  6:53 ` [PATCH 5/8] xfs: remove the aborted parameter to xlog_state_done_syncing Christoph Hellwig
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 11+ messages in thread
From: Christoph Hellwig @ 2020-03-20  6:53 UTC (permalink / raw)
  To: linux-xfs; +Cc: Dave Chinner, Brian Foster, Darrick J . Wong

There is no need to check for the ioerror state before taking the lock,
as the shutdown case is not a fast path.  Also remove the call to force
shutdown the file system, as it must already have been shut down for an
iclog to be in the ioerror state, and clean up the flow of the function
a bit.
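
The rework leans on atomic_dec_and_lock(), which only takes the lock
when the reference count is about to hit zero.  A rough userspace
approximation of those semantics (illustrative only, not the kernel
implementation):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Return true with *lk held iff this call dropped the last reference. */
static bool dec_and_lock(atomic_int *cnt, pthread_mutex_t *lk)
{
        int old = atomic_load(cnt);

        /* Fast path: not the last reference, decrement without the lock. */
        while (old > 1) {
                if (atomic_compare_exchange_weak(cnt, &old, old - 1))
                        return false;
        }

        /* Slow path: take the lock, then do the final decrement under it. */
        pthread_mutex_lock(lk);
        if (atomic_fetch_sub(cnt, 1) == 1)
                return true;
        pthread_mutex_unlock(lk);
        return false;
}

int main(void)
{
        atomic_int      refcnt = 2;
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

        if (!dec_and_lock(&refcnt, &lock))      /* 2 -> 1, lockless */
                puts("not the last reference");

        if (dec_and_lock(&refcnt, &lock)) {     /* 1 -> 0, lock held */
                puts("last reference, tear down under the lock");
                pthread_mutex_unlock(&lock);
        }
        return 0;
}

In the patch this means the IOERROR check only needs to happen in the
unlikely last-reference branch, under the icloglock, so the unlocked
pre-check can simply go away.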

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
---
 fs/xfs/xfs_log.c | 20 ++++++--------------
 1 file changed, 6 insertions(+), 14 deletions(-)

diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 17ba92b115ea..7af9c292540b 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -602,24 +602,16 @@ xfs_log_release_iclog(
 	struct xlog_in_core	*iclog)
 {
 	struct xlog		*log = iclog->ic_log;
-	bool			sync;
-
-	if (iclog->ic_state == XLOG_STATE_IOERROR)
-		goto error;
+	bool			sync = false;
 
 	if (atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) {
-		if (iclog->ic_state == XLOG_STATE_IOERROR) {
-			spin_unlock(&log->l_icloglock);
-			goto error;
-		}
-		sync = __xlog_state_release_iclog(log, iclog);
+		if (iclog->ic_state != XLOG_STATE_IOERROR)
+			sync = __xlog_state_release_iclog(log, iclog);
 		spin_unlock(&log->l_icloglock);
-		if (sync)
-			xlog_sync(log, iclog);
 	}
-	return;
-error:
-	xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+
+	if (sync)
+		xlog_sync(log, iclog);
 }
 
 /*
-- 
2.25.1


* [PATCH 5/8] xfs: remove the aborted parameter to xlog_state_done_syncing
  2020-03-20  6:53 more log cleanups Christoph Hellwig
                   ` (3 preceding siblings ...)
  2020-03-20  6:53 ` [PATCH 4/8] xfs: simplify log shutdown checking in xfs_log_release_iclog Christoph Hellwig
@ 2020-03-20  6:53 ` Christoph Hellwig
  2020-03-23 15:27   ` Darrick J. Wong
  2020-03-20  6:53 ` [PATCH 6/8] xfs: refactor xlog_state_clean_iclog Christoph Hellwig
                   ` (2 subsequent siblings)
  7 siblings, 1 reply; 11+ messages in thread
From: Christoph Hellwig @ 2020-03-20  6:53 UTC (permalink / raw)
  To: linux-xfs; +Cc: Dave Chinner, Brian Foster

We can just check for a shut down log all the way down in
xlog_cil_committed instead of passing the parameter.  This means a
slight behavior change in that we now also abort log items if the
shutdown came in halfway into the I/O completion processing, which
actually is the right thing to do.
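
In other words, the abort flag is now derived from global state at the
point of use instead of being threaded through the call chain.  A
minimal sketch (invented names) of that shape:

#include <stdbool.h>
#include <stdio.h>

struct fake_log { bool forced_shutdown; };      /* XLOG_FORCED_SHUTDOWN stand-in */

struct fake_ctx { struct fake_log *log; };

/* Before: static void committed(struct fake_ctx *ctx, bool aborted); */
static void committed(struct fake_ctx *ctx)
{
        bool aborted = ctx->log->forced_shutdown;       /* derived where used */

        printf("commit %s\n", aborted ? "aborted" : "completed");
}

int main(void)
{
        struct fake_log log = { .forced_shutdown = false };
        struct fake_ctx ctx = { .log = &log };

        committed(&ctx);                /* completed */
        log.forced_shutdown = true;
        committed(&ctx);                /* now seen as aborted */
        return 0;
}

This is also where the behavior change comes from: a shutdown that
races in during I/O completion is now observed by xlog_cil_committed
itself.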

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Brian Foster <bfoster@redhat.com>
---
 fs/xfs/xfs_log.c     | 48 +++++++++++++++-----------------------------
 fs/xfs/xfs_log.h     |  2 +-
 fs/xfs/xfs_log_cil.c | 12 +++++------
 3 files changed, 23 insertions(+), 39 deletions(-)

diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 7af9c292540b..4f9303524efb 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -47,8 +47,7 @@ xlog_dealloc_log(
 
 /* local state machine functions */
 STATIC void xlog_state_done_syncing(
-	struct xlog_in_core	*iclog,
-	bool			aborted);
+	struct xlog_in_core	*iclog);
 STATIC int
 xlog_state_get_iclog_space(
 	struct xlog		*log,
@@ -1254,7 +1253,6 @@ xlog_ioend_work(
 	struct xlog_in_core     *iclog =
 		container_of(work, struct xlog_in_core, ic_end_io_work);
 	struct xlog		*log = iclog->ic_log;
-	bool			aborted = false;
 	int			error;
 
 	error = blk_status_to_errno(iclog->ic_bio.bi_status);
@@ -1270,17 +1268,9 @@ xlog_ioend_work(
 	if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
 		xfs_alert(log->l_mp, "log I/O error %d", error);
 		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
-		/*
-		 * This flag will be propagated to the trans-committed
-		 * callback routines to let them know that the log-commit
-		 * didn't succeed.
-		 */
-		aborted = true;
-	} else if (iclog->ic_state == XLOG_STATE_IOERROR) {
-		aborted = true;
 	}
 
-	xlog_state_done_syncing(iclog, aborted);
+	xlog_state_done_syncing(iclog);
 	bio_uninit(&iclog->ic_bio);
 
 	/*
@@ -1759,7 +1749,7 @@ xlog_write_iclog(
 		 * the buffer manually, the code needs to be kept in sync
 		 * with the I/O completion path.
 		 */
-		xlog_state_done_syncing(iclog, true);
+		xlog_state_done_syncing(iclog);
 		up(&iclog->ic_sema);
 		return;
 	}
@@ -2783,8 +2773,7 @@ xlog_state_iodone_process_iclog(
 static void
 xlog_state_do_iclog_callbacks(
 	struct xlog		*log,
-	struct xlog_in_core	*iclog,
-	bool			aborted)
+	struct xlog_in_core	*iclog)
 		__releases(&log->l_icloglock)
 		__acquires(&log->l_icloglock)
 {
@@ -2796,7 +2785,7 @@ xlog_state_do_iclog_callbacks(
 		list_splice_init(&iclog->ic_callbacks, &tmp);
 
 		spin_unlock(&iclog->ic_callback_lock);
-		xlog_cil_process_committed(&tmp, aborted);
+		xlog_cil_process_committed(&tmp);
 		spin_lock(&iclog->ic_callback_lock);
 	}
 
@@ -2811,8 +2800,7 @@ xlog_state_do_iclog_callbacks(
 
 STATIC void
 xlog_state_do_callback(
-	struct xlog		*log,
-	bool			aborted)
+	struct xlog		*log)
 {
 	struct xlog_in_core	*iclog;
 	struct xlog_in_core	*first_iclog;
@@ -2853,7 +2841,7 @@ xlog_state_do_callback(
 			 * we'll have to run at least one more complete loop.
 			 */
 			cycled_icloglock = true;
-			xlog_state_do_iclog_callbacks(log, iclog, aborted);
+			xlog_state_do_iclog_callbacks(log, iclog);
 
 			xlog_state_clean_iclog(log, iclog);
 			iclog = iclog->ic_next;
@@ -2891,25 +2879,22 @@ xlog_state_do_callback(
  */
 STATIC void
 xlog_state_done_syncing(
-	struct xlog_in_core	*iclog,
-	bool			aborted)
+	struct xlog_in_core	*iclog)
 {
 	struct xlog		*log = iclog->ic_log;
 
 	spin_lock(&log->l_icloglock);
-
 	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
 
 	/*
 	 * If we got an error, either on the first buffer, or in the case of
-	 * split log writes, on the second, we mark ALL iclogs STATE_IOERROR,
-	 * and none should ever be attempted to be written to disk
-	 * again.
+	 * split log writes, on the second, we shut down the file system and
+	 * no iclogs should ever be attempted to be written to disk again.
 	 */
-	if (iclog->ic_state == XLOG_STATE_SYNCING)
+	if (!XLOG_FORCED_SHUTDOWN(log)) {
+		ASSERT(iclog->ic_state == XLOG_STATE_SYNCING);
 		iclog->ic_state = XLOG_STATE_DONE_SYNC;
-	else
-		ASSERT(iclog->ic_state == XLOG_STATE_IOERROR);
+	}
 
 	/*
 	 * Someone could be sleeping prior to writing out the next
@@ -2918,9 +2903,8 @@ xlog_state_done_syncing(
 	 */
 	wake_up_all(&iclog->ic_write_wait);
 	spin_unlock(&log->l_icloglock);
-	xlog_state_do_callback(log, aborted);	/* also cleans log */
-}	/* xlog_state_done_syncing */
-
+	xlog_state_do_callback(log);	/* also cleans log */
+}
 
 /*
  * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
@@ -3884,7 +3868,7 @@ xfs_log_force_umount(
 	spin_lock(&log->l_cilp->xc_push_lock);
 	wake_up_all(&log->l_cilp->xc_commit_wait);
 	spin_unlock(&log->l_cilp->xc_push_lock);
-	xlog_state_do_callback(log, true);
+	xlog_state_do_callback(log);
 
 	/* return non-zero if log IOERROR transition had already happened */
 	return retval;
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index b38602216c5a..cc77cc36560a 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -137,7 +137,7 @@ void	  xfs_log_ticket_put(struct xlog_ticket *ticket);
 
 void	xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
 				xfs_lsn_t *commit_lsn, bool regrant);
-void	xlog_cil_process_committed(struct list_head *list, bool aborted);
+void	xlog_cil_process_committed(struct list_head *list);
 bool	xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);
 
 void	xfs_log_work_queue(struct xfs_mount *mp);
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 278166811c80..64cc0bf2ab3b 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -574,10 +574,10 @@ xlog_discard_busy_extents(
  */
 static void
 xlog_cil_committed(
-	struct xfs_cil_ctx	*ctx,
-	bool			abort)
+	struct xfs_cil_ctx	*ctx)
 {
 	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;
+	bool			abort = XLOG_FORCED_SHUTDOWN(ctx->cil->xc_log);
 
 	/*
 	 * If the I/O failed, we're aborting the commit and already shutdown.
@@ -613,15 +613,14 @@ xlog_cil_committed(
 
 void
 xlog_cil_process_committed(
-	struct list_head	*list,
-	bool			aborted)
+	struct list_head	*list)
 {
 	struct xfs_cil_ctx	*ctx;
 
 	while ((ctx = list_first_entry_or_null(list,
 			struct xfs_cil_ctx, iclog_entry))) {
 		list_del(&ctx->iclog_entry);
-		xlog_cil_committed(ctx, aborted);
+		xlog_cil_committed(ctx);
 	}
 }
 
@@ -878,7 +877,8 @@ xlog_cil_push_work(
 out_abort_free_ticket:
 	xfs_log_ticket_put(tic);
 out_abort:
-	xlog_cil_committed(ctx, true);
+	ASSERT(XLOG_FORCED_SHUTDOWN(log));
+	xlog_cil_committed(ctx);
 }
 
 /*
-- 
2.25.1


* [PATCH 6/8] xfs: refactor xlog_state_clean_iclog
  2020-03-20  6:53 more log cleanups Christoph Hellwig
                   ` (4 preceding siblings ...)
  2020-03-20  6:53 ` [PATCH 5/8] xfs: remove the aborted parameter to xlog_state_done_syncing Christoph Hellwig
@ 2020-03-20  6:53 ` Christoph Hellwig
  2020-03-20 11:51   ` Brian Foster
  2020-03-20  6:53 ` [PATCH 7/8] xfs: move the ioerror check out of xlog_state_clean_iclog Christoph Hellwig
  2020-03-20  6:53 ` [PATCH 8/8] xfs: remove xlog_state_want_sync Christoph Hellwig
  7 siblings, 1 reply; 11+ messages in thread
From: Christoph Hellwig @ 2020-03-20  6:53 UTC (permalink / raw)
  To: linux-xfs; +Cc: Dave Chinner

Factor out a few self-contained helpers from xlog_state_clean_iclog, and
update the documentation so it primarily documents why things happen
instead of how.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/xfs/xfs_log.c | 180 +++++++++++++++++++++++------------------------
 1 file changed, 88 insertions(+), 92 deletions(-)

diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 4f9303524efb..6facbb91b8a8 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -2540,111 +2540,107 @@ xlog_write(
  *****************************************************************************
  */
 
+static void
+xlog_state_activate_iclog(
+	struct xlog_in_core	*iclog,
+	int			*iclogs_changed)
+{
+	ASSERT(list_empty_careful(&iclog->ic_callbacks));
+
+	/*
+	 * If the number of ops in this iclog indicate it just contains the
+	 * dummy transaction, we can change state into IDLE (the second time
+	 * around). Otherwise we should change the state into NEED a dummy.
+	 * We don't need to cover the dummy.
+	 */
+	if (*iclogs_changed == 0 &&
+	    iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) {
+		*iclogs_changed = 1;
+	} else {
+		/*
+		 * We have two dirty iclogs so start over.  This could also be
+		 * num of ops indicating this is not the dummy going out.
+		 */
+		*iclogs_changed = 2;
+	}
+
+	iclog->ic_state	= XLOG_STATE_ACTIVE;
+	iclog->ic_offset = 0;
+	iclog->ic_header.h_num_logops = 0;
+	memset(iclog->ic_header.h_cycle_data, 0,
+		sizeof(iclog->ic_header.h_cycle_data));
+	iclog->ic_header.h_lsn = 0;
+}
+
 /*
- * An iclog has just finished IO completion processing, so we need to update
- * the iclog state and propagate that up into the overall log state. Hence we
- * prepare the iclog for cleaning, and then clean all the pending dirty iclogs
- * starting from the head, and then wake up any threads that are waiting for the
- * iclog to be marked clean.
- *
- * The ordering of marking iclogs ACTIVE must be maintained, so an iclog
- * doesn't become ACTIVE beyond one that is SYNCING.  This is also required to
- * maintain the notion that we use a ordered wait queue to hold off would be
- * writers to the log when every iclog is trying to sync to disk.
- *
- * Caller must hold the icloglock before calling us.
- *
- * State Change: !IOERROR -> DIRTY -> ACTIVE
+ * Loop through all iclogs and mark all iclogs currently marked DIRTY as
+ * ACTIVE after iclog I/O has completed.
  */
-STATIC void
-xlog_state_clean_iclog(
+static void
+xlog_state_activate_iclogs(
 	struct xlog		*log,
-	struct xlog_in_core	*dirty_iclog)
+	int			*iclogs_changed)
 {
-	struct xlog_in_core	*iclog;
-	int			changed = 0;
-
-	/* Prepare the completed iclog. */
-	if (dirty_iclog->ic_state != XLOG_STATE_IOERROR)
-		dirty_iclog->ic_state = XLOG_STATE_DIRTY;
+	struct xlog_in_core	*iclog = log->l_iclog;
 
-	/* Walk all the iclogs to update the ordered active state. */
-	iclog = log->l_iclog;
 	do {
-		if (iclog->ic_state == XLOG_STATE_DIRTY) {
-			iclog->ic_state	= XLOG_STATE_ACTIVE;
-			iclog->ic_offset       = 0;
-			ASSERT(list_empty_careful(&iclog->ic_callbacks));
-			/*
-			 * If the number of ops in this iclog indicate it just
-			 * contains the dummy transaction, we can
-			 * change state into IDLE (the second time around).
-			 * Otherwise we should change the state into
-			 * NEED a dummy.
-			 * We don't need to cover the dummy.
-			 */
-			if (!changed &&
-			   (be32_to_cpu(iclog->ic_header.h_num_logops) ==
-			   		XLOG_COVER_OPS)) {
-				changed = 1;
-			} else {
-				/*
-				 * We have two dirty iclogs so start over
-				 * This could also be num of ops indicates
-				 * this is not the dummy going out.
-				 */
-				changed = 2;
-			}
-			iclog->ic_header.h_num_logops = 0;
-			memset(iclog->ic_header.h_cycle_data, 0,
-			      sizeof(iclog->ic_header.h_cycle_data));
-			iclog->ic_header.h_lsn = 0;
-		} else if (iclog->ic_state == XLOG_STATE_ACTIVE)
-			/* do nothing */;
-		else
-			break;	/* stop cleaning */
-		iclog = iclog->ic_next;
-	} while (iclog != log->l_iclog);
-
+		if (iclog->ic_state == XLOG_STATE_DIRTY)
+			xlog_state_activate_iclog(iclog, iclogs_changed);
+		/*
+		 * The ordering of marking iclogs ACTIVE must be maintained, so
+		 * an iclog doesn't become ACTIVE beyond one that is SYNCING.
+		 */
+		else if (iclog->ic_state != XLOG_STATE_ACTIVE)
+			break;
+	} while ((iclog = iclog->ic_next) != log->l_iclog);
+}
 
+static int
+xlog_covered_state(
+	int			prev_state,
+	int			iclogs_changed)
+{
 	/*
-	 * Wake up threads waiting in xfs_log_force() for the dirty iclog
-	 * to be cleaned.
+	 * We usually go to NEED. But we go to NEED2 if the changed indicates we
+	 * are done writing the dummy record.  If we are done with the second
+	 * dummy recored (DONE2), then we go to IDLE.
 	 */
-	wake_up_all(&dirty_iclog->ic_force_wait);
+	switch (prev_state) {
+	case XLOG_STATE_COVER_IDLE:
+	case XLOG_STATE_COVER_NEED:
+	case XLOG_STATE_COVER_NEED2:
+		break;
+	case XLOG_STATE_COVER_DONE:
+		if (iclogs_changed == 1)
+			return XLOG_STATE_COVER_NEED2;
+		break;
+	case XLOG_STATE_COVER_DONE2:
+		if (iclogs_changed == 1)
+			return XLOG_STATE_COVER_IDLE;
+		break;
+	default:
+		ASSERT(0);
+	}
 
-	/*
-	 * Change state for the dummy log recording.
-	 * We usually go to NEED. But we go to NEED2 if the changed indicates
-	 * we are done writing the dummy record.
-	 * If we are done with the second dummy recored (DONE2), then
-	 * we go to IDLE.
-	 */
-	if (changed) {
-		switch (log->l_covered_state) {
-		case XLOG_STATE_COVER_IDLE:
-		case XLOG_STATE_COVER_NEED:
-		case XLOG_STATE_COVER_NEED2:
-			log->l_covered_state = XLOG_STATE_COVER_NEED;
-			break;
+	return XLOG_STATE_COVER_NEED;
+}
 
-		case XLOG_STATE_COVER_DONE:
-			if (changed == 1)
-				log->l_covered_state = XLOG_STATE_COVER_NEED2;
-			else
-				log->l_covered_state = XLOG_STATE_COVER_NEED;
-			break;
+STATIC void
+xlog_state_clean_iclog(
+	struct xlog		*log,
+	struct xlog_in_core	*dirty_iclog)
+{
+	int			iclogs_changed = 0;
 
-		case XLOG_STATE_COVER_DONE2:
-			if (changed == 1)
-				log->l_covered_state = XLOG_STATE_COVER_IDLE;
-			else
-				log->l_covered_state = XLOG_STATE_COVER_NEED;
-			break;
+	if (dirty_iclog->ic_state != XLOG_STATE_IOERROR)
+		dirty_iclog->ic_state = XLOG_STATE_DIRTY;
 
-		default:
-			ASSERT(0);
-		}
+	xlog_state_activate_iclogs(log, &iclogs_changed);
+	wake_up_all(&dirty_iclog->ic_force_wait);
+
+	if (iclogs_changed) {
+		log->l_covered_state = xlog_covered_state(log->l_covered_state,
+				iclogs_changed);
 	}
 }
 
-- 
2.25.1


* [PATCH 7/8] xfs: move the ioerror check out of xlog_state_clean_iclog
  2020-03-20  6:53 more log cleanups Christoph Hellwig
                   ` (5 preceding siblings ...)
  2020-03-20  6:53 ` [PATCH 6/8] xfs: refactor xlog_state_clean_iclog Christoph Hellwig
@ 2020-03-20  6:53 ` Christoph Hellwig
  2020-03-20  6:53 ` [PATCH 8/8] xfs: remove xlog_state_want_sync Christoph Hellwig
  7 siblings, 0 replies; 11+ messages in thread
From: Christoph Hellwig @ 2020-03-20  6:53 UTC (permalink / raw)
  To: linux-xfs; +Cc: Dave Chinner, Brian Foster, Darrick J . Wong

Use the shutdown flag in the log to bypass xlog_state_clean_iclog
entirely when the log has already been shut down.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
---
 fs/xfs/xfs_log.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 6facbb91b8a8..7e39835d9852 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -2632,8 +2632,7 @@ xlog_state_clean_iclog(
 {
 	int			iclogs_changed = 0;
 
-	if (dirty_iclog->ic_state != XLOG_STATE_IOERROR)
-		dirty_iclog->ic_state = XLOG_STATE_DIRTY;
+	dirty_iclog->ic_state = XLOG_STATE_DIRTY;
 
 	xlog_state_activate_iclogs(log, &iclogs_changed);
 	wake_up_all(&dirty_iclog->ic_force_wait);
@@ -2838,8 +2837,10 @@ xlog_state_do_callback(
 			 */
 			cycled_icloglock = true;
 			xlog_state_do_iclog_callbacks(log, iclog);
-
-			xlog_state_clean_iclog(log, iclog);
+			if (XLOG_FORCED_SHUTDOWN(log))
+				wake_up_all(&iclog->ic_force_wait);
+			else
+				xlog_state_clean_iclog(log, iclog);
 			iclog = iclog->ic_next;
 		} while (first_iclog != iclog);
 
-- 
2.25.1


* [PATCH 8/8] xfs: remove xlog_state_want_sync
  2020-03-20  6:53 more log cleanups Christoph Hellwig
                   ` (6 preceding siblings ...)
  2020-03-20  6:53 ` [PATCH 7/8] xfs: move the ioerror check out of xlog_state_clean_iclog Christoph Hellwig
@ 2020-03-20  6:53 ` Christoph Hellwig
  7 siblings, 0 replies; 11+ messages in thread
From: Christoph Hellwig @ 2020-03-20  6:53 UTC (permalink / raw)
  To: linux-xfs; +Cc: Dave Chinner, Brian Foster, Darrick J . Wong

Open code the xlog_state_want_sync logic in its two callers given that
this function is a trivial wrapper around xlog_state_switch_iclogs.

Move the lockdep assert into xlog_state_switch_iclogs to not lose this
debugging aid, and improve the comment that documents
xlog_state_switch_iclogs as well.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
---
 fs/xfs/xfs_log.c | 50 +++++++++++++++++-------------------------------
 1 file changed, 18 insertions(+), 32 deletions(-)

diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 7e39835d9852..2a90a483c2d6 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -62,11 +62,6 @@ xlog_state_switch_iclogs(
 	struct xlog_in_core	*iclog,
 	int			eventual_size);
 STATIC void
-xlog_state_want_sync(
-	struct xlog		*log,
-	struct xlog_in_core	*iclog);
-
-STATIC void
 xlog_grant_push_ail(
 	struct xlog		*log,
 	int			need_bytes);
@@ -938,7 +933,11 @@ xfs_log_write_unmount_record(
 	spin_lock(&log->l_icloglock);
 	iclog = log->l_iclog;
 	atomic_inc(&iclog->ic_refcnt);
-	xlog_state_want_sync(log, iclog);
+	if (iclog->ic_state == XLOG_STATE_ACTIVE)
+		xlog_state_switch_iclogs(log, iclog, 0);
+	else
+		ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
+		       iclog->ic_state == XLOG_STATE_IOERROR);
 	error = xlog_state_release_iclog(log, iclog);
 	xlog_wait_on_iclog(iclog);
 
@@ -2293,7 +2292,11 @@ xlog_write_copy_finish(
 		*record_cnt = 0;
 		*data_cnt = 0;
 
-		xlog_state_want_sync(log, iclog);
+		if (iclog->ic_state == XLOG_STATE_ACTIVE)
+			xlog_state_switch_iclogs(log, iclog, 0);
+		else
+			ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
+			       iclog->ic_state == XLOG_STATE_IOERROR);
 		if (!commit_iclog)
 			goto release_iclog;
 		spin_unlock(&log->l_icloglock);
@@ -3108,11 +3111,12 @@ xlog_ungrant_log_space(
 }
 
 /*
- * This routine will mark the current iclog in the ring as WANT_SYNC
- * and move the current iclog pointer to the next iclog in the ring.
- * When this routine is called from xlog_state_get_iclog_space(), the
- * exact size of the iclog has not yet been determined.  All we know is
- * that every data block.  We have run out of space in this log record.
+ * Mark the current iclog in the ring as WANT_SYNC and move the current iclog
+ * pointer to the next iclog in the ring.
+ *
+ * When called from xlog_state_get_iclog_space(), the exact size of the iclog
+ * has not yet been determined, all we know is that we have run out of space in
+ * the current iclog.
  */
 STATIC void
 xlog_state_switch_iclogs(
@@ -3121,6 +3125,8 @@ xlog_state_switch_iclogs(
 	int			eventual_size)
 {
 	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
+	assert_spin_locked(&log->l_icloglock);
+
 	if (!eventual_size)
 		eventual_size = iclog->ic_offset;
 	iclog->ic_state = XLOG_STATE_WANT_SYNC;
@@ -3362,26 +3368,6 @@ xfs_log_force_lsn(
 	return ret;
 }
 
-/*
- * Called when we want to mark the current iclog as being ready to sync to
- * disk.
- */
-STATIC void
-xlog_state_want_sync(
-	struct xlog		*log,
-	struct xlog_in_core	*iclog)
-{
-	assert_spin_locked(&log->l_icloglock);
-
-	if (iclog->ic_state == XLOG_STATE_ACTIVE) {
-		xlog_state_switch_iclogs(log, iclog, 0);
-	} else {
-		ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
-		       iclog->ic_state == XLOG_STATE_IOERROR);
-	}
-}
-
-
 /*****************************************************************************
  *
  *		TICKET functions
-- 
2.25.1


* Re: [PATCH 6/8] xfs: refactor xlog_state_clean_iclog
  2020-03-20  6:53 ` [PATCH 6/8] xfs: refactor xlog_state_clean_iclog Christoph Hellwig
@ 2020-03-20 11:51   ` Brian Foster
  0 siblings, 0 replies; 11+ messages in thread
From: Brian Foster @ 2020-03-20 11:51 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-xfs, Dave Chinner

On Fri, Mar 20, 2020 at 07:53:09AM +0100, Christoph Hellwig wrote:
> Factor out a few self-contained helpers from xlog_state_clean_iclog, and
> update the documentation so it primarily documents why things happen
> instead of how.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---

Reviewed-by: Brian Foster <bfoster@redhat.com>

>  fs/xfs/xfs_log.c | 180 +++++++++++++++++++++++------------------------
>  1 file changed, 88 insertions(+), 92 deletions(-)
> 
> diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
> index 4f9303524efb..6facbb91b8a8 100644
> --- a/fs/xfs/xfs_log.c
> +++ b/fs/xfs/xfs_log.c
> @@ -2540,111 +2540,107 @@ xlog_write(
>   *****************************************************************************
>   */
>  
> +static void
> +xlog_state_activate_iclog(
> +	struct xlog_in_core	*iclog,
> +	int			*iclogs_changed)
> +{
> +	ASSERT(list_empty_careful(&iclog->ic_callbacks));
> +
> +	/*
> +	 * If the number of ops in this iclog indicate it just contains the
> +	 * dummy transaction, we can change state into IDLE (the second time
> +	 * around). Otherwise we should change the state into NEED a dummy.
> +	 * We don't need to cover the dummy.
> +	 */
> +	if (*iclogs_changed == 0 &&
> +	    iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) {
> +		*iclogs_changed = 1;
> +	} else {
> +		/*
> +		 * We have two dirty iclogs so start over.  This could also be
> +		 * num of ops indicating this is not the dummy going out.
> +		 */
> +		*iclogs_changed = 2;
> +	}
> +
> +	iclog->ic_state	= XLOG_STATE_ACTIVE;
> +	iclog->ic_offset = 0;
> +	iclog->ic_header.h_num_logops = 0;
> +	memset(iclog->ic_header.h_cycle_data, 0,
> +		sizeof(iclog->ic_header.h_cycle_data));
> +	iclog->ic_header.h_lsn = 0;
> +}
> +
>  /*
> - * An iclog has just finished IO completion processing, so we need to update
> - * the iclog state and propagate that up into the overall log state. Hence we
> - * prepare the iclog for cleaning, and then clean all the pending dirty iclogs
> - * starting from the head, and then wake up any threads that are waiting for the
> - * iclog to be marked clean.
> - *
> - * The ordering of marking iclogs ACTIVE must be maintained, so an iclog
> - * doesn't become ACTIVE beyond one that is SYNCING.  This is also required to
> - * maintain the notion that we use a ordered wait queue to hold off would be
> - * writers to the log when every iclog is trying to sync to disk.
> - *
> - * Caller must hold the icloglock before calling us.
> - *
> - * State Change: !IOERROR -> DIRTY -> ACTIVE
> + * Loop through all iclogs and mark all iclogs currently marked DIRTY as
> + * ACTIVE after iclog I/O has completed.
>   */
> -STATIC void
> -xlog_state_clean_iclog(
> +static void
> +xlog_state_activate_iclogs(
>  	struct xlog		*log,
> -	struct xlog_in_core	*dirty_iclog)
> +	int			*iclogs_changed)
>  {
> -	struct xlog_in_core	*iclog;
> -	int			changed = 0;
> -
> -	/* Prepare the completed iclog. */
> -	if (dirty_iclog->ic_state != XLOG_STATE_IOERROR)
> -		dirty_iclog->ic_state = XLOG_STATE_DIRTY;
> +	struct xlog_in_core	*iclog = log->l_iclog;
>  
> -	/* Walk all the iclogs to update the ordered active state. */
> -	iclog = log->l_iclog;
>  	do {
> -		if (iclog->ic_state == XLOG_STATE_DIRTY) {
> -			iclog->ic_state	= XLOG_STATE_ACTIVE;
> -			iclog->ic_offset       = 0;
> -			ASSERT(list_empty_careful(&iclog->ic_callbacks));
> -			/*
> -			 * If the number of ops in this iclog indicate it just
> -			 * contains the dummy transaction, we can
> -			 * change state into IDLE (the second time around).
> -			 * Otherwise we should change the state into
> -			 * NEED a dummy.
> -			 * We don't need to cover the dummy.
> -			 */
> -			if (!changed &&
> -			   (be32_to_cpu(iclog->ic_header.h_num_logops) ==
> -			   		XLOG_COVER_OPS)) {
> -				changed = 1;
> -			} else {
> -				/*
> -				 * We have two dirty iclogs so start over
> -				 * This could also be num of ops indicates
> -				 * this is not the dummy going out.
> -				 */
> -				changed = 2;
> -			}
> -			iclog->ic_header.h_num_logops = 0;
> -			memset(iclog->ic_header.h_cycle_data, 0,
> -			      sizeof(iclog->ic_header.h_cycle_data));
> -			iclog->ic_header.h_lsn = 0;
> -		} else if (iclog->ic_state == XLOG_STATE_ACTIVE)
> -			/* do nothing */;
> -		else
> -			break;	/* stop cleaning */
> -		iclog = iclog->ic_next;
> -	} while (iclog != log->l_iclog);
> -
> +		if (iclog->ic_state == XLOG_STATE_DIRTY)
> +			xlog_state_activate_iclog(iclog, iclogs_changed);
> +		/*
> +		 * The ordering of marking iclogs ACTIVE must be maintained, so
> +		 * an iclog doesn't become ACTIVE beyond one that is SYNCING.
> +		 */
> +		else if (iclog->ic_state != XLOG_STATE_ACTIVE)
> +			break;
> +	} while ((iclog = iclog->ic_next) != log->l_iclog);
> +}
>  
> +static int
> +xlog_covered_state(
> +	int			prev_state,
> +	int			iclogs_changed)
> +{
>  	/*
> -	 * Wake up threads waiting in xfs_log_force() for the dirty iclog
> -	 * to be cleaned.
> +	 * We usually go to NEED. But we go to NEED2 if the changed indicates we
> +	 * are done writing the dummy record.  If we are done with the second
> +	 * dummy recored (DONE2), then we go to IDLE.
>  	 */
> -	wake_up_all(&dirty_iclog->ic_force_wait);
> +	switch (prev_state) {
> +	case XLOG_STATE_COVER_IDLE:
> +	case XLOG_STATE_COVER_NEED:
> +	case XLOG_STATE_COVER_NEED2:
> +		break;
> +	case XLOG_STATE_COVER_DONE:
> +		if (iclogs_changed == 1)
> +			return XLOG_STATE_COVER_NEED2;
> +		break;
> +	case XLOG_STATE_COVER_DONE2:
> +		if (iclogs_changed == 1)
> +			return XLOG_STATE_COVER_IDLE;
> +		break;
> +	default:
> +		ASSERT(0);
> +	}
>  
> -	/*
> -	 * Change state for the dummy log recording.
> -	 * We usually go to NEED. But we go to NEED2 if the changed indicates
> -	 * we are done writing the dummy record.
> -	 * If we are done with the second dummy recored (DONE2), then
> -	 * we go to IDLE.
> -	 */
> -	if (changed) {
> -		switch (log->l_covered_state) {
> -		case XLOG_STATE_COVER_IDLE:
> -		case XLOG_STATE_COVER_NEED:
> -		case XLOG_STATE_COVER_NEED2:
> -			log->l_covered_state = XLOG_STATE_COVER_NEED;
> -			break;
> +	return XLOG_STATE_COVER_NEED;
> +}
>  
> -		case XLOG_STATE_COVER_DONE:
> -			if (changed == 1)
> -				log->l_covered_state = XLOG_STATE_COVER_NEED2;
> -			else
> -				log->l_covered_state = XLOG_STATE_COVER_NEED;
> -			break;
> +STATIC void
> +xlog_state_clean_iclog(
> +	struct xlog		*log,
> +	struct xlog_in_core	*dirty_iclog)
> +{
> +	int			iclogs_changed = 0;
>  
> -		case XLOG_STATE_COVER_DONE2:
> -			if (changed == 1)
> -				log->l_covered_state = XLOG_STATE_COVER_IDLE;
> -			else
> -				log->l_covered_state = XLOG_STATE_COVER_NEED;
> -			break;
> +	if (dirty_iclog->ic_state != XLOG_STATE_IOERROR)
> +		dirty_iclog->ic_state = XLOG_STATE_DIRTY;
>  
> -		default:
> -			ASSERT(0);
> -		}
> +	xlog_state_activate_iclogs(log, &iclogs_changed);
> +	wake_up_all(&dirty_iclog->ic_force_wait);
> +
> +	if (iclogs_changed) {
> +		log->l_covered_state = xlog_covered_state(log->l_covered_state,
> +				iclogs_changed);
>  	}
>  }
>  
> -- 
> 2.25.1
> 


* Re: [PATCH 5/8] xfs: remove the aborted parameter to xlog_state_done_syncing
  2020-03-20  6:53 ` [PATCH 5/8] xfs: remove the aborted parameter to xlog_state_done_syncing Christoph Hellwig
@ 2020-03-23 15:27   ` Darrick J. Wong
  0 siblings, 0 replies; 11+ messages in thread
From: Darrick J. Wong @ 2020-03-23 15:27 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-xfs, Dave Chinner, Brian Foster

On Fri, Mar 20, 2020 at 07:53:08AM +0100, Christoph Hellwig wrote:
> We can just check for a shut down log all the way down in
> xlog_cil_committed instead of passing the parameter.  This means a
> slight behavior change in that we now also abort log items if the
> shutdown came in halfway into the I/O completion processing, which
> actually is the right thing to do.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> Reviewed-by: Brian Foster <bfoster@redhat.com>

Looks ok now that I've gotten myself straightened out :)

Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>

--D

> ---
>  fs/xfs/xfs_log.c     | 48 +++++++++++++++-----------------------------
>  fs/xfs/xfs_log.h     |  2 +-
>  fs/xfs/xfs_log_cil.c | 12 +++++------
>  3 files changed, 23 insertions(+), 39 deletions(-)
> 
> diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
> index 7af9c292540b..4f9303524efb 100644
> --- a/fs/xfs/xfs_log.c
> +++ b/fs/xfs/xfs_log.c
> @@ -47,8 +47,7 @@ xlog_dealloc_log(
>  
>  /* local state machine functions */
>  STATIC void xlog_state_done_syncing(
> -	struct xlog_in_core	*iclog,
> -	bool			aborted);
> +	struct xlog_in_core	*iclog);
>  STATIC int
>  xlog_state_get_iclog_space(
>  	struct xlog		*log,
> @@ -1254,7 +1253,6 @@ xlog_ioend_work(
>  	struct xlog_in_core     *iclog =
>  		container_of(work, struct xlog_in_core, ic_end_io_work);
>  	struct xlog		*log = iclog->ic_log;
> -	bool			aborted = false;
>  	int			error;
>  
>  	error = blk_status_to_errno(iclog->ic_bio.bi_status);
> @@ -1270,17 +1268,9 @@ xlog_ioend_work(
>  	if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
>  		xfs_alert(log->l_mp, "log I/O error %d", error);
>  		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
> -		/*
> -		 * This flag will be propagated to the trans-committed
> -		 * callback routines to let them know that the log-commit
> -		 * didn't succeed.
> -		 */
> -		aborted = true;
> -	} else if (iclog->ic_state == XLOG_STATE_IOERROR) {
> -		aborted = true;
>  	}
>  
> -	xlog_state_done_syncing(iclog, aborted);
> +	xlog_state_done_syncing(iclog);
>  	bio_uninit(&iclog->ic_bio);
>  
>  	/*
> @@ -1759,7 +1749,7 @@ xlog_write_iclog(
>  		 * the buffer manually, the code needs to be kept in sync
>  		 * with the I/O completion path.
>  		 */
> -		xlog_state_done_syncing(iclog, true);
> +		xlog_state_done_syncing(iclog);
>  		up(&iclog->ic_sema);
>  		return;
>  	}
> @@ -2783,8 +2773,7 @@ xlog_state_iodone_process_iclog(
>  static void
>  xlog_state_do_iclog_callbacks(
>  	struct xlog		*log,
> -	struct xlog_in_core	*iclog,
> -	bool			aborted)
> +	struct xlog_in_core	*iclog)
>  		__releases(&log->l_icloglock)
>  		__acquires(&log->l_icloglock)
>  {
> @@ -2796,7 +2785,7 @@ xlog_state_do_iclog_callbacks(
>  		list_splice_init(&iclog->ic_callbacks, &tmp);
>  
>  		spin_unlock(&iclog->ic_callback_lock);
> -		xlog_cil_process_committed(&tmp, aborted);
> +		xlog_cil_process_committed(&tmp);
>  		spin_lock(&iclog->ic_callback_lock);
>  	}
>  
> @@ -2811,8 +2800,7 @@ xlog_state_do_iclog_callbacks(
>  
>  STATIC void
>  xlog_state_do_callback(
> -	struct xlog		*log,
> -	bool			aborted)
> +	struct xlog		*log)
>  {
>  	struct xlog_in_core	*iclog;
>  	struct xlog_in_core	*first_iclog;
> @@ -2853,7 +2841,7 @@ xlog_state_do_callback(
>  			 * we'll have to run at least one more complete loop.
>  			 */
>  			cycled_icloglock = true;
> -			xlog_state_do_iclog_callbacks(log, iclog, aborted);
> +			xlog_state_do_iclog_callbacks(log, iclog);
>  
>  			xlog_state_clean_iclog(log, iclog);
>  			iclog = iclog->ic_next;
> @@ -2891,25 +2879,22 @@ xlog_state_do_callback(
>   */
>  STATIC void
>  xlog_state_done_syncing(
> -	struct xlog_in_core	*iclog,
> -	bool			aborted)
> +	struct xlog_in_core	*iclog)
>  {
>  	struct xlog		*log = iclog->ic_log;
>  
>  	spin_lock(&log->l_icloglock);
> -
>  	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
>  
>  	/*
>  	 * If we got an error, either on the first buffer, or in the case of
> -	 * split log writes, on the second, we mark ALL iclogs STATE_IOERROR,
> -	 * and none should ever be attempted to be written to disk
> -	 * again.
> +	 * split log writes, on the second, we shut down the file system and
> +	 * no iclogs should ever be attempted to be written to disk again.
>  	 */
> -	if (iclog->ic_state == XLOG_STATE_SYNCING)
> +	if (!XLOG_FORCED_SHUTDOWN(log)) {
> +		ASSERT(iclog->ic_state == XLOG_STATE_SYNCING);
>  		iclog->ic_state = XLOG_STATE_DONE_SYNC;
> -	else
> -		ASSERT(iclog->ic_state == XLOG_STATE_IOERROR);
> +	}
>  
>  	/*
>  	 * Someone could be sleeping prior to writing out the next
> @@ -2918,9 +2903,8 @@ xlog_state_done_syncing(
>  	 */
>  	wake_up_all(&iclog->ic_write_wait);
>  	spin_unlock(&log->l_icloglock);
> -	xlog_state_do_callback(log, aborted);	/* also cleans log */
> -}	/* xlog_state_done_syncing */
> -
> +	xlog_state_do_callback(log);	/* also cleans log */
> +}
>  
>  /*
>   * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
> @@ -3884,7 +3868,7 @@ xfs_log_force_umount(
>  	spin_lock(&log->l_cilp->xc_push_lock);
>  	wake_up_all(&log->l_cilp->xc_commit_wait);
>  	spin_unlock(&log->l_cilp->xc_push_lock);
> -	xlog_state_do_callback(log, true);
> +	xlog_state_do_callback(log);
>  
>  	/* return non-zero if log IOERROR transition had already happened */
>  	return retval;
> diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
> index b38602216c5a..cc77cc36560a 100644
> --- a/fs/xfs/xfs_log.h
> +++ b/fs/xfs/xfs_log.h
> @@ -137,7 +137,7 @@ void	  xfs_log_ticket_put(struct xlog_ticket *ticket);
>  
>  void	xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
>  				xfs_lsn_t *commit_lsn, bool regrant);
> -void	xlog_cil_process_committed(struct list_head *list, bool aborted);
> +void	xlog_cil_process_committed(struct list_head *list);
>  bool	xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);
>  
>  void	xfs_log_work_queue(struct xfs_mount *mp);
> diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
> index 278166811c80..64cc0bf2ab3b 100644
> --- a/fs/xfs/xfs_log_cil.c
> +++ b/fs/xfs/xfs_log_cil.c
> @@ -574,10 +574,10 @@ xlog_discard_busy_extents(
>   */
>  static void
>  xlog_cil_committed(
> -	struct xfs_cil_ctx	*ctx,
> -	bool			abort)
> +	struct xfs_cil_ctx	*ctx)
>  {
>  	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;
> +	bool			abort = XLOG_FORCED_SHUTDOWN(ctx->cil->xc_log);
>  
>  	/*
>  	 * If the I/O failed, we're aborting the commit and already shutdown.
> @@ -613,15 +613,14 @@ xlog_cil_committed(
>  
>  void
>  xlog_cil_process_committed(
> -	struct list_head	*list,
> -	bool			aborted)
> +	struct list_head	*list)
>  {
>  	struct xfs_cil_ctx	*ctx;
>  
>  	while ((ctx = list_first_entry_or_null(list,
>  			struct xfs_cil_ctx, iclog_entry))) {
>  		list_del(&ctx->iclog_entry);
> -		xlog_cil_committed(ctx, aborted);
> +		xlog_cil_committed(ctx);
>  	}
>  }
>  
> @@ -878,7 +877,8 @@ xlog_cil_push_work(
>  out_abort_free_ticket:
>  	xfs_log_ticket_put(tic);
>  out_abort:
> -	xlog_cil_committed(ctx, true);
> +	ASSERT(XLOG_FORCED_SHUTDOWN(log));
> +	xlog_cil_committed(ctx);
>  }
>  
>  /*
> -- 
> 2.25.1
> 
