From: "Darrick J. Wong" <djwong@kernel.org>
To: djwong@kernel.org
Cc: linux-xfs@vger.kernel.org
Subject: [PATCH 10/11] xfs: parallelize inode inactivation
Date: Wed, 10 Mar 2021 19:06:36 -0800
Message-ID: <161543199635.1947934.2885924822578773349.stgit@magnolia>
In-Reply-To: <161543194009.1947934.9910987247994410125.stgit@magnolia>

From: Darrick J. Wong <djwong@kernel.org>

Split the inode inactivation work into per-AG work items so that the
background workers can run in parallel across allocation groups.
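
Roughly, the gc timer moves from struct xfs_mount into each struct
xfs_perag, so a separate delayed work item can be pending for every
allocation group instead of a single one for the whole filesystem.  A
condensed sketch of that shape (illustration only, simplified from the
diff below; the RCU protection and the perag tag bookkeeping are
omitted):

	/* Illustration only -- condensed from the diff below. */
	struct xfs_perag {
		/* ... */
		struct delayed_work	pag_inodegc_work; /* was mp->m_inodegc_work */
	};

	/* Rearm this AG's gc timer if it still has inodes needing inactivation. */
	static void xfs_inodegc_queue(struct xfs_perag *pag)
	{
		struct xfs_mount	*mp = pag->pag_mount;

		if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_INACTIVE_TAG))
			queue_delayed_work(mp->m_gc_workqueue,
					&pag->pag_inodegc_work,
					msecs_to_jiffies(xfs_inodegc_centisecs * 10));
	}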

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
---
 fs/xfs/xfs_icache.c |   62 ++++++++++++++++++++++++++++++++++++++-------------
 fs/xfs/xfs_mount.c  |    3 ++
 fs/xfs/xfs_mount.h  |    4 ++-
 fs/xfs/xfs_super.c  |    1 -
 4 files changed, 52 insertions(+), 18 deletions(-)


diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 594d340bbe37..d5f580b92e48 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -245,11 +245,13 @@ xfs_inode_clear_reclaim_tag(
 /* Queue a new inode gc pass if there are inodes needing inactivation. */
 static void
 xfs_inodegc_queue(
-	struct xfs_mount        *mp)
+	struct xfs_perag	*pag)
 {
+	struct xfs_mount	*mp = pag->pag_mount;
+
 	rcu_read_lock();
-	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_INACTIVE_TAG))
-		queue_delayed_work(mp->m_gc_workqueue, &mp->m_inodegc_work,
+	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_INACTIVE_TAG))
+		queue_delayed_work(mp->m_gc_workqueue, &pag->pag_inodegc_work,
 				msecs_to_jiffies(xfs_inodegc_centisecs * 10));
 	rcu_read_unlock();
 }
@@ -272,7 +274,7 @@ xfs_perag_set_inactive_tag(
 	spin_unlock(&mp->m_perag_lock);
 
 	/* schedule periodic background inode inactivation */
-	xfs_inodegc_queue(mp);
+	xfs_inodegc_queue(pag);
 
 	trace_xfs_perag_set_inactive(mp, pag->pag_agno, -1, _RET_IP_);
 }
@@ -2074,8 +2076,9 @@ void
 xfs_inodegc_worker(
 	struct work_struct	*work)
 {
-	struct xfs_mount	*mp = container_of(to_delayed_work(work),
-					struct xfs_mount, m_inodegc_work);
+	struct xfs_perag	*pag = container_of(to_delayed_work(work),
+					struct xfs_perag, pag_inodegc_work);
+	struct xfs_mount	*mp = pag->pag_mount;
 	int			error;
 
 	/*
@@ -2095,25 +2098,44 @@ xfs_inodegc_worker(
 		xfs_err(mp, "inode inactivation failed, error %d", error);
 
 	sb_end_write(mp->m_super);
-	xfs_inodegc_queue(mp);
+	xfs_inodegc_queue(pag);
 }
 
-/* Force all queued inode inactivation work to run immediately. */
-void
-xfs_inodegc_force(
-	struct xfs_mount	*mp)
+/* Garbage collect all inactive inodes in an AG immediately. */
+static inline bool
+xfs_inodegc_force_pag(
+	struct xfs_perag	*pag)
 {
+	struct xfs_mount	*mp = pag->pag_mount;
+
 	/*
 	 * In order to reset the delay timer to run immediately, we have to
 	 * cancel the work item and requeue it with a zero timer value.  We
 	 * don't care if the worker races with our requeue, because at worst
 	 * we iterate the radix tree and find no inodes to inactivate.
 	 */
-	if (!cancel_delayed_work(&mp->m_inodegc_work))
+	if (!cancel_delayed_work(&pag->pag_inodegc_work))
+		return false;
+
+	queue_delayed_work(mp->m_gc_workqueue, &pag->pag_inodegc_work, 0);
+	return true;
+}
+
+/* Force all queued inode inactivation work to run immediately. */
+void
+xfs_inodegc_force(
+	struct xfs_mount	*mp)
+{
+	struct xfs_perag	*pag;
+	xfs_agnumber_t		agno;
+	bool			queued = false;
+
+	for_each_perag_tag(mp, agno, pag, XFS_ICI_INACTIVE_TAG)
+		queued |= xfs_inodegc_force_pag(pag);
+	if (!queued)
 		return;
 
-	queue_delayed_work(mp->m_gc_workqueue, &mp->m_inodegc_work, 0);
-	flush_delayed_work(&mp->m_inodegc_work);
+	flush_workqueue(mp->m_gc_workqueue);
 }
 
 /* Stop all queued inactivation work. */
@@ -2121,7 +2143,11 @@ void
 xfs_inodegc_stop(
 	struct xfs_mount	*mp)
 {
-	cancel_delayed_work_sync(&mp->m_inodegc_work);
+	struct xfs_perag	*pag;
+	xfs_agnumber_t		agno;
+
+	for_each_perag_tag(mp, agno, pag, XFS_ICI_INACTIVE_TAG)
+		cancel_delayed_work_sync(&pag->pag_inodegc_work);
 }
 
 /* Schedule deferred inode inactivation work. */
@@ -2129,5 +2155,9 @@ void
 xfs_inodegc_start(
 	struct xfs_mount	*mp)
 {
-	xfs_inodegc_queue(mp);
+	struct xfs_perag	*pag;
+	xfs_agnumber_t		agno;
+
+	for_each_perag_tag(mp, agno, pag, XFS_ICI_INACTIVE_TAG)
+		xfs_inodegc_queue(pag);
 }
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index cd015e3d72fc..a5963061485c 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -127,6 +127,7 @@ __xfs_free_perag(
 	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
 
 	ASSERT(!delayed_work_pending(&pag->pag_blockgc_work));
+	ASSERT(!delayed_work_pending(&pag->pag_inodegc_work));
 	ASSERT(atomic_read(&pag->pag_ref) == 0);
 	kmem_free(pag);
 }
@@ -148,6 +149,7 @@ xfs_free_perag(
 		ASSERT(pag);
 		ASSERT(atomic_read(&pag->pag_ref) == 0);
 		cancel_delayed_work_sync(&pag->pag_blockgc_work);
+		cancel_delayed_work_sync(&pag->pag_inodegc_work);
 		xfs_iunlink_destroy(pag);
 		xfs_buf_hash_destroy(pag);
 		call_rcu(&pag->rcu_head, __xfs_free_perag);
@@ -204,6 +206,7 @@ xfs_initialize_perag(
 		pag->pag_mount = mp;
 		spin_lock_init(&pag->pag_ici_lock);
 		INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
+		INIT_DELAYED_WORK(&pag->pag_inodegc_work, xfs_inodegc_worker);
 		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
 
 		error = xfs_buf_hash_init(pag);
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index ce00ad47b8ea..835c07d00cd7 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -177,7 +177,6 @@ typedef struct xfs_mount {
 	uint64_t		m_resblks_avail;/* available reserved blocks */
 	uint64_t		m_resblks_save;	/* reserved blks @ remount,ro */
 	struct delayed_work	m_reclaim_work;	/* background inode reclaim */
-	struct delayed_work	m_inodegc_work; /* background inode inactive */
 	struct xfs_kobj		m_kobj;
 	struct xfs_kobj		m_error_kobj;
 	struct xfs_kobj		m_error_meta_kobj;
@@ -370,6 +369,9 @@ typedef struct xfs_perag {
 	/* background prealloc block trimming */
 	struct delayed_work	pag_blockgc_work;
 
+	/* background inode inactivation */
+	struct delayed_work	pag_inodegc_work;
+
 	/* reference count */
 	uint8_t			pagf_refcount_level;
 
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 8d0142487fc7..566e5657c1b0 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1879,7 +1879,6 @@ static int xfs_init_fs_context(
 	mutex_init(&mp->m_growlock);
 	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
 	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
-	INIT_DELAYED_WORK(&mp->m_inodegc_work, xfs_inodegc_worker);
 	mp->m_kobj.kobject.kset = xfs_kset;
 	/*
 	 * We don't create the finobt per-ag space reservation until after log
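
The force path above leans on three workqueue facts: cancel_delayed_work()
returns true only if the item was still pending on its timer and never
waits for a worker that is already running; queue_delayed_work() with a
zero delay runs the item as soon as a worker thread is free; and a single
flush_workqueue() waits for everything queued on that workqueue, which is
why one flush now covers all tagged AGs.  Below is a self-contained demo
module showing those semantics in isolation -- every demo_* name is
invented for this sketch and the workqueue flags are illustrative, not the
ones XFS uses:

	#include <linux/module.h>
	#include <linux/workqueue.h>
	#include <linux/jiffies.h>
	#include <linux/printk.h>

	static struct workqueue_struct	*demo_wq;
	static struct delayed_work	demo_dwork;

	static void demo_worker(struct work_struct *work)
	{
		pr_info("demo gc pass ran\n");
	}

	/* Make pending delayed work run now, then wait for the queue to drain. */
	static void demo_force(void)
	{
		/*
		 * cancel_delayed_work() only succeeds while the item is still
		 * waiting on its timer; requeueing with a zero delay makes it
		 * run immediately, and flush_workqueue() waits for every item
		 * on demo_wq, not just this one.
		 */
		if (cancel_delayed_work(&demo_dwork))
			queue_delayed_work(demo_wq, &demo_dwork, 0);
		flush_workqueue(demo_wq);
	}

	static int __init demo_init(void)
	{
		demo_wq = alloc_workqueue("demo_gc", WQ_UNBOUND | WQ_FREEZABLE, 0);
		if (!demo_wq)
			return -ENOMEM;
		INIT_DELAYED_WORK(&demo_dwork, demo_worker);
		queue_delayed_work(demo_wq, &demo_dwork, msecs_to_jiffies(5000));
		demo_force();
		return 0;
	}

	static void __exit demo_exit(void)
	{
		cancel_delayed_work_sync(&demo_dwork);
		destroy_workqueue(demo_wq);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");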


Thread overview: 48+ messages
2021-03-11  3:05 [PATCHSET v3 00/11] xfs: deferred inode inactivation Darrick J. Wong
2021-03-11  3:05 ` [PATCH 01/11] xfs: prevent metadata files from being inactivated Darrick J. Wong
2021-03-11 13:05   ` Christoph Hellwig
2021-03-22 23:13   ` Dave Chinner
2021-03-11  3:05 ` [PATCH 02/11] xfs: refactor the predicate part of xfs_free_eofblocks Darrick J. Wong
2021-03-11 13:09   ` Christoph Hellwig
2021-03-15 18:46   ` Christoph Hellwig
2021-03-18  4:33     ` Darrick J. Wong
2021-03-19  1:48       ` Darrick J. Wong
2021-03-11  3:05 ` [PATCH 03/11] xfs: don't reclaim dquots with incore reservations Darrick J. Wong
2021-03-15 18:29   ` Christoph Hellwig
2021-03-22 23:31   ` Dave Chinner
2021-03-23  0:01     ` Darrick J. Wong
2021-03-23  1:48       ` Dave Chinner
2021-03-11  3:06 ` [PATCH 04/11] xfs: decide if inode needs inactivation Darrick J. Wong
2021-03-15 18:47   ` Christoph Hellwig
2021-03-15 19:06     ` Darrick J. Wong
2021-03-11  3:06 ` [PATCH 05/11] xfs: rename the blockgc workqueue Darrick J. Wong
2021-03-15 18:49   ` Christoph Hellwig
2021-03-11  3:06 ` [PATCH 06/11] xfs: deferred inode inactivation Darrick J. Wong
2021-03-16  7:27   ` Christoph Hellwig
2021-03-16 15:47     ` Darrick J. Wong
2021-03-17 15:21       ` Christoph Hellwig
2021-03-17 15:49         ` Darrick J. Wong
2021-03-22 23:46           ` Dave Chinner
2021-03-22 23:37       ` Dave Chinner
2021-03-23  0:24         ` Darrick J. Wong
2021-03-23  1:44   ` Dave Chinner
2021-03-23  4:00     ` Darrick J. Wong
2021-03-23  5:19       ` Dave Chinner
2021-03-24  2:04         ` Darrick J. Wong
2021-03-24  4:57           ` Dave Chinner
2021-03-25  4:20             ` Darrick J. Wong
2021-03-24 17:53       ` Christoph Hellwig
2021-03-25  4:26         ` Darrick J. Wong
2021-03-11  3:06 ` [PATCH 07/11] xfs: expose sysfs knob to control inode inactivation delay Darrick J. Wong
2021-03-11  3:06 ` [PATCH 08/11] xfs: force inode inactivation and retry fs writes when there isn't space Darrick J. Wong
2021-03-15 18:54   ` Christoph Hellwig
2021-03-15 19:06     ` Darrick J. Wong
2021-03-11  3:06 ` [PATCH 09/11] xfs: force inode garbage collection before fallocate when space is low Darrick J. Wong
2021-03-11  3:06 ` Darrick J. Wong [this message]
2021-03-15 18:55   ` [PATCH 10/11] xfs: parallelize inode inactivation Christoph Hellwig
2021-03-15 19:03     ` Darrick J. Wong
2021-03-23 22:21   ` Dave Chinner
2021-03-24  3:52     ` Darrick J. Wong
2021-03-11  3:06 ` [PATCH 11/11] xfs: create a polled function to force " Darrick J. Wong
2021-03-23 22:31   ` Dave Chinner
2021-03-24  3:34     ` Darrick J. Wong
