linux-xfs.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: "Darrick J. Wong" <djwong@kernel.org>
To: Dave Chinner <david@fromorbit.com>
Cc: linux-xfs@vger.kernel.org, hch@infradead.org
Subject: Re: [PATCH, pre-03/20 #2] xfs: introduce all-mounts list for cpu hotplug notifications
Date: Wed, 4 Aug 2021 09:06:01 -0700	[thread overview]
Message-ID: <20210804160601.GO3601466@magnolia> (raw)
In-Reply-To: <20210804115051.GO2757197@dread.disaster.area>

On Wed, Aug 04, 2021 at 09:50:51PM +1000, Dave Chinner wrote:
> 
> From: Dave Chinner <dchinner@redhat.com>
> 
> The inode inactivation and CIL tracking percpu structures are
> per-xfs_mount structures. That means when we get a CPU dead
> notification, we need to then iterate all the per-cpu structure
> instances to process them. Rather than keeping linked lists of
> per-cpu structures in each subsystem, add a list of all xfs_mounts
> that the generic xfs_cpu_dead() function will iterate and call into
> each subsystem appropriately.
> 
> This allows us to handle both per-mount and global XFS percpu state
> from xfs_cpu_dead(), and avoids the need to link subsystem
> structures that can be easily found from the xfs_mount into their
> own global lists.
> 
> Signed-off-by: Dave Chinner <dchinner@redhat.com>
> ---
>  fs/xfs/xfs_mount.h |  1 +
>  fs/xfs/xfs_super.c | 41 +++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 42 insertions(+)
> 
> diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
> index c78b63fe779a..ed7064596f94 100644
> --- a/fs/xfs/xfs_mount.h
> +++ b/fs/xfs/xfs_mount.h
> @@ -82,6 +82,7 @@ typedef struct xfs_mount {
>  	xfs_buftarg_t		*m_ddev_targp;	/* saves taking the address */
>  	xfs_buftarg_t		*m_logdev_targp;/* ptr to log device */
>  	xfs_buftarg_t		*m_rtdev_targp;	/* ptr to rt device */
> +	struct list_head	m_mount_list;	/* global mount list */
>  	/*
>  	 * Optional cache of rt summary level per bitmap block with the
>  	 * invariant that m_rsum_cache[bbno] <= the minimum i for which
> diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
> index ffe1ecd048db..c27df85212d4 100644
> --- a/fs/xfs/xfs_super.c
> +++ b/fs/xfs/xfs_super.c
> @@ -49,6 +49,28 @@ static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
>  static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
>  #endif
>  
> +#ifdef CONFIG_HOTPLUG_CPU
> +static LIST_HEAD(xfs_mount_list);
> +static DEFINE_SPINLOCK(xfs_mount_list_lock);
> +
> +static inline void xfs_mount_list_add(struct xfs_mount *mp)
> +{
> +	spin_lock(&xfs_mount_list_lock);
> +	list_add(&mp->m_mount_list, &xfs_mount_list);
> +	spin_unlock(&xfs_mount_list_lock);
> +}
> +
> +static inline void xfs_mount_list_del(struct xfs_mount *mp)
> +{
> +	spin_lock(&xfs_mount_list_lock);
> +	list_del(&mp->m_mount_list);
> +	spin_unlock(&xfs_mount_list_lock);
> +}
> +#else /* !CONFIG_HOTPLUG_CPU */
> +static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
> +static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
> +#endif
> +
>  enum xfs_dax_mode {
>  	XFS_DAX_INODE = 0,
>  	XFS_DAX_ALWAYS = 1,
> @@ -988,6 +1010,7 @@ xfs_fs_put_super(
>  
>  	xfs_freesb(mp);
>  	free_percpu(mp->m_stats.xs_stats);
> +	xfs_mount_list_del(mp);
>  	xfs_destroy_percpu_counters(mp);
>  	xfs_destroy_mount_workqueues(mp);
>  	xfs_close_devices(mp);
> @@ -1359,6 +1382,8 @@ xfs_fs_fill_super(
>  	if (error)
>  		goto out_destroy_workqueues;
>  
> +	xfs_mount_list_add(mp);
> +
>  	/* Allocate stats memory before we do operations that might use it */
>  	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
>  	if (!mp->m_stats.xs_stats) {
> @@ -1567,6 +1592,7 @@ xfs_fs_fill_super(
>   out_free_stats:
>  	free_percpu(mp->m_stats.xs_stats);
>   out_destroy_counters:
> +	xfs_mount_list_del(mp);
>  	xfs_destroy_percpu_counters(mp);
>   out_destroy_workqueues:
>  	xfs_destroy_mount_workqueues(mp);
> @@ -2061,10 +2087,20 @@ xfs_destroy_workqueues(void)
>  	destroy_workqueue(xfs_alloc_wq);
>  }
>  
> +#ifdef CONFIG_HOTPLUG_CPU
>  static int
>  xfs_cpu_dead(
>  	unsigned int		cpu)
>  {
> +	struct xfs_mount	*mp, *n;
> +
> +	spin_lock(&xfs_mount_list_lock);
> +	list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
> +		spin_unlock(&xfs_mount_list_lock);
> +		/* xfs_subsys_dead(mp, cpu); */
> +		spin_lock(&xfs_mount_list_lock);
> +	}
> +	spin_unlock(&xfs_mount_list_lock);
>  	return 0;
>  }
>  
> @@ -2090,6 +2126,11 @@ xfs_cpu_hotplug_destroy(void)
>  	cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
>  }
>  
> +#else /* !CONFIG_HOTPLUG_CPU */
> +static inline int xfs_cpu_hotplug_init(struct xfs_cil *cil) { return 0; }
> +static inline void xfs_cpu_hotplug_destroy(struct xfs_cil *cil) {}

void arguments here, right?

> +#endif

Nit: I think this ifdef stuff belongs in the previous patch.  Will fix
it when I drag this into my tree.

--D

> +
>  STATIC int __init
>  init_xfs_fs(void)
>  {

  reply	other threads:[~2021-08-04 16:06 UTC|newest]

Thread overview: 47+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-07-29 18:43 [PATCHSET v8 00/20] xfs: deferred inode inactivation Darrick J. Wong
2021-07-29 18:43 ` [PATCH 01/20] xfs: move xfs_inactive call to xfs_inode_mark_reclaimable Darrick J. Wong
2021-07-29 18:44 ` [PATCH 02/20] xfs: detach dquots from inode if we don't need to inactivate it Darrick J. Wong
2021-07-29 18:44 ` [PATCH 03/20] xfs: defer inode inactivation to a workqueue Darrick J. Wong
2021-07-30  4:24   ` Dave Chinner
2021-07-31  4:21     ` Darrick J. Wong
2021-08-01 21:49       ` Dave Chinner
2021-08-01 23:47         ` Dave Chinner
2021-08-03  8:34   ` [PATCH, alternative] xfs: per-cpu deferred inode inactivation queues Dave Chinner
2021-08-03 20:20     ` Darrick J. Wong
2021-08-04  3:20     ` [PATCH, alternative v2] " Darrick J. Wong
2021-08-04 10:03       ` [PATCH] xfs: inodegc needs to stop before freeze Dave Chinner
2021-08-04 12:37         ` Dave Chinner
2021-08-04 10:46       ` [PATCH] xfs: don't run inodegc flushes when inodegc is not active Dave Chinner
2021-08-04 16:20         ` Darrick J. Wong
2021-08-04 11:09       ` [PATCH, alternative v2] xfs: per-cpu deferred inode inactivation queues Dave Chinner
2021-08-04 15:59         ` Darrick J. Wong
2021-08-04 21:35           ` Dave Chinner
2021-08-04 11:49       ` [PATCH, pre-03/20 #1] xfs: introduce CPU hotplug infrastructure Dave Chinner
2021-08-04 11:50       ` [PATCH, pre-03/20 #2] xfs: introduce all-mounts list for cpu hotplug notifications Dave Chinner
2021-08-04 16:06         ` Darrick J. Wong [this message]
2021-08-04 21:17           ` Dave Chinner
2021-08-04 11:52       ` [PATCH, post-03/20 1/1] xfs: hook up inodegc to CPU dead notification Dave Chinner
2021-08-04 16:19         ` Darrick J. Wong
2021-08-04 21:48           ` Dave Chinner
2021-07-29 18:44 ` [PATCH 04/20] xfs: throttle inode inactivation queuing on memory reclaim Darrick J. Wong
2021-07-29 18:44 ` [PATCH 05/20] xfs: don't throttle memory reclaim trying to queue inactive inodes Darrick J. Wong
2021-07-29 18:44 ` [PATCH 06/20] xfs: throttle inodegc queuing on backlog Darrick J. Wong
2021-08-02  0:45   ` Dave Chinner
2021-08-02  1:30     ` Dave Chinner
2021-07-29 18:44 ` [PATCH 07/20] xfs: queue inodegc worker immediately when memory is tight Darrick J. Wong
2021-07-29 18:44 ` [PATCH 08/20] xfs: expose sysfs knob to control inode inactivation delay Darrick J. Wong
2021-07-29 18:44 ` [PATCH 09/20] xfs: reduce inactivation delay when free space is tight Darrick J. Wong
2021-07-29 18:44 ` [PATCH 10/20] xfs: reduce inactivation delay when quota are tight Darrick J. Wong
2021-07-29 18:44 ` [PATCH 11/20] xfs: reduce inactivation delay when realtime extents " Darrick J. Wong
2021-07-29 18:44 ` [PATCH 12/20] xfs: inactivate inodes any time we try to free speculative preallocations Darrick J. Wong
2021-07-29 18:45 ` [PATCH 13/20] xfs: flush inode inactivation work when compiling usage statistics Darrick J. Wong
2021-07-29 18:45 ` [PATCH 14/20] xfs: parallelize inode inactivation Darrick J. Wong
2021-08-02  0:55   ` Dave Chinner
2021-08-02 21:33     ` Darrick J. Wong
2021-07-29 18:45 ` [PATCH 15/20] xfs: reduce inactivation delay when AG free space are tight Darrick J. Wong
2021-07-29 18:45 ` [PATCH 16/20] xfs: queue inodegc worker immediately on backlog Darrick J. Wong
2021-07-29 18:45 ` [PATCH 17/20] xfs: don't run speculative preallocation gc when fs is frozen Darrick J. Wong
2021-07-29 18:45 ` [PATCH 18/20] xfs: scale speculative preallocation gc delay based on free space Darrick J. Wong
2021-07-29 18:45 ` [PATCH 19/20] xfs: use background worker pool when transactions can't get " Darrick J. Wong
2021-07-29 18:45 ` [PATCH 20/20] xfs: avoid buffer deadlocks when walking fs inodes Darrick J. Wong
2021-08-02 10:35 ` [PATCHSET v8 00/20] xfs: deferred inode inactivation Dave Chinner

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210804160601.GO3601466@magnolia \
    --to=djwong@kernel.org \
    --cc=david@fromorbit.com \
    --cc=hch@infradead.org \
    --cc=linux-xfs@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).