From: "Darrick J. Wong" <djwong@kernel.org>
To: Dave Chinner <david@fromorbit.com>
Cc: linux-xfs@vger.kernel.org
Subject: Re: [PATCH 2/6] xfs: break up xfs_buf_find() into individual pieces
Date: Wed, 29 Jun 2022 14:50:59 -0700
Message-ID: <YrzJQwfTbtkiR1K4@magnolia>
In-Reply-To: <20220627060841.244226-3-david@fromorbit.com>

On Mon, Jun 27, 2022 at 04:08:37PM +1000, Dave Chinner wrote:
> From: Dave Chinner <dchinner@redhat.com>
> 
> xfs_buf_find() is made up of three main parts: lookup, insert and
> locking. The interactions with xfs_buf_get_map() require it to be
> called twice - once for a pure lookup, and again on lookup failure
> so the insert path can be run. We want to simplify this down a lot,
> so split it into a fast path lookup, a slow path insert and a "lock
> the found buffer" helper. This will then let us integrate these
> operations more effectively into xfs_buf_get_map() in future
> patches.
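
To keep the resulting shape in mind while reading the hunks below, the
lookup now decomposes roughly like this -- a condensed sketch of the new
xfs_buf_find() from this patch, with the stats and trace point elided:

	error = xfs_buf_find_verify(btp, &cmap);
	if (error)
		return error;

	pag = xfs_perag_get(btp->bt_mount,
			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));

	spin_lock(&pag->pag_buf_lock);
	bp = xfs_buf_find_fast(pag, &cmap);	/* hash lookup, takes a hold */
	if (!bp) {
		/* miss: insert @new_bp, or -ENOENT if none was supplied */
		error = xfs_buf_find_insert(btp, pag, new_bp);
		spin_unlock(&pag->pag_buf_lock);
		if (error)
			return error;
		*found_bp = new_bp;
		return 0;
	}
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	/* hit: lock it (honoring XBF_TRYLOCK) and handle stale buffers */
	error = xfs_buf_find_lock(btp, bp, flags);
	if (error)
		return error;
	*found_bp = bp;
	return 0;
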
> 
> Signed-off-by: Dave Chinner <dchinner@redhat.com>
> ---
>  fs/xfs/xfs_buf.c | 159 +++++++++++++++++++++++++++++++----------------
>  1 file changed, 105 insertions(+), 54 deletions(-)
> 
> diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
> index 143e1c70df5d..95d4b428aec0 100644
> --- a/fs/xfs/xfs_buf.c
> +++ b/fs/xfs/xfs_buf.c
> @@ -503,77 +503,60 @@ xfs_buf_hash_destroy(
>  	rhashtable_destroy(&pag->pag_buf_hash);
>  }
>  
> -/*
> - * Look up a buffer in the buffer cache and return it referenced and locked
> - * in @found_bp.
> - *
> - * If @new_bp is supplied and we have a lookup miss, insert @new_bp into the
> - * cache.
> - *
> - * If XBF_TRYLOCK is set in @flags, only try to lock the buffer and return
> - * -EAGAIN if we fail to lock it.
> - *
> - * Return values are:
> - *	-EFSCORRUPTED if have been supplied with an invalid address
> - *	-EAGAIN on trylock failure
> - *	-ENOENT if we fail to find a match and @new_bp was NULL
> - *	0, with @found_bp:
> - *		- @new_bp if we inserted it into the cache
> - *		- the buffer we found and locked.
> - */
>  static int
> -xfs_buf_find(
> +xfs_buf_find_verify(

Isn't this more of a xfs_buf_map verifier?  Why not call it
xfs_buf_map_verify()?
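
i.e. something like this -- same body as in the patch, only the function
name changes:

static int
xfs_buf_map_verify(
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map)
{
	xfs_daddr_t		eofs;

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(BBTOB(map->bm_len) < btp->bt_meta_sectorsize));
	ASSERT(!(BBTOB(map->bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));

	/*
	 * Corrupted block numbers can get through to here, unfortunately,
	 * so we have to check that the buffer falls within the filesystem
	 * bounds.
	 */
	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
	if (map->bm_bn < 0 || map->bm_bn >= eofs) {
		xfs_alert(btp->bt_mount,
			  "%s: daddr 0x%llx out of range, EOFS 0x%llx",
			  __func__, map->bm_bn, eofs);
		WARN_ON(1);
		return -EFSCORRUPTED;
	}
	return 0;
}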

--D

>  	struct xfs_buftarg	*btp,
> -	struct xfs_buf_map	*map,
> -	int			nmaps,
> -	xfs_buf_flags_t		flags,
> -	struct xfs_buf		*new_bp,
> -	struct xfs_buf		**found_bp)
> +	struct xfs_buf_map	*map)
>  {
> -	struct xfs_perag	*pag;
> -	struct xfs_buf		*bp;
> -	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
>  	xfs_daddr_t		eofs;
> -	int			i;
> -
> -	*found_bp = NULL;
> -
> -	for (i = 0; i < nmaps; i++)
> -		cmap.bm_len += map[i].bm_len;
>  
>  	/* Check for IOs smaller than the sector size / not sector aligned */
> -	ASSERT(!(BBTOB(cmap.bm_len) < btp->bt_meta_sectorsize));
> -	ASSERT(!(BBTOB(cmap.bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));
> +	ASSERT(!(BBTOB(map->bm_len) < btp->bt_meta_sectorsize));
> +	ASSERT(!(BBTOB(map->bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));
>  
>  	/*
>  	 * Corrupted block numbers can get through to here, unfortunately, so we
>  	 * have to check that the buffer falls within the filesystem bounds.
>  	 */
>  	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
> -	if (cmap.bm_bn < 0 || cmap.bm_bn >= eofs) {
> +	if (map->bm_bn < 0 || map->bm_bn >= eofs) {
>  		xfs_alert(btp->bt_mount,
>  			  "%s: daddr 0x%llx out of range, EOFS 0x%llx",
> -			  __func__, cmap.bm_bn, eofs);
> +			  __func__, map->bm_bn, eofs);
>  		WARN_ON(1);
>  		return -EFSCORRUPTED;
>  	}
> +	return 0;
> +}
>  
> -	pag = xfs_perag_get(btp->bt_mount,
> -			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
> +static inline struct xfs_buf *
> +xfs_buf_find_fast(
> +	struct xfs_perag	*pag,
> +	struct xfs_buf_map	*map)
> +{
> +	struct xfs_buf          *bp;
>  
> -	spin_lock(&pag->pag_buf_lock);
> -	bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap,
> -				    xfs_buf_hash_params);
> -	if (bp) {
> -		atomic_inc(&bp->b_hold);
> -		goto found;
> -	}
> +	bp = rhashtable_lookup(&pag->pag_buf_hash, map, xfs_buf_hash_params);
> +	if (!bp)
> +		return NULL;
> +	atomic_inc(&bp->b_hold);
> +	return bp;
> +}
>  
> +/*
> + * Insert the new_bp into the hash table. This consumes the perag reference
> + * taken for the lookup.
> + */
> +static int
> +xfs_buf_find_insert(
> +	struct xfs_buftarg	*btp,
> +	struct xfs_perag	*pag,
> +	struct xfs_buf		*new_bp)
> +{
>  	/* No match found */
>  	if (!new_bp) {
> -		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
> -		spin_unlock(&pag->pag_buf_lock);
>  		xfs_perag_put(pag);
> +		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
>  		return -ENOENT;
>  	}
>  
> @@ -581,14 +564,15 @@ xfs_buf_find(
>  	new_bp->b_pag = pag;
>  	rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
>  			       xfs_buf_hash_params);
> -	spin_unlock(&pag->pag_buf_lock);
> -	*found_bp = new_bp;
>  	return 0;
> +}
>  
> -found:
> -	spin_unlock(&pag->pag_buf_lock);
> -	xfs_perag_put(pag);
> -
> +static int
> +xfs_buf_find_lock(
> +	struct xfs_buftarg	*btp,
> +	struct xfs_buf          *bp,
> +	xfs_buf_flags_t		flags)
> +{
>  	if (!xfs_buf_trylock(bp)) {
>  		if (flags & XBF_TRYLOCK) {
>  			xfs_buf_rele(bp);
> @@ -609,6 +593,73 @@ xfs_buf_find(
>  		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
>  		bp->b_ops = NULL;
>  	}
> +	return 0;
> +}
> +
> +/*
> + * Look up a buffer in the buffer cache and return it referenced and locked
> + * in @found_bp.
> + *
> + * If @new_bp is supplied and we have a lookup miss, insert @new_bp into the
> + * cache.
> + *
> + * If XBF_TRYLOCK is set in @flags, only try to lock the buffer and return
> + * -EAGAIN if we fail to lock it.
> + *
> + * Return values are:
> + *	-EFSCORRUPTED if have been supplied with an invalid address
> + *	-EAGAIN on trylock failure
> + *	-ENOENT if we fail to find a match and @new_bp was NULL
> + *	0, with @found_bp:
> + *		- @new_bp if we inserted it into the cache
> + *		- the buffer we found and locked.
> + */
> +static int
> +xfs_buf_find(
> +	struct xfs_buftarg	*btp,
> +	struct xfs_buf_map	*map,
> +	int			nmaps,
> +	xfs_buf_flags_t		flags,
> +	struct xfs_buf		*new_bp,
> +	struct xfs_buf		**found_bp)
> +{
> +	struct xfs_perag	*pag;
> +	struct xfs_buf		*bp;
> +	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
> +	int			error;
> +	int			i;
> +
> +	*found_bp = NULL;
> +
> +	for (i = 0; i < nmaps; i++)
> +		cmap.bm_len += map[i].bm_len;
> +
> +	error = xfs_buf_find_verify(btp, &cmap);
> +	if (error)
> +		return error;
> +
> +	pag = xfs_perag_get(btp->bt_mount,
> +			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
> +
> +	spin_lock(&pag->pag_buf_lock);
> +	bp = xfs_buf_find_fast(pag, &cmap);
> +	if (bp)
> +		goto found;
> +
> +	error = xfs_buf_find_insert(btp, pag, new_bp);
> +	spin_unlock(&pag->pag_buf_lock);
> +	if (error)
> +		return error;
> +	*found_bp = new_bp;
> +	return 0;
> +
> +found:
> +	spin_unlock(&pag->pag_buf_lock);
> +	xfs_perag_put(pag);
> +
> +	error = xfs_buf_find_lock(btp, bp, flags);
> +	if (error)
> +		return error;
>  
>  	trace_xfs_buf_find(bp, flags, _RET_IP_);
>  	XFS_STATS_INC(btp->bt_mount, xb_get_locked);
> -- 
> 2.36.1
> 
