* Re: [PATCH 3/6] xfs: merge xfs_buf_find() and xfs_buf_get_map()
@ 2022-06-27 22:09 kernel test robot
  0 siblings, 0 replies; 9+ messages in thread
From: kernel test robot @ 2022-06-27 22:09 UTC (permalink / raw)
  To: kbuild


CC: kbuild-all@lists.01.org
BCC: lkp@intel.com
In-Reply-To: <20220627060841.244226-4-david@fromorbit.com>
References: <20220627060841.244226-4-david@fromorbit.com>
TO: Dave Chinner <david@fromorbit.com>
TO: linux-xfs@vger.kernel.org

Hi Dave,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on xfs-linux/for-next]
[also build test WARNING on linus/master v5.19-rc4 next-20220627]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/intel-lab-lkp/linux/commits/Dave-Chinner/xfs-lockless-buffer-lookups/20220627-141053
base:   https://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git for-next
:::::: branch date: 16 hours ago
:::::: commit date: 16 hours ago
config: x86_64-randconfig-m001-20220627 (https://download.01.org/0day-ci/archive/20220628/202206280549.pN2aOPuA-lkp@intel.com/config)
compiler: gcc-11 (Debian 11.3.0-3) 11.3.0

If you fix the issue, kindly add the following tags where applicable
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>

smatch warnings:
fs/xfs/xfs_buf.c:703 xfs_buf_get_map() error: we previously assumed 'bp' could be null (see line 686)

vim +/bp +703 fs/xfs/xfs_buf.c

ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  651  
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  652  /*
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  653   * Assembles a buffer covering the specified range. The code is optimised for
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  654   * cache hits, as metadata intensive workloads will see 3 orders of magnitude
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  655   * more hits than misses.
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  656   */
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  657  int
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  658  xfs_buf_get_map(
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  659  	struct xfs_buftarg	*btp,
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  660  	struct xfs_buf_map	*map,
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  661  	int			nmaps,
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  662  	xfs_buf_flags_t		flags,
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  663  	struct xfs_buf		**bpp)
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  664  {
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  665  	struct xfs_perag	*pag;
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  666  	struct xfs_buf		*bp = NULL;
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  667  	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  668  	int			error;
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  669  	int			i;
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  670  
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  671  	for (i = 0; i < nmaps; i++)
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  672  		cmap.bm_len += map[i].bm_len;
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  673  
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  674  	error = xfs_buf_find_verify(btp, &cmap);
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  675  	if (error)
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  676  		return error;
^1da177e4c3f41 fs/xfs/linux-2.6/xfs_buf.c Linus Torvalds    2005-04-16  677  
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  678  	pag = xfs_perag_get(btp->bt_mount,
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  679  			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  680  
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  681  	error = xfs_buf_find_fast(pag, &cmap, flags, &bp);
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  682  	if (error && error != -ENOENT)
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  683  		goto out_put_perag;
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  684  
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  685  	/* cache hits always outnumber misses by at least 10:1 */
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27 @686  	if (unlikely(!bp)) {
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  687  		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  688  
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  689  		if (flags & XBF_INCORE)
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  690  			goto out_put_perag;
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  691  
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  692  		/* xfs_buf_find_insert() consumes the perag reference. */
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  693  		error = xfs_buf_find_insert(btp, pag, &cmap, map, nmaps,
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  694  				flags, &bp);
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  695  		if (error)
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  696  			return error;
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  697  	} else {
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  698  		XFS_STATS_INC(btp->bt_mount, xb_get_locked);
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  699  		xfs_perag_put(pag);
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  700  	}
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  701  
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  702  	/* We do not hold a perag reference anymore. */
611c99468c7aa1 fs/xfs/xfs_buf.c           Dave Chinner      2012-04-23 @703  	if (!bp->b_addr) {
ce8e922c0e79c8 fs/xfs/linux-2.6/xfs_buf.c Nathan Scott      2006-01-11  704  		error = _xfs_buf_map_pages(bp, flags);
^1da177e4c3f41 fs/xfs/linux-2.6/xfs_buf.c Linus Torvalds    2005-04-16  705  		if (unlikely(error)) {
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  706  			xfs_warn_ratelimited(btp->bt_mount,
93baa55af1a19a fs/xfs/xfs_buf.c           Darrick J. Wong   2020-02-21  707  				"%s: failed to map %u pages", __func__,
93baa55af1a19a fs/xfs/xfs_buf.c           Darrick J. Wong   2020-02-21  708  				bp->b_page_count);
a8acad70731e7d fs/xfs/xfs_buf.c           Dave Chinner      2012-04-23  709  			xfs_buf_relse(bp);
3848b5f6709221 fs/xfs/xfs_buf.c           Darrick J. Wong   2020-01-23  710  			return error;
^1da177e4c3f41 fs/xfs/linux-2.6/xfs_buf.c Linus Torvalds    2005-04-16  711  		}
^1da177e4c3f41 fs/xfs/linux-2.6/xfs_buf.c Linus Torvalds    2005-04-16  712  	}
^1da177e4c3f41 fs/xfs/linux-2.6/xfs_buf.c Linus Torvalds    2005-04-16  713  
b79f4a1c68bb99 fs/xfs/xfs_buf.c           Dave Chinner      2016-01-12  714  	/*
b79f4a1c68bb99 fs/xfs/xfs_buf.c           Dave Chinner      2016-01-12  715  	 * Clear b_error if this is a lookup from a caller that doesn't expect
b79f4a1c68bb99 fs/xfs/xfs_buf.c           Dave Chinner      2016-01-12  716  	 * valid data to be found in the buffer.
b79f4a1c68bb99 fs/xfs/xfs_buf.c           Dave Chinner      2016-01-12  717  	 */
b79f4a1c68bb99 fs/xfs/xfs_buf.c           Dave Chinner      2016-01-12  718  	if (!(flags & XBF_READ))
b79f4a1c68bb99 fs/xfs/xfs_buf.c           Dave Chinner      2016-01-12  719  		xfs_buf_ioerror(bp, 0);
b79f4a1c68bb99 fs/xfs/xfs_buf.c           Dave Chinner      2016-01-12  720  
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  721  	XFS_STATS_INC(btp->bt_mount, xb_get);
0b1b213fcf3a84 fs/xfs/linux-2.6/xfs_buf.c Christoph Hellwig 2009-12-14  722  	trace_xfs_buf_get(bp, flags, _RET_IP_);
3848b5f6709221 fs/xfs/xfs_buf.c           Darrick J. Wong   2020-01-23  723  	*bpp = bp;
3848b5f6709221 fs/xfs/xfs_buf.c           Darrick J. Wong   2020-01-23  724  	return 0;
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  725  
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  726  out_put_perag:
ac3a2dcca9b378 fs/xfs/xfs_buf.c           Dave Chinner      2022-06-27  727  	xfs_perag_put(pag);
170041f71596da fs/xfs/xfs_buf.c           Christoph Hellwig 2021-06-07  728  	return error;
^1da177e4c3f41 fs/xfs/linux-2.6/xfs_buf.c Linus Torvalds    2005-04-16  729  }
^1da177e4c3f41 fs/xfs/linux-2.6/xfs_buf.c Linus Torvalds    2005-04-16  730  
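The warning boils down to an inter-procedural pattern that smatch cannot
prove safe: 'bp' is tested for NULL at line 686, and on that path it only
becomes non-NULL through the out-parameter contract of
xfs_buf_find_insert(), which the checker does not track across the call.
A minimal user-space sketch of the flagged shape (stand-in types and
names, not the XFS code):

#include <errno.h>
#include <stddef.h>

struct buf { void *b_addr; };

/* Stand-in for xfs_buf_find_fast(): -ENOENT leaves *bpp untouched. */
int find_fast(struct buf **bpp)
{
	(void)bpp;
	return -ENOENT;			/* simulate a cache miss */
}

/*
 * Stand-in for xfs_buf_find_insert(): on success it stores a non-NULL
 * buffer through *bpp; on failure it returns a negative errno and never
 * touches *bpp.
 */
int find_insert(struct buf **bpp)
{
	static struct buf b;

	*bpp = &b;
	return 0;
}

int get_map(struct buf **bpp)
{
	struct buf *bp = NULL;
	int error;

	error = find_fast(&bp);
	if (error && error != -ENOENT)
		return error;

	if (!bp) {			/* the "line 686" NULL test */
		error = find_insert(&bp);
		if (error)
			return error;
		/*
		 * bp is non-NULL here, but only by find_insert()'s
		 * contract, which smatch does not see across the call.
		 */
	}

	if (!bp->b_addr)		/* the "line 703" dereference */
		return -EIO;

	*bpp = bp;
	return 0;
}

Since xfs_buf_find_insert() returns an error whenever it does not set
*bpp, the dereference at line 703 appears to be a false positive rather
than a real NULL pointer dereference.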

-- 
0-DAY CI Kernel Test Service
https://01.org/lkp


* Re: [PATCH 3/6] xfs: merge xfs_buf_find() and xfs_buf_get_map()
  2022-07-11  5:14   ` Christoph Hellwig
@ 2022-07-12  0:01     ` Dave Chinner
  0 siblings, 0 replies; 9+ messages in thread
From: Dave Chinner @ 2022-07-12  0:01 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-xfs

On Sun, Jul 10, 2022 at 10:14:16PM -0700, Christoph Hellwig wrote:
> On Fri, Jul 08, 2022 at 09:52:56AM +1000, Dave Chinner wrote:
> > index 91dc691f40a8..81ca951b451a 100644
> > --- a/fs/xfs/xfs_buf.c
> > +++ b/fs/xfs/xfs_buf.c
> > @@ -531,18 +531,16 @@ xfs_buf_map_verify(
> >  
> >  static int
> >  xfs_buf_find_lock(
> > -	struct xfs_buftarg	*btp,
> >  	struct xfs_buf          *bp,
> >  	xfs_buf_flags_t		flags)
> >  {
> >  	if (!xfs_buf_trylock(bp)) {
> >  		if (flags & XBF_TRYLOCK) {
> > -			xfs_buf_rele(bp);
> > -			XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
> > +			XFS_STATS_INC(bp->b_mount, xb_busy_locked);
> >  			return -EAGAIN;
> >  		}
> >  		xfs_buf_lock(bp);
> > -		XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
> > +		XFS_STATS_INC(bp->b_mount, xb_get_locked_waited);
> >  	}
> >  
> >  	/*
> 
> Not doing this to start with in the previous patch still feels
> rather odd.

Oops, missed that. Will fix.

-Dave.
-- 
Dave Chinner
david@fromorbit.com


* Re: [PATCH 3/6] xfs: merge xfs_buf_find() and xfs_buf_get_map()
  2022-07-07 23:52 ` [PATCH 3/6] xfs: merge xfs_buf_find() and xfs_buf_get_map() Dave Chinner
  2022-07-10  0:15   ` Darrick J. Wong
@ 2022-07-11  5:14   ` Christoph Hellwig
  2022-07-12  0:01     ` Dave Chinner
  1 sibling, 1 reply; 9+ messages in thread
From: Christoph Hellwig @ 2022-07-11  5:14 UTC (permalink / raw)
  To: Dave Chinner; +Cc: linux-xfs

On Fri, Jul 08, 2022 at 09:52:56AM +1000, Dave Chinner wrote:
> index 91dc691f40a8..81ca951b451a 100644
> --- a/fs/xfs/xfs_buf.c
> +++ b/fs/xfs/xfs_buf.c
> @@ -531,18 +531,16 @@ xfs_buf_map_verify(
>  
>  static int
>  xfs_buf_find_lock(
> -	struct xfs_buftarg	*btp,
>  	struct xfs_buf          *bp,
>  	xfs_buf_flags_t		flags)
>  {
>  	if (!xfs_buf_trylock(bp)) {
>  		if (flags & XBF_TRYLOCK) {
> -			xfs_buf_rele(bp);
> -			XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
> +			XFS_STATS_INC(bp->b_mount, xb_busy_locked);
>  			return -EAGAIN;
>  		}
>  		xfs_buf_lock(bp);
> -		XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
> +		XFS_STATS_INC(bp->b_mount, xb_get_locked_waited);
>  	}
>  
>  	/*

Not doing this to start with in the previous patch still feels
rather odd.


* Re: [PATCH 3/6] xfs: merge xfs_buf_find() and xfs_buf_get_map()
  2022-07-07 23:52 ` [PATCH 3/6] xfs: merge xfs_buf_find() and xfs_buf_get_map() Dave Chinner
@ 2022-07-10  0:15   ` Darrick J. Wong
  2022-07-11  5:14   ` Christoph Hellwig
  1 sibling, 0 replies; 9+ messages in thread
From: Darrick J. Wong @ 2022-07-10  0:15 UTC (permalink / raw)
  To: Dave Chinner; +Cc: linux-xfs

On Fri, Jul 08, 2022 at 09:52:56AM +1000, Dave Chinner wrote:
> From: Dave Chinner <dchinner@redhat.com>
> 
> Now that we have factored xfs_buf_find(), we can start separating
> xfs_buf_get_map() into distinct fast and slow paths. We start by
> moving the lookup map and perag setup to _get_map(), and then move
> all the specifics of the fast path lookup into xfs_buf_lookup()
> and call it directly from _get_map(). We then move all the slow path
> code to xfs_buf_find_insert(), which is now also called directly
> from _get_map(). As such, xfs_buf_find() now goes away.
> 
> Signed-off-by: Dave Chinner <dchinner@redhat.com>

Makes sense to /me...
Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D

> ---
>  fs/xfs/xfs_buf.c | 207 ++++++++++++++++++++++-------------------------
>  1 file changed, 95 insertions(+), 112 deletions(-)
> 
> diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
> index 91dc691f40a8..81ca951b451a 100644
> --- a/fs/xfs/xfs_buf.c
> +++ b/fs/xfs/xfs_buf.c
> @@ -531,18 +531,16 @@ xfs_buf_map_verify(
>  
>  static int
>  xfs_buf_find_lock(
> -	struct xfs_buftarg	*btp,
>  	struct xfs_buf          *bp,
>  	xfs_buf_flags_t		flags)
>  {
>  	if (!xfs_buf_trylock(bp)) {
>  		if (flags & XBF_TRYLOCK) {
> -			xfs_buf_rele(bp);
> -			XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
> +			XFS_STATS_INC(bp->b_mount, xb_busy_locked);
>  			return -EAGAIN;
>  		}
>  		xfs_buf_lock(bp);
> -		XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
> +		XFS_STATS_INC(bp->b_mount, xb_get_locked_waited);
>  	}
>  
>  	/*
> @@ -558,113 +556,97 @@ xfs_buf_find_lock(
>  	return 0;
>  }
>  
> -static inline struct xfs_buf *
> +static inline int
>  xfs_buf_lookup(
>  	struct xfs_perag	*pag,
> -	struct xfs_buf_map	*map)
> +	struct xfs_buf_map	*map,
> +	xfs_buf_flags_t		flags,
> +	struct xfs_buf		**bpp)
>  {
>  	struct xfs_buf          *bp;
> +	int			error;
>  
> +	spin_lock(&pag->pag_buf_lock);
>  	bp = rhashtable_lookup(&pag->pag_buf_hash, map, xfs_buf_hash_params);
> -	if (!bp)
> -		return NULL;
> +	if (!bp) {
> +		spin_unlock(&pag->pag_buf_lock);
> +		return -ENOENT;
> +	}
>  	atomic_inc(&bp->b_hold);
> -	return bp;
> -}
> +	spin_unlock(&pag->pag_buf_lock);
>  
> -/*
> - * Insert the new_bp into the hash table. This consumes the perag reference
> - * taken for the lookup.
> - */
> -static int
> -xfs_buf_find_insert(
> -	struct xfs_buftarg	*btp,
> -	struct xfs_perag	*pag,
> -	struct xfs_buf		*new_bp)
> -{
> -	/* No match found */
> -	if (!new_bp) {
> -		xfs_perag_put(pag);
> -		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
> -		return -ENOENT;
> +	error = xfs_buf_find_lock(bp, flags);
> +	if (error) {
> +		xfs_buf_rele(bp);
> +		return error;
>  	}
>  
> -	/* the buffer keeps the perag reference until it is freed */
> -	new_bp->b_pag = pag;
> -	rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
> -			       xfs_buf_hash_params);
> +	trace_xfs_buf_find(bp, flags, _RET_IP_);
> +	*bpp = bp;
>  	return 0;
>  }
>  
>  /*
> - * Look up a buffer in the buffer cache and return it referenced and locked
> - * in @found_bp.
> - *
> - * If @new_bp is supplied and we have a lookup miss, insert @new_bp into the
> - * cache.
> - *
> - * If XBF_TRYLOCK is set in @flags, only try to lock the buffer and return
> - * -EAGAIN if we fail to lock it.
> - *
> - * Return values are:
> - *	-EFSCORRUPTED if have been supplied with an invalid address
> - *	-EAGAIN on trylock failure
> - *	-ENOENT if we fail to find a match and @new_bp was NULL
> - *	0, with @found_bp:
> - *		- @new_bp if we inserted it into the cache
> - *		- the buffer we found and locked.
> + * Insert the new_bp into the hash table. This consumes the perag reference
> + * taken for the lookup regardless of the result of the insert.
>   */
>  static int
> -xfs_buf_find(
> +xfs_buf_find_insert(
>  	struct xfs_buftarg	*btp,
> +	struct xfs_perag	*pag,
> +	struct xfs_buf_map	*cmap,
>  	struct xfs_buf_map	*map,
>  	int			nmaps,
>  	xfs_buf_flags_t		flags,
> -	struct xfs_buf		*new_bp,
> -	struct xfs_buf		**found_bp)
> +	struct xfs_buf		**bpp)
>  {
> -	struct xfs_perag	*pag;
> +	struct xfs_buf		*new_bp;
>  	struct xfs_buf		*bp;
> -	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
>  	int			error;
> -	int			i;
> -
> -	*found_bp = NULL;
> -
> -	for (i = 0; i < nmaps; i++)
> -		cmap.bm_len += map[i].bm_len;
>  
> -	error = xfs_buf_map_verify(btp, &cmap);
> +	error = _xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
>  	if (error)
> -		return error;
> +		goto out_drop_pag;
>  
> -	pag = xfs_perag_get(btp->bt_mount,
> -			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
> +	/*
> +	 * For buffers that fit entirely within a single page, first attempt to
> +	 * allocate the memory from the heap to minimise memory usage. If we
> +	 * can't get heap memory for these small buffers, we fall back to using
> +	 * the page allocator.
> +	 */
> +	if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
> +	    xfs_buf_alloc_kmem(new_bp, flags) < 0) {
> +		error = xfs_buf_alloc_pages(new_bp, flags);
> +		if (error)
> +			goto out_free_buf;
> +	}
>  
>  	spin_lock(&pag->pag_buf_lock);
> -	bp = xfs_buf_lookup(pag, &cmap);
> -	if (bp)
> -		goto found;
> +	bp = rhashtable_lookup(&pag->pag_buf_hash, cmap, xfs_buf_hash_params);
> +	if (bp) {
> +		atomic_inc(&bp->b_hold);
> +		spin_unlock(&pag->pag_buf_lock);
> +		error = xfs_buf_find_lock(bp, flags);
> +		if (error)
> +			xfs_buf_rele(bp);
> +		else
> +			*bpp = bp;
> +		goto out_free_buf;
> +	}
>  
> -	error = xfs_buf_find_insert(btp, pag, new_bp);
> +	/* The buffer keeps the perag reference until it is freed. */
> +	new_bp->b_pag = pag;
> +	rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
> +			       xfs_buf_hash_params);
>  	spin_unlock(&pag->pag_buf_lock);
> -	if (error)
> -		return error;
> -	*found_bp = new_bp;
> +	*bpp = new_bp;
>  	return 0;
>  
> -found:
> -	spin_unlock(&pag->pag_buf_lock);
> +out_free_buf:
> +	xfs_buf_free(new_bp);
> +out_drop_pag:
>  	xfs_perag_put(pag);
> -
> -	error = xfs_buf_find_lock(btp, bp, flags);
> -	if (error)
> -		return error;
> -
> -	trace_xfs_buf_find(bp, flags, _RET_IP_);
> -	XFS_STATS_INC(btp->bt_mount, xb_get_locked);
> -	*found_bp = bp;
> -	return 0;
> +	return error;
>  }
>  
>  /*
> @@ -674,54 +656,54 @@ xfs_buf_find(
>   */
>  int
>  xfs_buf_get_map(
> -	struct xfs_buftarg	*target,
> +	struct xfs_buftarg	*btp,
>  	struct xfs_buf_map	*map,
>  	int			nmaps,
>  	xfs_buf_flags_t		flags,
>  	struct xfs_buf		**bpp)
>  {
> -	struct xfs_buf		*bp;
> -	struct xfs_buf		*new_bp;
> +	struct xfs_perag	*pag;
> +	struct xfs_buf		*bp = NULL;
> +	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
>  	int			error;
> +	int			i;
>  
> -	*bpp = NULL;
> -	error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
> -	if (!error)
> -		goto found;
> -	if (error != -ENOENT)
> -		return error;
> -	if (flags & XBF_INCORE)
> -		return -ENOENT;
> +	for (i = 0; i < nmaps; i++)
> +		cmap.bm_len += map[i].bm_len;
>  
> -	error = _xfs_buf_alloc(target, map, nmaps, flags, &new_bp);
> +	error = xfs_buf_map_verify(btp, &cmap);
>  	if (error)
>  		return error;
>  
> -	/*
> -	 * For buffers that fit entirely within a single page, first attempt to
> -	 * allocate the memory from the heap to minimise memory usage. If we
> -	 * can't get heap memory for these small buffers, we fall back to using
> -	 * the page allocator.
> -	 */
> -	if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
> -	    xfs_buf_alloc_kmem(new_bp, flags) < 0) {
> -		error = xfs_buf_alloc_pages(new_bp, flags);
> -		if (error)
> -			goto out_free_buf;
> -	}
> +	pag = xfs_perag_get(btp->bt_mount,
> +			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
>  
> -	error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
> -	if (error)
> -		goto out_free_buf;
> +	error = xfs_buf_lookup(pag, &cmap, flags, &bp);
> +	if (error && error != -ENOENT)
> +		goto out_put_perag;
> +
> +	/* cache hits always outnumber misses by at least 10:1 */
> +	if (unlikely(!bp)) {
> +		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
>  
> -	if (bp != new_bp)
> -		xfs_buf_free(new_bp);
> +		if (flags & XBF_INCORE)
> +			goto out_put_perag;
>  
> -found:
> +		/* xfs_buf_find_insert() consumes the perag reference. */
> +		error = xfs_buf_find_insert(btp, pag, &cmap, map, nmaps,
> +				flags, &bp);
> +		if (error)
> +			return error;
> +	} else {
> +		XFS_STATS_INC(btp->bt_mount, xb_get_locked);
> +		xfs_perag_put(pag);
> +	}
> +
> +	/* We do not hold a perag reference anymore. */
>  	if (!bp->b_addr) {
>  		error = _xfs_buf_map_pages(bp, flags);
>  		if (unlikely(error)) {
> -			xfs_warn_ratelimited(target->bt_mount,
> +			xfs_warn_ratelimited(btp->bt_mount,
>  				"%s: failed to map %u pages", __func__,
>  				bp->b_page_count);
>  			xfs_buf_relse(bp);
> @@ -736,12 +718,13 @@ xfs_buf_get_map(
>  	if (!(flags & XBF_READ))
>  		xfs_buf_ioerror(bp, 0);
>  
> -	XFS_STATS_INC(target->bt_mount, xb_get);
> +	XFS_STATS_INC(btp->bt_mount, xb_get);
>  	trace_xfs_buf_get(bp, flags, _RET_IP_);
>  	*bpp = bp;
>  	return 0;
> -out_free_buf:
> -	xfs_buf_free(new_bp);
> +
> +out_put_perag:
> +	xfs_perag_put(pag);
>  	return error;
>  }
>  
> -- 
> 2.36.1
> 


* [PATCH 3/6] xfs: merge xfs_buf_find() and xfs_buf_get_map()
  2022-07-07 23:52 [PATCH 0/6 v3] xfs: lockless buffer lookups Dave Chinner
@ 2022-07-07 23:52 ` Dave Chinner
  2022-07-10  0:15   ` Darrick J. Wong
  2022-07-11  5:14   ` Christoph Hellwig
  0 siblings, 2 replies; 9+ messages in thread
From: Dave Chinner @ 2022-07-07 23:52 UTC (permalink / raw)
  To: linux-xfs

From: Dave Chinner <dchinner@redhat.com>

Now that we have factored xfs_buf_find(), we can start separating
xfs_buf_get_map() into distinct fast and slow paths. We start by
moving the lookup map and perag setup to _get_map(), and then move
all the specifics of the fast path lookup into xfs_buf_lookup()
and call it directly from _get_map(). We then move all the slow path
code to xfs_buf_find_insert(), which is now also called directly
from _get_map(). As such, xfs_buf_find() now goes away.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
---
 fs/xfs/xfs_buf.c | 207 ++++++++++++++++++++++-------------------------
 1 file changed, 95 insertions(+), 112 deletions(-)

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 91dc691f40a8..81ca951b451a 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -531,18 +531,16 @@ xfs_buf_map_verify(
 
 static int
 xfs_buf_find_lock(
-	struct xfs_buftarg	*btp,
 	struct xfs_buf          *bp,
 	xfs_buf_flags_t		flags)
 {
 	if (!xfs_buf_trylock(bp)) {
 		if (flags & XBF_TRYLOCK) {
-			xfs_buf_rele(bp);
-			XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
+			XFS_STATS_INC(bp->b_mount, xb_busy_locked);
 			return -EAGAIN;
 		}
 		xfs_buf_lock(bp);
-		XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
+		XFS_STATS_INC(bp->b_mount, xb_get_locked_waited);
 	}
 
 	/*
@@ -558,113 +556,97 @@ xfs_buf_find_lock(
 	return 0;
 }
 
-static inline struct xfs_buf *
+static inline int
 xfs_buf_lookup(
 	struct xfs_perag	*pag,
-	struct xfs_buf_map	*map)
+	struct xfs_buf_map	*map,
+	xfs_buf_flags_t		flags,
+	struct xfs_buf		**bpp)
 {
 	struct xfs_buf          *bp;
+	int			error;
 
+	spin_lock(&pag->pag_buf_lock);
 	bp = rhashtable_lookup(&pag->pag_buf_hash, map, xfs_buf_hash_params);
-	if (!bp)
-		return NULL;
+	if (!bp) {
+		spin_unlock(&pag->pag_buf_lock);
+		return -ENOENT;
+	}
 	atomic_inc(&bp->b_hold);
-	return bp;
-}
+	spin_unlock(&pag->pag_buf_lock);
 
-/*
- * Insert the new_bp into the hash table. This consumes the perag reference
- * taken for the lookup.
- */
-static int
-xfs_buf_find_insert(
-	struct xfs_buftarg	*btp,
-	struct xfs_perag	*pag,
-	struct xfs_buf		*new_bp)
-{
-	/* No match found */
-	if (!new_bp) {
-		xfs_perag_put(pag);
-		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
-		return -ENOENT;
+	error = xfs_buf_find_lock(bp, flags);
+	if (error) {
+		xfs_buf_rele(bp);
+		return error;
 	}
 
-	/* the buffer keeps the perag reference until it is freed */
-	new_bp->b_pag = pag;
-	rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
-			       xfs_buf_hash_params);
+	trace_xfs_buf_find(bp, flags, _RET_IP_);
+	*bpp = bp;
 	return 0;
 }
 
 /*
- * Look up a buffer in the buffer cache and return it referenced and locked
- * in @found_bp.
- *
- * If @new_bp is supplied and we have a lookup miss, insert @new_bp into the
- * cache.
- *
- * If XBF_TRYLOCK is set in @flags, only try to lock the buffer and return
- * -EAGAIN if we fail to lock it.
- *
- * Return values are:
- *	-EFSCORRUPTED if have been supplied with an invalid address
- *	-EAGAIN on trylock failure
- *	-ENOENT if we fail to find a match and @new_bp was NULL
- *	0, with @found_bp:
- *		- @new_bp if we inserted it into the cache
- *		- the buffer we found and locked.
+ * Insert the new_bp into the hash table. This consumes the perag reference
+ * taken for the lookup regardless of the result of the insert.
  */
 static int
-xfs_buf_find(
+xfs_buf_find_insert(
 	struct xfs_buftarg	*btp,
+	struct xfs_perag	*pag,
+	struct xfs_buf_map	*cmap,
 	struct xfs_buf_map	*map,
 	int			nmaps,
 	xfs_buf_flags_t		flags,
-	struct xfs_buf		*new_bp,
-	struct xfs_buf		**found_bp)
+	struct xfs_buf		**bpp)
 {
-	struct xfs_perag	*pag;
+	struct xfs_buf		*new_bp;
 	struct xfs_buf		*bp;
-	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
 	int			error;
-	int			i;
-
-	*found_bp = NULL;
-
-	for (i = 0; i < nmaps; i++)
-		cmap.bm_len += map[i].bm_len;
 
-	error = xfs_buf_map_verify(btp, &cmap);
+	error = _xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
 	if (error)
-		return error;
+		goto out_drop_pag;
 
-	pag = xfs_perag_get(btp->bt_mount,
-			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
+	/*
+	 * For buffers that fit entirely within a single page, first attempt to
+	 * allocate the memory from the heap to minimise memory usage. If we
+	 * can't get heap memory for these small buffers, we fall back to using
+	 * the page allocator.
+	 */
+	if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
+	    xfs_buf_alloc_kmem(new_bp, flags) < 0) {
+		error = xfs_buf_alloc_pages(new_bp, flags);
+		if (error)
+			goto out_free_buf;
+	}
 
 	spin_lock(&pag->pag_buf_lock);
-	bp = xfs_buf_lookup(pag, &cmap);
-	if (bp)
-		goto found;
+	bp = rhashtable_lookup(&pag->pag_buf_hash, cmap, xfs_buf_hash_params);
+	if (bp) {
+		atomic_inc(&bp->b_hold);
+		spin_unlock(&pag->pag_buf_lock);
+		error = xfs_buf_find_lock(bp, flags);
+		if (error)
+			xfs_buf_rele(bp);
+		else
+			*bpp = bp;
+		goto out_free_buf;
+	}
 
-	error = xfs_buf_find_insert(btp, pag, new_bp);
+	/* The buffer keeps the perag reference until it is freed. */
+	new_bp->b_pag = pag;
+	rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
+			       xfs_buf_hash_params);
 	spin_unlock(&pag->pag_buf_lock);
-	if (error)
-		return error;
-	*found_bp = new_bp;
+	*bpp = new_bp;
 	return 0;
 
-found:
-	spin_unlock(&pag->pag_buf_lock);
+out_free_buf:
+	xfs_buf_free(new_bp);
+out_drop_pag:
 	xfs_perag_put(pag);
-
-	error = xfs_buf_find_lock(btp, bp, flags);
-	if (error)
-		return error;
-
-	trace_xfs_buf_find(bp, flags, _RET_IP_);
-	XFS_STATS_INC(btp->bt_mount, xb_get_locked);
-	*found_bp = bp;
-	return 0;
+	return error;
 }
 
 /*
@@ -674,54 +656,54 @@ xfs_buf_find(
  */
 int
 xfs_buf_get_map(
-	struct xfs_buftarg	*target,
+	struct xfs_buftarg	*btp,
 	struct xfs_buf_map	*map,
 	int			nmaps,
 	xfs_buf_flags_t		flags,
 	struct xfs_buf		**bpp)
 {
-	struct xfs_buf		*bp;
-	struct xfs_buf		*new_bp;
+	struct xfs_perag	*pag;
+	struct xfs_buf		*bp = NULL;
+	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
 	int			error;
+	int			i;
 
-	*bpp = NULL;
-	error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
-	if (!error)
-		goto found;
-	if (error != -ENOENT)
-		return error;
-	if (flags & XBF_INCORE)
-		return -ENOENT;
+	for (i = 0; i < nmaps; i++)
+		cmap.bm_len += map[i].bm_len;
 
-	error = _xfs_buf_alloc(target, map, nmaps, flags, &new_bp);
+	error = xfs_buf_map_verify(btp, &cmap);
 	if (error)
 		return error;
 
-	/*
-	 * For buffers that fit entirely within a single page, first attempt to
-	 * allocate the memory from the heap to minimise memory usage. If we
-	 * can't get heap memory for these small buffers, we fall back to using
-	 * the page allocator.
-	 */
-	if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
-	    xfs_buf_alloc_kmem(new_bp, flags) < 0) {
-		error = xfs_buf_alloc_pages(new_bp, flags);
-		if (error)
-			goto out_free_buf;
-	}
+	pag = xfs_perag_get(btp->bt_mount,
+			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
 
-	error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
-	if (error)
-		goto out_free_buf;
+	error = xfs_buf_lookup(pag, &cmap, flags, &bp);
+	if (error && error != -ENOENT)
+		goto out_put_perag;
+
+	/* cache hits always outnumber misses by at least 10:1 */
+	if (unlikely(!bp)) {
+		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
 
-	if (bp != new_bp)
-		xfs_buf_free(new_bp);
+		if (flags & XBF_INCORE)
+			goto out_put_perag;
 
-found:
+		/* xfs_buf_find_insert() consumes the perag reference. */
+		error = xfs_buf_find_insert(btp, pag, &cmap, map, nmaps,
+				flags, &bp);
+		if (error)
+			return error;
+	} else {
+		XFS_STATS_INC(btp->bt_mount, xb_get_locked);
+		xfs_perag_put(pag);
+	}
+
+	/* We do not hold a perag reference anymore. */
 	if (!bp->b_addr) {
 		error = _xfs_buf_map_pages(bp, flags);
 		if (unlikely(error)) {
-			xfs_warn_ratelimited(target->bt_mount,
+			xfs_warn_ratelimited(btp->bt_mount,
 				"%s: failed to map %u pages", __func__,
 				bp->b_page_count);
 			xfs_buf_relse(bp);
@@ -736,12 +718,13 @@ xfs_buf_get_map(
 	if (!(flags & XBF_READ))
 		xfs_buf_ioerror(bp, 0);
 
-	XFS_STATS_INC(target->bt_mount, xb_get);
+	XFS_STATS_INC(btp->bt_mount, xb_get);
 	trace_xfs_buf_get(bp, flags, _RET_IP_);
 	*bpp = bp;
 	return 0;
-out_free_buf:
-	xfs_buf_free(new_bp);
+
+out_put_perag:
+	xfs_perag_put(pag);
 	return error;
 }
 
-- 
2.36.1
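
The shape the patch settles on is a general lookup-or-insert scheme: an
optimistic locked lookup on the fast path, allocation outside the lock on
the miss path, and a repeated lookup under pag_buf_lock before insertion
to catch racing inserts. A minimal user-space sketch of that pattern,
with a pthread mutex standing in for pag_buf_lock and a single slot
standing in for the rhashtable (illustrative names, not XFS code):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct buf {
	int	key;
	int	hold;
};

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct buf *cache_slot;	/* stand-in for pag->pag_buf_hash */

/* Fast path: hold the lock only long enough to find and pin a match. */
struct buf *lookup_fast(int key)
{
	struct buf *bp = NULL;

	pthread_mutex_lock(&cache_lock);
	if (cache_slot && cache_slot->key == key) {
		bp = cache_slot;
		bp->hold++;		/* pin before dropping the lock */
	}
	pthread_mutex_unlock(&cache_lock);
	return bp;
}

/*
 * Slow path: allocate outside the lock, then look up again under the
 * lock, because another thread may have inserted the same buffer in
 * the meantime.
 */
int find_insert(int key, struct buf **bpp)
{
	struct buf *new_bp = calloc(1, sizeof(*new_bp));
	struct buf *bp;

	if (!new_bp)
		return -ENOMEM;
	new_bp->key = key;
	new_bp->hold = 1;

	pthread_mutex_lock(&cache_lock);
	bp = cache_slot;
	if (bp && bp->key == key) {
		bp->hold++;
		pthread_mutex_unlock(&cache_lock);
		free(new_bp);		/* lost the race; use the winner's */
		*bpp = bp;
		return 0;
	}
	cache_slot = new_bp;
	pthread_mutex_unlock(&cache_lock);
	*bpp = new_bp;
	return 0;
}

int get_map(int key, struct buf **bpp)
{
	struct buf *bp = lookup_fast(key);

	if (bp) {			/* the common case: cache hit */
		*bpp = bp;
		return 0;
	}
	return find_insert(key, bpp);	/* cache miss */
}

The point the diff above makes concrete is that because new_bp is
allocated outside the lock, the hash lookup must be repeated under the
lock before the insert, and the loser of the race frees its buffer and
takes a hold on the winner's.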



* Re: [PATCH 3/6] xfs: merge xfs_buf_find() and xfs_buf_get_map()
  2022-06-29 22:06     ` Darrick J. Wong
@ 2022-07-07 12:39       ` Dave Chinner
  0 siblings, 0 replies; 9+ messages in thread
From: Dave Chinner @ 2022-07-07 12:39 UTC (permalink / raw)
  To: Darrick J. Wong; +Cc: Christoph Hellwig, linux-xfs

On Wed, Jun 29, 2022 at 03:06:31PM -0700, Darrick J. Wong wrote:
> On Wed, Jun 29, 2022 at 12:40:08AM -0700, Christoph Hellwig wrote:
> > >  
> > > -static inline struct xfs_buf *
> > > -xfs_buf_find_fast(
> > > -	struct xfs_perag	*pag,
> > > -	struct xfs_buf_map	*map)
> > > -{
> > > -	struct xfs_buf          *bp;
> > > -
> > > -	bp = rhashtable_lookup(&pag->pag_buf_hash, map, xfs_buf_hash_params);
> > > -	if (!bp)
> > > -		return NULL;
> > > -	atomic_inc(&bp->b_hold);
> > > -	return bp;
> > > -}
> > 
> > > -static int
> > > -xfs_buf_find_insert(
> > > -	struct xfs_buftarg	*btp,
> > > -	struct xfs_perag	*pag,
> > 
> > Adding the functions just in the last patch and moving them around
> > here and slightly changing them seems a little counterproductive.
> > I think just merging the two might actually end up with a result
> > that is easier to review.
> 
> I read the second patch and it makes sense, but I'm also curious if
> hch's suggestion here would make this change easier to read?

I moved the initial placement of these functions around and it took
a big chunk out of the diff in this patch. That should make it
easier to read without combining the two patches together...

Cheers,

Dave.
-- 
Dave Chinner
david@fromorbit.com


* Re: [PATCH 3/6] xfs: merge xfs_buf_find() and xfs_buf_get_map()
  2022-06-29  7:40   ` Christoph Hellwig
@ 2022-06-29 22:06     ` Darrick J. Wong
  2022-07-07 12:39       ` Dave Chinner
  0 siblings, 1 reply; 9+ messages in thread
From: Darrick J. Wong @ 2022-06-29 22:06 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: Dave Chinner, linux-xfs

On Wed, Jun 29, 2022 at 12:40:08AM -0700, Christoph Hellwig wrote:
> >  
> > -static inline struct xfs_buf *
> > -xfs_buf_find_fast(
> > -	struct xfs_perag	*pag,
> > -	struct xfs_buf_map	*map)
> > -{
> > -	struct xfs_buf          *bp;
> > -
> > -	bp = rhashtable_lookup(&pag->pag_buf_hash, map, xfs_buf_hash_params);
> > -	if (!bp)
> > -		return NULL;
> > -	atomic_inc(&bp->b_hold);
> > -	return bp;
> > -}
> 
> > -static int
> > -xfs_buf_find_insert(
> > -	struct xfs_buftarg	*btp,
> > -	struct xfs_perag	*pag,
> 
> Adding the functions just in the last patch and moving them around
> here and slightly changing them seems a little counterproductive.
> I think just merging the two might actually end up with a result
> that is easier to review.

I read the second patch and it makes sense, but I'm also curious if
hch's suggestion here would make this change easier to read?

--D


* Re: [PATCH 3/6] xfs: merge xfs_buf_find() and xfs_buf_get_map()
  2022-06-27  6:08 ` [PATCH 3/6] xfs: merge xfs_buf_find() and xfs_buf_get_map() Dave Chinner
@ 2022-06-29  7:40   ` Christoph Hellwig
  2022-06-29 22:06     ` Darrick J. Wong
  0 siblings, 1 reply; 9+ messages in thread
From: Christoph Hellwig @ 2022-06-29  7:40 UTC (permalink / raw)
  To: Dave Chinner; +Cc: linux-xfs

>  
> -static inline struct xfs_buf *
> -xfs_buf_find_fast(
> -	struct xfs_perag	*pag,
> -	struct xfs_buf_map	*map)
> -{
> -	struct xfs_buf          *bp;
> -
> -	bp = rhashtable_lookup(&pag->pag_buf_hash, map, xfs_buf_hash_params);
> -	if (!bp)
> -		return NULL;
> -	atomic_inc(&bp->b_hold);
> -	return bp;
> -}

> -static int
> -xfs_buf_find_insert(
> -	struct xfs_buftarg	*btp,
> -	struct xfs_perag	*pag,

Adding the functions just in the last patch and moving them around
here and slightly changing them seems a little counterproductive.
I think just merging the two might actually end up with a result
that is easier to review.



* [PATCH 3/6] xfs: merge xfs_buf_find() and xfs_buf_get_map()
  2022-06-27  6:08 [PATCH 0/6 v2] xfs: lockless buffer lookups Dave Chinner
@ 2022-06-27  6:08 ` Dave Chinner
  2022-06-29  7:40   ` Christoph Hellwig
  0 siblings, 1 reply; 9+ messages in thread
From: Dave Chinner @ 2022-06-27  6:08 UTC (permalink / raw)
  To: linux-xfs

From: Dave Chinner <dchinner@redhat.com>

Now that we have factored xfs_buf_find(), we can start separating
xfs_buf_get_map() into distinct fast and slow paths. We start by
moving the lookup map and perag setup to _get_map(), and then move
all the specifics of the fast path lookup into xfs_buf_find_fast()
and call it directly from _get_map(). We then move all the slow path
code to xfs_buf_find_insert(), which is now also called directly
from _get_map(). As such, xfs_buf_find() now goes away.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
---
 fs/xfs/xfs_buf.c | 233 ++++++++++++++++++++++-------------------------
 1 file changed, 108 insertions(+), 125 deletions(-)

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 95d4b428aec0..469e84fe21aa 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -529,58 +529,18 @@ xfs_buf_find_verify(
 	return 0;
 }
 
-static inline struct xfs_buf *
-xfs_buf_find_fast(
-	struct xfs_perag	*pag,
-	struct xfs_buf_map	*map)
-{
-	struct xfs_buf          *bp;
-
-	bp = rhashtable_lookup(&pag->pag_buf_hash, map, xfs_buf_hash_params);
-	if (!bp)
-		return NULL;
-	atomic_inc(&bp->b_hold);
-	return bp;
-}
-
-/*
- * Insert the new_bp into the hash table. This consumes the perag reference
- * taken for the lookup.
- */
-static int
-xfs_buf_find_insert(
-	struct xfs_buftarg	*btp,
-	struct xfs_perag	*pag,
-	struct xfs_buf		*new_bp)
-{
-	/* No match found */
-	if (!new_bp) {
-		xfs_perag_put(pag);
-		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
-		return -ENOENT;
-	}
-
-	/* the buffer keeps the perag reference until it is freed */
-	new_bp->b_pag = pag;
-	rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
-			       xfs_buf_hash_params);
-	return 0;
-}
-
 static int
 xfs_buf_find_lock(
-	struct xfs_buftarg	*btp,
 	struct xfs_buf          *bp,
 	xfs_buf_flags_t		flags)
 {
 	if (!xfs_buf_trylock(bp)) {
 		if (flags & XBF_TRYLOCK) {
-			xfs_buf_rele(bp);
-			XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
+			XFS_STATS_INC(bp->b_mount, xb_busy_locked);
 			return -EAGAIN;
 		}
 		xfs_buf_lock(bp);
-		XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
+		XFS_STATS_INC(bp->b_mount, xb_get_locked_waited);
 	}
 
 	/*
@@ -596,75 +556,97 @@ xfs_buf_find_lock(
 	return 0;
 }
 
+static inline int
+xfs_buf_find_fast(
+	struct xfs_perag	*pag,
+	struct xfs_buf_map	*map,
+	xfs_buf_flags_t		flags,
+	struct xfs_buf		**bpp)
+{
+	struct xfs_buf          *bp;
+	int			error;
+
+	spin_lock(&pag->pag_buf_lock);
+	bp = rhashtable_lookup(&pag->pag_buf_hash, map, xfs_buf_hash_params);
+	if (!bp) {
+		spin_unlock(&pag->pag_buf_lock);
+		return -ENOENT;
+	}
+	atomic_inc(&bp->b_hold);
+	spin_unlock(&pag->pag_buf_lock);
+
+	error = xfs_buf_find_lock(bp, flags);
+	if (error) {
+		xfs_buf_rele(bp);
+		return error;
+	}
+
+	trace_xfs_buf_find(bp, flags, _RET_IP_);
+	*bpp = bp;
+	return 0;
+}
+
 /*
- * Look up a buffer in the buffer cache and return it referenced and locked
- * in @found_bp.
- *
- * If @new_bp is supplied and we have a lookup miss, insert @new_bp into the
- * cache.
- *
- * If XBF_TRYLOCK is set in @flags, only try to lock the buffer and return
- * -EAGAIN if we fail to lock it.
- *
- * Return values are:
- *	-EFSCORRUPTED if have been supplied with an invalid address
- *	-EAGAIN on trylock failure
- *	-ENOENT if we fail to find a match and @new_bp was NULL
- *	0, with @found_bp:
- *		- @new_bp if we inserted it into the cache
- *		- the buffer we found and locked.
+ * Insert the new_bp into the hash table. This consumes the perag reference
+ * taken for the lookup regardless of the result of the insert.
  */
 static int
-xfs_buf_find(
+xfs_buf_find_insert(
 	struct xfs_buftarg	*btp,
+	struct xfs_perag	*pag,
+	struct xfs_buf_map	*cmap,
 	struct xfs_buf_map	*map,
 	int			nmaps,
 	xfs_buf_flags_t		flags,
-	struct xfs_buf		*new_bp,
-	struct xfs_buf		**found_bp)
+	struct xfs_buf		**bpp)
 {
-	struct xfs_perag	*pag;
+	struct xfs_buf		*new_bp;
 	struct xfs_buf		*bp;
-	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
 	int			error;
-	int			i;
-
-	*found_bp = NULL;
 
-	for (i = 0; i < nmaps; i++)
-		cmap.bm_len += map[i].bm_len;
-
-	error = xfs_buf_find_verify(btp, &cmap);
+	error = _xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
 	if (error)
-		return error;
+		goto out_drop_pag;
 
-	pag = xfs_perag_get(btp->bt_mount,
-			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
+	/*
+	 * For buffers that fit entirely within a single page, first attempt to
+	 * allocate the memory from the heap to minimise memory usage. If we
+	 * can't get heap memory for these small buffers, we fall back to using
+	 * the page allocator.
+	 */
+	if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
+	    xfs_buf_alloc_kmem(new_bp, flags) < 0) {
+		error = xfs_buf_alloc_pages(new_bp, flags);
+		if (error)
+			goto out_free_buf;
+	}
 
 	spin_lock(&pag->pag_buf_lock);
-	bp = xfs_buf_find_fast(pag, &cmap);
-	if (bp)
-		goto found;
+	bp = rhashtable_lookup(&pag->pag_buf_hash, cmap, xfs_buf_hash_params);
+	if (bp) {
+		atomic_inc(&bp->b_hold);
+		spin_unlock(&pag->pag_buf_lock);
+		error = xfs_buf_find_lock(bp, flags);
+		if (error)
+			xfs_buf_rele(bp);
+		else
+			*bpp = bp;
+		goto out_free_buf;
+	}
 
-	error = xfs_buf_find_insert(btp, pag, new_bp);
+	/* The buffer keeps the perag reference until it is freed. */
+	new_bp->b_pag = pag;
+	rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
+			       xfs_buf_hash_params);
 	spin_unlock(&pag->pag_buf_lock);
-	if (error)
-		return error;
-	*found_bp = new_bp;
+	*bpp = new_bp;
 	return 0;
 
-found:
-	spin_unlock(&pag->pag_buf_lock);
+out_free_buf:
+	xfs_buf_free(new_bp);
+out_drop_pag:
 	xfs_perag_put(pag);
-
-	error = xfs_buf_find_lock(btp, bp, flags);
-	if (error)
-		return error;
-
-	trace_xfs_buf_find(bp, flags, _RET_IP_);
-	XFS_STATS_INC(btp->bt_mount, xb_get_locked);
-	*found_bp = bp;
-	return 0;
+	return error;
 }
 
 /*
@@ -674,54 +656,54 @@ xfs_buf_find(
  */
 int
 xfs_buf_get_map(
-	struct xfs_buftarg	*target,
+	struct xfs_buftarg	*btp,
 	struct xfs_buf_map	*map,
 	int			nmaps,
 	xfs_buf_flags_t		flags,
 	struct xfs_buf		**bpp)
 {
-	struct xfs_buf		*bp;
-	struct xfs_buf		*new_bp;
+	struct xfs_perag	*pag;
+	struct xfs_buf		*bp = NULL;
+	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
 	int			error;
+	int			i;
 
-	*bpp = NULL;
-	error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
-	if (!error)
-		goto found;
-	if (error != -ENOENT)
-		return error;
-	if (flags & XBF_INCORE)
-		return -ENOENT;
+	for (i = 0; i < nmaps; i++)
+		cmap.bm_len += map[i].bm_len;
 
-	error = _xfs_buf_alloc(target, map, nmaps, flags, &new_bp);
+	error = xfs_buf_find_verify(btp, &cmap);
 	if (error)
 		return error;
 
-	/*
-	 * For buffers that fit entirely within a single page, first attempt to
-	 * allocate the memory from the heap to minimise memory usage. If we
-	 * can't get heap memory for these small buffers, we fall back to using
-	 * the page allocator.
-	 */
-	if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
-	    xfs_buf_alloc_kmem(new_bp, flags) < 0) {
-		error = xfs_buf_alloc_pages(new_bp, flags);
-		if (error)
-			goto out_free_buf;
-	}
+	pag = xfs_perag_get(btp->bt_mount,
+			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
 
-	error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
-	if (error)
-		goto out_free_buf;
+	error = xfs_buf_find_fast(pag, &cmap, flags, &bp);
+	if (error && error != -ENOENT)
+		goto out_put_perag;
 
-	if (bp != new_bp)
-		xfs_buf_free(new_bp);
+	/* cache hits always outnumber misses by at least 10:1 */
+	if (unlikely(!bp)) {
+		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
 
-found:
+		if (flags & XBF_INCORE)
+			goto out_put_perag;
+
+		/* xfs_buf_find_insert() consumes the perag reference. */
+		error = xfs_buf_find_insert(btp, pag, &cmap, map, nmaps,
+				flags, &bp);
+		if (error)
+			return error;
+	} else {
+		XFS_STATS_INC(btp->bt_mount, xb_get_locked);
+		xfs_perag_put(pag);
+	}
+
+	/* We do not hold a perag reference anymore. */
 	if (!bp->b_addr) {
 		error = _xfs_buf_map_pages(bp, flags);
 		if (unlikely(error)) {
-			xfs_warn_ratelimited(target->bt_mount,
+			xfs_warn_ratelimited(btp->bt_mount,
 				"%s: failed to map %u pages", __func__,
 				bp->b_page_count);
 			xfs_buf_relse(bp);
@@ -736,12 +718,13 @@ xfs_buf_get_map(
 	if (!(flags & XBF_READ))
 		xfs_buf_ioerror(bp, 0);
 
-	XFS_STATS_INC(target->bt_mount, xb_get);
+	XFS_STATS_INC(btp->bt_mount, xb_get);
 	trace_xfs_buf_get(bp, flags, _RET_IP_);
 	*bpp = bp;
 	return 0;
-out_free_buf:
-	xfs_buf_free(new_bp);
+
+out_put_perag:
+	xfs_perag_put(pag);
 	return error;
 }
 
-- 
2.36.1



Thread overview: 9+ messages
2022-06-27 22:09 [PATCH 3/6] xfs: merge xfs_buf_find() and xfs_buf_get_map() kernel test robot
  -- strict thread matches above, loose matches on Subject: below --
2022-07-07 23:52 [PATCH 0/6 v3] xfs: lockless buffer lookups Dave Chinner
2022-07-07 23:52 ` [PATCH 3/6] xfs: merge xfs_buf_find() and xfs_buf_get_map() Dave Chinner
2022-07-10  0:15   ` Darrick J. Wong
2022-07-11  5:14   ` Christoph Hellwig
2022-07-12  0:01     ` Dave Chinner
2022-06-27  6:08 [PATCH 0/6 v2] xfs: lockless buffer lookups Dave Chinner
2022-06-27  6:08 ` [PATCH 3/6] xfs: merge xfs_buf_find() and xfs_buf_get_map() Dave Chinner
2022-06-29  7:40   ` Christoph Hellwig
2022-06-29 22:06     ` Darrick J. Wong
2022-07-07 12:39       ` Dave Chinner
