From: "Darrick J. Wong" <djwong@kernel.org>
To: Dave Chinner <david@fromorbit.com>
Cc: linux-xfs@vger.kernel.org, hch@lst.de
Subject: Re: [PATCH 01/10] xfs: split up xfs_buf_allocate_memory
Date: Thu, 27 May 2021 15:48:58 -0700	[thread overview]
Message-ID: <20210527224858.GA2402049@locust> (raw)
In-Reply-To: <20210526224722.1111377-2-david@fromorbit.com>

On Thu, May 27, 2021 at 08:47:13AM +1000, Dave Chinner wrote:
> From: Dave Chinner <dchinner@redhat.com>
> 
> Based on a patch from Christoph Hellwig.
> 
> This splits out the heap allocation and page allocation portions of
> the buffer memory allocation into two separate helper functions.
> 
> Signed-off-by: Dave Chinner <dchinner@redhat.com>
> ---
>  fs/xfs/xfs_buf.c | 126 ++++++++++++++++++++++++++++-------------------
>  1 file changed, 74 insertions(+), 52 deletions(-)
> 
> diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
> index 592800c8852f..2e35d344a69b 100644
> --- a/fs/xfs/xfs_buf.c
> +++ b/fs/xfs/xfs_buf.c
> @@ -347,65 +347,55 @@ xfs_buf_free(
>  	kmem_cache_free(xfs_buf_zone, bp);
>  }
>  
> -/*
> - * Allocates all the pages for buffer in question and builds it's page list.
> - */
> -STATIC int
> -xfs_buf_allocate_memory(
> -	struct xfs_buf		*bp,
> -	uint			flags)
> +static int
> +xfs_buf_alloc_kmem(
> +	struct xfs_buf	*bp,
> +	size_t		size,
> +	xfs_buf_flags_t	flags)
>  {
> -	size_t			size;
> -	size_t			nbytes, offset;
> -	gfp_t			gfp_mask = xb_to_gfp(flags);
> -	unsigned short		page_count, i;
> -	xfs_off_t		start, end;
> -	int			error;
> -	xfs_km_flags_t		kmflag_mask = 0;
> +	int		align_mask = xfs_buftarg_dma_alignment(bp->b_target);
> +	xfs_km_flags_t	kmflag_mask = KM_NOFS;
>  
> -	/*
> -	 * assure zeroed buffer for non-read cases.
> -	 */
> -	if (!(flags & XBF_READ)) {
> +	/* Assure zeroed buffer for non-read cases. */
> +	if (!(flags & XBF_READ))
>  		kmflag_mask |= KM_ZERO;
> -		gfp_mask |= __GFP_ZERO;
> -	}
>  
> -	/*
> -	 * for buffers that are contained within a single page, just allocate
> -	 * the memory from the heap - there's no need for the complexity of
> -	 * page arrays to keep allocation down to order 0.
> -	 */
> -	size = BBTOB(bp->b_length);
> -	if (size < PAGE_SIZE) {
> -		int align_mask = xfs_buftarg_dma_alignment(bp->b_target);
> -		bp->b_addr = kmem_alloc_io(size, align_mask,
> -					   KM_NOFS | kmflag_mask);
> -		if (!bp->b_addr) {
> -			/* low memory - use alloc_page loop instead */
> -			goto use_alloc_page;
> -		}
> +	bp->b_addr = kmem_alloc_io(size, align_mask, kmflag_mask);
> +	if (!bp->b_addr)
> +		return -ENOMEM;
>  
> -		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
> -		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
> -			/* b_addr spans two pages - use alloc_page instead */
> -			kmem_free(bp->b_addr);
> -			bp->b_addr = NULL;
> -			goto use_alloc_page;
> -		}
> -		bp->b_offset = offset_in_page(bp->b_addr);
> -		bp->b_pages = bp->b_page_array;
> -		bp->b_pages[0] = kmem_to_page(bp->b_addr);
> -		bp->b_page_count = 1;
> -		bp->b_flags |= _XBF_KMEM;
> -		return 0;
> +	if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
> +	    ((unsigned long)bp->b_addr & PAGE_MASK)) {
> +		/* b_addr spans two pages - use alloc_page instead */
> +		kmem_free(bp->b_addr);
> +		bp->b_addr = NULL;
> +		return -ENOMEM;
>  	}
> +	bp->b_offset = offset_in_page(bp->b_addr);
> +	bp->b_pages = bp->b_page_array;
> +	bp->b_pages[0] = kmem_to_page(bp->b_addr);
> +	bp->b_page_count = 1;
> +	bp->b_flags |= _XBF_KMEM;
> +	return 0;
> +}
> +
> +static int
> +xfs_buf_alloc_pages(
> +	struct xfs_buf	*bp,
> +	uint		page_count,
> +	xfs_buf_flags_t	flags)
> +{
> +	gfp_t		gfp_mask = xb_to_gfp(flags);
> +	size_t		size;
> +	size_t		offset;
> +	size_t		nbytes;
> +	int		i;
> +	int		error;
> +
> +	/* Assure zeroed buffer for non-read cases. */
> +	if (!(flags & XBF_READ))
> +		gfp_mask |= __GFP_ZERO;
>  
> -use_alloc_page:
> -	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
> -	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
> -								>> PAGE_SHIFT;
> -	page_count = end - start;
>  	error = _xfs_buf_get_pages(bp, page_count);
>  	if (unlikely(error))
>  		return error;
> @@ -458,6 +448,38 @@ xfs_buf_allocate_memory(
>  	return error;
>  }
>  
> +
> +/*
> + * Allocates all the pages for the buffer in question and builds its page list.
> + */
> +static int
> +xfs_buf_allocate_memory(
> +	struct xfs_buf		*bp,
> +	uint			flags)
> +{
> +	size_t			size;
> +	xfs_off_t		start, end;
> +	int			error;
> +
> +	/*
> +	 * For buffers that fit entirely within a single page, first attempt to
> +	 * allocate the memory from the heap to minimise memory usage. If we
> +	 * can't get heap memory for these small buffers, we fall back to using
> +	 * the page allocator.
> +	 */
> +	size = BBTOB(bp->b_length);
> +	if (size < PAGE_SIZE) {
> +		error = xfs_buf_alloc_kmem(bp, size, flags);
> +		if (!error)
> +			return 0;
> +	}
> +
> +	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
> +	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
> +								>> PAGE_SHIFT;

Could these use round_down() and round_up()?
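
Something like this, maybe (a totally untested sketch, assuming the
generic round_down()/round_up() helpers from linux/math.h are fine to
use here):

	/* untested sketch, same semantics as the open-coded shifts above */
	start = round_down(BBTOB(bp->b_maps[0].bm_bn), PAGE_SIZE)
								>> PAGE_SHIFT;
	end = round_up(BBTOB(bp->b_maps[0].bm_bn + bp->b_length), PAGE_SIZE)
								>> PAGE_SHIFT;

	return xfs_buf_alloc_pages(bp, end - start, flags);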

As a straight translation this seems fine, but you might as well take
the opportunity to declutter some of this. :)

Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D

> +	return xfs_buf_alloc_pages(bp, end - start, flags);
> +}
> +
>  /*
>   *	Map buffer into kernel address-space if necessary.
>   */
> -- 
> 2.31.1
> 

Thread overview: 25+ messages
2021-05-26 22:47 [PATCH 00/10] xfs: buffer bulk page allocation and cleanups Dave Chinner
2021-05-26 22:47 ` [PATCH 01/10] xfs: split up xfs_buf_allocate_memory Dave Chinner
2021-05-27 22:48   ` Darrick J. Wong [this message]
2021-05-27 23:10     ` Darrick J. Wong
2021-05-26 22:47 ` [PATCH 02/10] xfs: use xfs_buf_alloc_pages for uncached buffers Dave Chinner
2021-05-27 22:50   ` Darrick J. Wong
2021-05-26 22:47 ` [PATCH 03/10] xfs: use alloc_pages_bulk_array() for buffers Dave Chinner
2021-05-27 22:59   ` Darrick J. Wong
2021-05-27 23:01     ` Darrick J. Wong
2021-05-26 22:47 ` [PATCH 04/10] xfs: merge _xfs_buf_get_pages() Dave Chinner
2021-05-27 23:02   ` Darrick J. Wong
2021-05-26 22:47 ` [PATCH 05/10] xfs: move page freeing into _xfs_buf_free_pages() Dave Chinner
2021-05-27 23:03   ` Darrick J. Wong
2021-05-26 22:47 ` [PATCH 06/10] xfs: remove ->b_offset handling for page backed buffers Dave Chinner
2021-05-27 23:09   ` Darrick J. Wong
2021-06-01  1:46     ` Dave Chinner
2021-05-26 22:47 ` [PATCH 07/10] xfs: simplify the b_page_count calculation Dave Chinner
2021-05-27 23:15   ` Darrick J. Wong
2021-05-27 23:29     ` Dave Chinner
2021-05-26 22:47 ` [PATCH 08/10] xfs: get rid of xb_to_gfp() Dave Chinner
2021-05-27 23:12   ` Darrick J. Wong
2021-05-26 22:47 ` [PATCH 09/10] xfs: cleanup error handling in xfs_buf_get_map Dave Chinner
2021-05-27 23:16   ` Darrick J. Wong
2021-05-26 22:47 ` [PATCH 10/10] xfs: merge xfs_buf_allocate_memory Dave Chinner
2021-05-27 23:17   ` Darrick J. Wong
