From: Christoph Hellwig <hch@lst.de>
To: linux-xfs@vger.kernel.org
Cc: Dave Chinner <david@fromorbit.com>
Subject: [PATCH 08/11] xfs: centralize page allocation and freeing for buffers
Date: Wed, 19 May 2021 21:08:57 +0200	[thread overview]
Message-ID: <20210519190900.320044-9-hch@lst.de> (raw)
In-Reply-To: <20210519190900.320044-1-hch@lst.de>

Factor out two helpers that do everything needed for allocating and
freeing pages that back a buffer, and remove the duplication between
the different interfaces.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/xfs/xfs_buf.c | 110 ++++++++++++++++-------------------------------
 1 file changed, 37 insertions(+), 73 deletions(-)
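
[Not part of the patch: a minimal userspace C sketch of the allocate/free
pattern the two new helpers centralize, for readers skimming the archive
rather than the diff.  The fake_buf_* names, the struct layout, the
PAGE_SIZE/XB_PAGES values and the malloc()/calloc()/free() calls are
stand-ins of my own for illustration, not kernel APIs, and the retry cap
of 10 is arbitrary where the real xfs_buf_alloc_pages() keeps waiting for
reclaim and retrying for non-readahead buffers.  The diff below is the
authoritative code.]

#include <stdbool.h>
#include <stdlib.h>

#define XB_PAGES	4		/* inline page-pointer slots */
#define PAGE_SIZE	4096

struct fake_buf {
	size_t		b_length;	/* buffer length in bytes */
	unsigned int	b_page_count;
	void		**b_pages;	/* b_page_array or a heap array */
	void		*b_page_array[XB_PAGES];
};

/* Free all pages and, if it was heap-allocated, the page-pointer array. */
static void fake_buf_free_pages(struct fake_buf *bp)
{
	unsigned int	i;

	for (i = 0; i < bp->b_page_count; i++)
		free(bp->b_pages[i]);

	if (bp->b_pages != bp->b_page_array)
		free(bp->b_pages);
	bp->b_pages = NULL;
	bp->b_page_count = 0;
}

/*
 * Set up the page-pointer array (inline for small buffers, heap for
 * large ones), then allocate the pages themselves.  With fail_fast set,
 * a failed page allocation unwinds everything; otherwise retry a bounded
 * number of times (the kernel helper instead waits for memory reclaim
 * before retrying).
 */
static int fake_buf_alloc_pages(struct fake_buf *bp, bool fail_fast)
{
	unsigned int	i;

	bp->b_page_count = (bp->b_length + PAGE_SIZE - 1) / PAGE_SIZE;
	if (bp->b_page_count > XB_PAGES) {
		bp->b_pages = calloc(bp->b_page_count, sizeof(void *));
		if (!bp->b_pages)
			return -1;
	} else {
		bp->b_pages = bp->b_page_array;
	}

	for (i = 0; i < bp->b_page_count; i++) {
		unsigned int	retries = 0;
retry:
		bp->b_pages[i] = malloc(PAGE_SIZE);
		if (!bp->b_pages[i]) {
			if (fail_fast || ++retries > 10) {
				bp->b_page_count = i;
				fake_buf_free_pages(bp);
				return -1;
			}
			goto retry;
		}
	}
	return 0;
}

[In the patch itself, xfs_buf_get_map() passes (flags & XBF_READ_AHEAD)
as the fail-fast argument so readahead gives up early, while
xfs_buf_get_uncached() always passes true.]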

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 76a107e3cb2a22..31aff8323605cd 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -273,35 +273,17 @@ _xfs_buf_alloc(
 }
 
 /*
- *	Allocate a page array capable of holding a specified number
- *	of pages, and point the page buf at it.
+ * Free all pages allocated to the buffer including the page map.
  */
-STATIC int
-_xfs_buf_get_pages(
-	struct xfs_buf		*bp)
+static void
+xfs_buf_free_pages(
+	struct xfs_buf	*bp)
 {
-	ASSERT(bp->b_pages == NULL);
-
-	bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE);
-	if (bp->b_page_count > XB_PAGES) {
-		bp->b_pages = kmem_zalloc(sizeof(struct page *) *
-						bp->b_page_count, KM_NOFS);
-		if (!bp->b_pages)
-			return -ENOMEM;
-	} else {
-		bp->b_pages = bp->b_page_array;
-	}
+	unsigned int	i;
 
-	return 0;
-}
+	for (i = 0; i < bp->b_page_count; i++)
+		__free_page(bp->b_pages[i]);
 
-/*
- *	Frees b_pages if it was allocated.
- */
-STATIC void
-_xfs_buf_free_pages(
-	struct xfs_buf	*bp)
-{
 	if (bp->b_pages != bp->b_page_array) {
 		kmem_free(bp->b_pages);
 		bp->b_pages = NULL;
@@ -324,22 +306,14 @@ xfs_buf_free(
 	ASSERT(list_empty(&bp->b_lru));
 
 	if (bp->b_flags & _XBF_PAGES) {
-		uint		i;
-
 		if (xfs_buf_is_vmapped(bp))
 			vm_unmap_ram(bp->b_addr, bp->b_page_count);
-
-		for (i = 0; i < bp->b_page_count; i++) {
-			struct page	*page = bp->b_pages[i];
-
-			__free_page(page);
-		}
+		xfs_buf_free_pages(bp);
 		if (current->reclaim_state)
 			current->reclaim_state->reclaimed_slab +=
 							bp->b_page_count;
 	} else if (bp->b_flags & _XBF_KMEM)
 		kmem_free(bp->b_addr);
-	_xfs_buf_free_pages(bp);
 	xfs_buf_free_maps(bp);
 	kmem_cache_free(xfs_buf_zone, bp);
 }
@@ -380,34 +354,33 @@ xfs_buf_alloc_slab(
 static int
 xfs_buf_alloc_pages(
 	struct xfs_buf		*bp,
-	uint			flags)
+	gfp_t			gfp_mask,
+	bool			fail_fast)
 {
-	gfp_t			gfp_mask = xb_to_gfp(flags);
-	unsigned short		i;
-	int			error;
-
-	/*
-	 * assure zeroed buffer for non-read cases.
-	 */
-	if (!(flags & XBF_READ))
-		gfp_mask |= __GFP_ZERO;
+	int			i;
 
-	error = _xfs_buf_get_pages(bp);
-	if (unlikely(error))
-		return error;
+	ASSERT(bp->b_pages == NULL);
 
-	bp->b_flags |= _XBF_PAGES;
+	bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE);
+	if (bp->b_page_count > XB_PAGES) {
+		bp->b_pages = kmem_zalloc(sizeof(struct page *) *
+						bp->b_page_count, KM_NOFS);
+		if (!bp->b_pages)
+			return -ENOMEM;
+	} else {
+		bp->b_pages = bp->b_page_array;
+	}
 
 	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page;
 		uint		retries = 0;
 retry:
 		page = alloc_page(gfp_mask);
-		if (unlikely(page == NULL)) {
-			if (flags & XBF_READ_AHEAD) {
+		if (unlikely(!page)) {
+			if (fail_fast) {
 				bp->b_page_count = i;
-				error = -ENOMEM;
-				goto out_free_pages;
+				xfs_buf_free_pages(bp);
+				return -ENOMEM;
 			}
 
 			/*
@@ -429,13 +402,9 @@ xfs_buf_alloc_pages(
 
 		bp->b_pages[i] = page;
 	}
-	return 0;
 
-out_free_pages:
-	for (i = 0; i < bp->b_page_count; i++)
-		__free_page(bp->b_pages[i]);
-	bp->b_flags &= ~_XBF_PAGES;
-	return error;
+	bp->b_flags |= _XBF_PAGES;
+	return 0;
 }
 
 /*
@@ -706,7 +675,13 @@ xfs_buf_get_map(
 	 */
 	if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
 	    xfs_buf_alloc_slab(new_bp, flags) < 0) {
-		error = xfs_buf_alloc_pages(new_bp, flags);
+		gfp_t			gfp_mask = xb_to_gfp(flags);
+
+		/* assure a zeroed buffer for non-read cases */
+		if (!(flags & XBF_READ))
+			gfp_mask |= __GFP_ZERO;
+		error = xfs_buf_alloc_pages(new_bp, gfp_mask,
+					   flags & XBF_READ_AHEAD);
 		if (error)
 			goto out_free_buf;
 	}
@@ -936,7 +911,7 @@ xfs_buf_get_uncached(
 	int			flags,
 	struct xfs_buf		**bpp)
 {
-	int			error, i;
+	int			error;
 	struct xfs_buf		*bp;
 	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
 
@@ -947,19 +922,10 @@ xfs_buf_get_uncached(
 	if (error)
 		goto fail;
 
-	error = _xfs_buf_get_pages(bp);
+	error = xfs_buf_alloc_pages(bp, xb_to_gfp(flags), true);
 	if (error)
 		goto fail_free_buf;
 
-	for (i = 0; i < bp->b_page_count; i++) {
-		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
-		if (!bp->b_pages[i]) {
-			error = -ENOMEM;
-			goto fail_free_mem;
-		}
-	}
-	bp->b_flags |= _XBF_PAGES;
-
 	error = _xfs_buf_map_pages(bp, 0);
 	if (unlikely(error)) {
 		xfs_warn(target->bt_mount,
@@ -972,9 +938,7 @@ xfs_buf_get_uncached(
 	return 0;
 
  fail_free_mem:
-	while (--i >= 0)
-		__free_page(bp->b_pages[i]);
-	_xfs_buf_free_pages(bp);
+	xfs_buf_free_pages(bp);
  fail_free_buf:
 	xfs_buf_free_maps(bp);
 	kmem_cache_free(xfs_buf_zone, bp);
-- 
2.30.2


Thread overview: 23+ messages
2021-05-19 19:08 RFC: buffer cache backing page allocation cleanup Christoph Hellwig
2021-05-19 19:08 ` [PATCH 01/11] xfs: cleanup error handling in xfs_buf_get_map Christoph Hellwig
2021-05-20 23:43   ` Darrick J. Wong
2021-05-19 19:08 ` [PATCH 02/11] xfs: split xfs_buf_allocate_memory Christoph Hellwig
2021-05-19 22:36   ` Dave Chinner
2021-05-19 19:08 ` [PATCH 03/11] xfs: remove ->b_offset handling for page backed buffers Christoph Hellwig
2021-05-19 22:27   ` Dave Chinner
2021-05-19 19:08 ` [PATCH 04/11] xfs: cleanup _xfs_buf_get_pages Christoph Hellwig
2021-05-19 22:40   ` Dave Chinner
2021-05-20  5:23     ` Christoph Hellwig
2021-05-25 22:43       ` Dave Chinner
2021-05-19 19:08 ` [PATCH 05/11] xfs: remove the xb_page_found stat counter in xfs_buf_alloc_pages Christoph Hellwig
2021-05-19 22:55   ` Dave Chinner
2021-05-19 19:08 ` [PATCH 06/11] xfs: remove the size and nbytes variables " Christoph Hellwig
2021-05-19 22:56   ` Dave Chinner
2021-05-19 19:08 ` [PATCH 07/11] xfs: simplify the b_page_count calculation Christoph Hellwig
2021-05-19 19:08 ` Christoph Hellwig [this message]
2021-05-19 23:22   ` [PATCH 08/11] xfs: centralize page allocation and freeing for buffers Dave Chinner
2021-05-20  5:35     ` Christoph Hellwig
2021-05-25 23:59       ` Dave Chinner
2021-05-19 19:08 ` [PATCH 09/11] xfs: lift the buffer zeroing logic into xfs_buf_alloc_pages Christoph Hellwig
2021-05-19 19:08 ` [PATCH 10/11] xfs: retry allocations from xfs_buf_get_uncached as well Christoph Hellwig
2021-05-19 19:09 ` [PATCH 11/11] xfs: use alloc_pages_bulk_array() for buffers Christoph Hellwig
