From: Yunsheng Lin <linyunsheng@huawei.com>
To: <davem@davemloft.net>, <kuba@kernel.org>, <pabeni@redhat.com>
Cc: <netdev@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
	Yunsheng Lin <linyunsheng@huawei.com>,
	Andrew Morton <akpm@linux-foundation.org>, <linux-mm@kvack.org>
Subject: [PATCH RFC 09/10] mm: page_frag: introduce prepare/commit API for page_frag
Date: Thu, 28 Mar 2024 21:38:38 +0800	[thread overview]
Message-ID: <20240328133839.13620-10-linyunsheng@huawei.com> (raw)
In-Reply-To: <20240328133839.13620-1-linyunsheng@huawei.com>

There are many use cases that need a minimum amount of memory
in order to make forward progress, but can do better if more
memory is available.

Currently the skb_page_frag_refill() API is used to handle the
above use cases, and as mentioned in [1], its implementation is
similar to the one in the mm subsystem.

To unify those two page_frag implementations, introduce a
prepare API that ensures the minimum memory requirement is
satisfied and reports how much memory is actually available to
the caller.

The caller can then decide how much memory to use by calling
the commit API, or skip the commit API entirely if it decides
not to use any memory.

1. https://lore.kernel.org/all/20240228093013.8263-1-linyunsheng@huawei.com/
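
As a rough sketch of the intended calling pattern (hypothetical
caller code, not part of this patch; HDR_LEN and fill_data() are
made-up names standing in for a real user):

	unsigned int offset, size = HDR_LEN;	/* minimum needed */
	unsigned int used;
	void *va;

	va = page_frag_alloc_prepare(nc, &offset, &size, GFP_KERNEL);
	if (unlikely(!va))
		return -ENOMEM;

	/* 'size' now reports all the memory available at 'offset',
	 * which may be more than the HDR_LEN asked for.
	 */
	used = fill_data(va + offset, size);
	if (used)
		page_frag_alloc_commit(nc, offset, used);
	/* skipping the commit leaves the cache untouched */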

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
 include/linux/page_frag_cache.h | 72 ++++++++++++++++++++++++++++++++-
 mm/page_frag_alloc.c            | 13 +++---
 2 files changed, 75 insertions(+), 10 deletions(-)

diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
index a97a1ac017d6..3f17c0eba7fa 100644
--- a/include/linux/page_frag_cache.h
+++ b/include/linux/page_frag_cache.h
@@ -43,8 +43,76 @@ static inline bool page_frag_cache_is_pfmemalloc(struct page_frag_cache *nc)
 
 void page_frag_cache_drain(struct page_frag_cache *nc);
 void __page_frag_cache_drain(struct page *page, unsigned int count);
-void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
-			 gfp_t gfp_mask);
+void *__page_frag_alloc_prepare(struct page_frag_cache *nc, unsigned int fragsz,
+				gfp_t gfp_mask);
+
+static inline void *page_frag_alloc_va(struct page_frag_cache *nc,
+				       unsigned int fragsz, gfp_t gfp_mask)
+{
+	void *va;
+
+	va = __page_frag_alloc_prepare(nc, fragsz, gfp_mask);
+	if (unlikely(!va))
+		return NULL;
+
+	va += nc->offset;
+	nc->pagecnt_bias--;
+	nc->offset = nc->offset + fragsz;
+
+	return va;
+}
+
+static inline void *page_frag_alloc_prepare(struct page_frag_cache *nc,
+					    unsigned int *offset,
+					    unsigned int *size,
+					    gfp_t gfp_mask)
+{
+	void *va;
+
+	va = __page_frag_alloc_prepare(nc, *size, gfp_mask);
+	if (unlikely(!va))
+		return NULL;
+
+	*offset = nc->offset;
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+	*size = nc->size_mask - *offset + 1;
+#else
+	*size = PAGE_SIZE - *offset;
+#endif
+
+	return va;
+}
+
+static inline void *page_frag_alloc_prepare_align(struct page_frag_cache *nc,
+						  unsigned int *offset,
+						  unsigned int *size,
+						  unsigned int align,
+						  gfp_t gfp_mask)
+{
+	unsigned int old_offset = nc->offset;
+
+	WARN_ON_ONCE(!is_power_of_2(align) || align >= PAGE_SIZE ||
+		     *size < sizeof(unsigned int));
+
+	nc->offset = ALIGN(old_offset, align);
+	return page_frag_alloc_prepare(nc, offset, size, gfp_mask);
+}
+
+static inline void page_frag_alloc_commit(struct page_frag_cache *nc,
+					  unsigned int offset,
+					  unsigned int size)
+{
+	nc->pagecnt_bias--;
+	nc->offset = offset + size;
+}
+
+static inline void page_frag_alloc_commit_noref(struct page_frag_cache *nc,
+						unsigned int offset,
+						unsigned int size)
+{
+	nc->offset = offset + size;
+}
 
 static inline void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
 					       unsigned int fragsz,
diff --git a/mm/page_frag_alloc.c b/mm/page_frag_alloc.c
index ae1393d0619a..c4d4fc10a850 100644
--- a/mm/page_frag_alloc.c
+++ b/mm/page_frag_alloc.c
@@ -81,8 +81,8 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
 }
 EXPORT_SYMBOL(__page_frag_cache_drain);
 
-void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
-			 gfp_t gfp_mask)
+void *__page_frag_alloc_prepare(struct page_frag_cache *nc, unsigned int fragsz,
+				gfp_t gfp_mask)
 {
 	unsigned long size_mask;
 	unsigned int offset;
@@ -120,7 +120,7 @@ void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
 		set_page_count(page, size_mask);
 		nc->pagecnt_bias |= size_mask;
 
-		offset = 0;
+		nc->offset = 0;
 		if (unlikely(fragsz > (size_mask + 1))) {
 			/*
 			 * The caller is trying to allocate a fragment
@@ -135,12 +135,9 @@ void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
 		}
 	}
 
-	nc->pagecnt_bias--;
-	nc->offset = offset + fragsz;
-
-	return va + offset;
+	return va;
 }
-EXPORT_SYMBOL(page_frag_alloc_va);
+EXPORT_SYMBOL(__page_frag_alloc_prepare);
 
 /*
  * Frees a page fragment allocated out of either a compound or order 0 page.
-- 
2.33.0

