linux-mm.kvack.org archive mirror
* [PATCH net-next v2 01/15] mm: page_frag: add a test module for page_frag
       [not found] <20240415131941.51153-1-linyunsheng@huawei.com>
@ 2024-04-15 13:19 ` Yunsheng Lin
  2024-04-15 13:19 ` [PATCH net-next v2 03/15] mm: page_frag: use free_unref_page() to free page fragment Yunsheng Lin
                   ` (9 subsequent siblings)
  10 siblings, 0 replies; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-15 13:19 UTC (permalink / raw)
  To: davem, kuba, pabeni
  Cc: netdev, linux-kernel, Yunsheng Lin, Andrew Morton,
	Alexander Duyck, linux-mm

Based on lib/objpool.c, change it into something like a ptrpool,
so that we can use it to test the correctness and performance of
page_frag.

The test works by having a kthread bound to the first CPU allocate
fragments from a page_frag_cache instance and push them into a
ptrpool instance, while a kthread bound to the current node pops
the fragments from the ptrpool and calls page_frag_free() to free
them.

We may refactor out the common part between objpool and ptrpool if
this ptrpool thing turns out to be useful elsewhere.
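
For orientation, here is a condensed sketch of the flow the module
below implements (function names are as in this patch; the summary
itself is illustrative and not part of the module):

	push kthread (bound to the first online CPU):
		va = page_frag_alloc(&test_frag, size, GFP_KERNEL);
		objpool_push(va, &ptr_pool);	// free va and retry when full

	pop kthread (unbound, running on the current node):
		va = objpool_pop(&ptr_pool);
		page_frag_free(va);		// cond_resched() when empty

Both sides run nr_test iterations and the elapsed time is printed
at the end, so what gets measured is the cross-CPU
alloc/push/pop/free throughput.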

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
 mm/Kconfig.debug    |   8 +
 mm/Makefile         |   1 +
 mm/page_frag_test.c | 364 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 373 insertions(+)
 create mode 100644 mm/page_frag_test.c

diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index afc72fde0f03..1ebcd45f47d4 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -142,6 +142,14 @@ config DEBUG_PAGE_REF
 	  kernel code.  However the runtime performance overhead is virtually
 	  nil until the tracepoints are actually enabled.
 
+config DEBUG_PAGE_FRAG_TEST
+	tristate "Test module for page_frag"
+	default n
+	depends on m && DEBUG_KERNEL
+	help
+	  This builds the "page_frag_test" module that is used to test the
+	  correctness and performance of page_frag's implementation.
+
 config DEBUG_RODATA_TEST
     bool "Testcase for the marking rodata read-only"
     depends on STRICT_KERNEL_RWX
diff --git a/mm/Makefile b/mm/Makefile
index 4abb40b911ec..5a14e6992f44 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -101,6 +101,7 @@ obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
 obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o
 obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
 obj-$(CONFIG_DEBUG_RODATA_TEST) += rodata_test.o
+obj-$(CONFIG_DEBUG_PAGE_FRAG_TEST) += page_frag_test.o
 obj-$(CONFIG_DEBUG_VM_PGTABLE) += debug_vm_pgtable.o
 obj-$(CONFIG_PAGE_OWNER) += page_owner.o
 obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
diff --git a/mm/page_frag_test.c b/mm/page_frag_test.c
new file mode 100644
index 000000000000..6743db672dad
--- /dev/null
+++ b/mm/page_frag_test.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Test module for page_frag cache
+ *
+ * Copyright: linyunsheng@huawei.com
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/atomic.h>
+#include <linux/irqflags.h>
+#include <linux/cpumask.h>
+#include <linux/log2.h>
+#include <linux/completion.h>
+#include <linux/kthread.h>
+
+#define OBJPOOL_NR_OBJECT_MAX	BIT(24)
+
+struct objpool_slot {
+	u32 head;
+	u32 tail;
+	u32 last;
+	u32 mask;
+	void *entries[];
+} __packed;
+
+struct objpool_head {
+	int nr_cpus;
+	int capacity;
+	struct objpool_slot **cpu_slots;
+};
+
+/* initialize percpu objpool_slot */
+static void objpool_init_percpu_slot(struct objpool_head *pool,
+				     struct objpool_slot *slot)
+{
+	/* initialize elements of percpu objpool_slot */
+	slot->mask = pool->capacity - 1;
+}
+
+/* allocate and initialize percpu slots */
+static int objpool_init_percpu_slots(struct objpool_head *pool,
+				     int nr_objs, gfp_t gfp)
+{
+	int i;
+
+	for (i = 0; i < pool->nr_cpus; i++) {
+		struct objpool_slot *slot;
+		int size;
+
+		/* skip cpus which can never be present */
+		if (!cpu_possible(i))
+			continue;
+
+		size = struct_size(slot, entries, pool->capacity);
+
+		/*
+		 * here we allocate the percpu slot & objs together in a
+		 * single allocation to make it more compact, taking
+		 * advantage of warm caches and TLB hits. by default vmalloc
+		 * is used to reduce the pressure on the kernel slab
+		 * allocator; note that vmalloc's minimal allocation is one
+		 * page, since it always rounds the requested size up
+		 */
+		if (gfp & GFP_ATOMIC)
+			slot = kmalloc_node(size, gfp, cpu_to_node(i));
+		else
+			slot = __vmalloc_node(size, sizeof(void *), gfp,
+					      cpu_to_node(i),
+					      __builtin_return_address(0));
+		if (!slot)
+			return -ENOMEM;
+
+		memset(slot, 0, size);
+		pool->cpu_slots[i] = slot;
+
+		objpool_init_percpu_slot(pool, slot);
+	}
+
+	return 0;
+}
+
+/* cleanup all percpu slots of the object pool */
+static void objpool_fini_percpu_slots(struct objpool_head *pool)
+{
+	int i;
+
+	if (!pool->cpu_slots)
+		return;
+
+	for (i = 0; i < pool->nr_cpus; i++)
+		kvfree(pool->cpu_slots[i]);
+	kfree(pool->cpu_slots);
+}
+
+/* initialize object pool and pre-allocate objects */
+static int objpool_init(struct objpool_head *pool, int nr_objs, gfp_t gfp)
+{
+	int rc, capacity, slot_size;
+
+	/* check input parameters */
+	if (nr_objs <= 0 || nr_objs > OBJPOOL_NR_OBJECT_MAX)
+		return -EINVAL;
+
+	/* calculate capacity of percpu objpool_slot */
+	capacity = roundup_pow_of_two(nr_objs);
+	if (!capacity)
+		return -EINVAL;
+
+	gfp = gfp & ~__GFP_ZERO;
+
+	/* initialize objpool pool */
+	memset(pool, 0, sizeof(struct objpool_head));
+	pool->nr_cpus = nr_cpu_ids;
+	pool->capacity = capacity;
+	slot_size = pool->nr_cpus * sizeof(struct objpool_slot *);
+	pool->cpu_slots = kzalloc(slot_size, gfp);
+	if (!pool->cpu_slots)
+		return -ENOMEM;
+
+	/* initialize per-cpu slots */
+	rc = objpool_init_percpu_slots(pool, nr_objs, gfp);
+	if (rc)
+		objpool_fini_percpu_slots(pool);
+
+	return rc;
+}
+
+/* adding object to slot, abort if the slot was already full */
+static int objpool_try_add_slot(void *obj, struct objpool_head *pool, int cpu)
+{
+	struct objpool_slot *slot = pool->cpu_slots[cpu];
+	u32 head, tail;
+
+	/* loading tail and head as a local snapshot, tail first */
+	tail = READ_ONCE(slot->tail);
+
+	do {
+		head = READ_ONCE(slot->head);
+		/* fault caught: something must be wrong */
+		if (unlikely(tail - head >= pool->capacity))
+			return -ENOSPC;
+	} while (!try_cmpxchg_acquire(&slot->tail, &tail, tail + 1));
+
+	/* now the tail position is reserved for the given obj */
+	WRITE_ONCE(slot->entries[tail & slot->mask], obj);
+	/* update sequence to make this obj available for pop() */
+	smp_store_release(&slot->last, tail + 1);
+
+	return 0;
+}
+
+/* reclaim an object to object pool */
+static int objpool_push(void *obj, struct objpool_head *pool)
+{
+	unsigned long flags;
+	int rc;
+
+	/* disable local irq to avoid preemption & interruption */
+	raw_local_irq_save(flags);
+	rc = objpool_try_add_slot(obj, pool, raw_smp_processor_id());
+	raw_local_irq_restore(flags);
+
+	return rc;
+}
+
+/* try to retrieve object from slot */
+static void *objpool_try_get_slot(struct objpool_head *pool, int cpu)
+{
+	struct objpool_slot *slot = pool->cpu_slots[cpu];
+	/* load head snapshot, other cpus may change it */
+	u32 head = smp_load_acquire(&slot->head);
+
+	while (head != READ_ONCE(slot->last)) {
+		void *obj;
+
+		/*
+		 * data visibility of 'last' and 'head' could be out of
+		 * order since memory updating of 'last' and 'head' are
+		 * performed in push() and pop() independently
+		 *
+		 * before any retrieving attempts, pop() must guarantee
+		 * 'last' is behind 'head', that is to say, there must
+		 * be available objects in slot, which could be ensured
+		 * by condition 'last != head && last - head <= nr_objs'
+		 * that is equivalent to 'last - head - 1 < nr_objs' as
+		 * 'last' and 'head' are both unsigned int32
+		 */
+		if (READ_ONCE(slot->last) - head - 1 >= pool->capacity) {
+			head = READ_ONCE(slot->head);
+			continue;
+		}
+
+		/* obj must be retrieved before moving forward head */
+		obj = READ_ONCE(slot->entries[head & slot->mask]);
+
+		/* move head forward to mark its consumption */
+		if (try_cmpxchg_release(&slot->head, &head, head + 1))
+			return obj;
+	}
+
+	return NULL;
+}
+
+/* allocate an object from object pool */
+static void *objpool_pop(struct objpool_head *pool)
+{
+	void *obj = NULL;
+	unsigned long flags;
+	int i, cpu;
+
+	/* disable local irq to avoid preemption & interruption */
+	raw_local_irq_save(flags);
+
+	cpu = raw_smp_processor_id();
+	for (i = 0; i < num_possible_cpus(); i++) {
+		obj = objpool_try_get_slot(pool, cpu);
+		if (obj)
+			break;
+		cpu = cpumask_next_wrap(cpu, cpu_possible_mask, -1, 1);
+	}
+	raw_local_irq_restore(flags);
+
+	return obj;
+}
+
+/* forcibly release the whole objpool */
+static void objpool_free(struct objpool_head *pool)
+{
+	if (!pool->cpu_slots)
+		return;
+
+	/* release percpu slots */
+	objpool_fini_percpu_slots(pool);
+}
+
+static struct objpool_head ptr_pool;
+static int nr_objs = 512;
+static int nr_test = 5120000;
+static atomic_t nthreads;
+static struct completion wait;
+static struct page_frag_cache test_frag;
+
+module_param(nr_test, int, 0600);
+MODULE_PARM_DESC(nr_test, "number of iterations to test");
+
+static int page_frag_pop_thread(void *arg)
+{
+	struct objpool_head *pool = arg;
+	int nr = nr_test;
+
+	pr_info("page_frag pop test thread begins on cpu %d\n",
+		smp_processor_id());
+
+	while (nr > 0) {
+		void *obj = objpool_pop(pool);
+
+		if (obj) {
+			nr--;
+			page_frag_free(obj);
+		} else {
+			cond_resched();
+		}
+	}
+
+	if (atomic_dec_and_test(&nthreads))
+		complete(&wait);
+
+	pr_info("page_frag pop test thread exits on cpu %d\n",
+		smp_processor_id());
+
+	return 0;
+}
+
+static int page_frag_push_thread(void *arg)
+{
+	struct objpool_head *pool = arg;
+	int nr = nr_test;
+
+	pr_info("page_frag push test thread begins on cpu %d\n",
+		smp_processor_id());
+
+	while (nr > 0) {
+		unsigned int size = get_random_u32();
+		void *va;
+		int ret;
+
+		size = clamp(size, 4U, 4096U);
+		va = page_frag_alloc(&test_frag, size, GFP_KERNEL);
+		if (!va)
+			continue;
+
+		ret = objpool_push(va, pool);
+		if (ret) {
+			page_frag_free(va);
+			cond_resched();
+		} else {
+			nr--;
+		}
+	}
+
+	pr_info("page_frag push test thread exits on cpu %d\n",
+		smp_processor_id());
+
+	if (atomic_dec_and_test(&nthreads))
+		complete(&wait);
+
+	return 0;
+}
+
+static int __init page_frag_test_init(void)
+{
+	struct task_struct *tsk_push, *tsk_pop;
+	ktime_t start;
+	u64 duration;
+	int ret;
+
+	test_frag.va = NULL;
+	atomic_set(&nthreads, 2);
+	init_completion(&wait);
+
+	ret = objpool_init(&ptr_pool, nr_objs, GFP_KERNEL);
+	if (ret)
+		return ret;
+
+	tsk_push = kthread_create_on_cpu(page_frag_push_thread, &ptr_pool,
+					 cpumask_first(cpu_online_mask),
+					 "page_frag_push");
+	if (IS_ERR(tsk_push))
+		return PTR_ERR(tsk_push);
+
+	tsk_pop = kthread_create(page_frag_pop_thread, &ptr_pool,
+				 "page_frag_pop");
+	if (IS_ERR(tsk_pop)) {
+		kthread_stop(tsk_push);
+		return PTR_ERR(tsk_pop);
+	}
+
+	start = ktime_get();
+	wake_up_process(tsk_push);
+	wake_up_process(tsk_pop);
+
+	pr_info("waiting for test to complete\n");
+	wait_for_completion(&wait);
+
+	duration = (u64)ktime_us_delta(ktime_get(), start);
+	pr_info("%d of iterations took: %lluus\n", nr_test, duration);
+
+	objpool_free(&ptr_pool);
+	page_frag_cache_drain(&test_frag);
+
+	return -EAGAIN;
+}
+
+static void __exit page_frag_test_exit(void)
+{
+}
+
+module_init(page_frag_test_init);
+module_exit(page_frag_test_exit);
+
+MODULE_LICENSE("GPL");
-- 
2.33.0




* [PATCH net-next v2 03/15] mm: page_frag: use free_unref_page() to free page fragment
       [not found] <20240415131941.51153-1-linyunsheng@huawei.com>
  2024-04-15 13:19 ` [PATCH net-next v2 01/15] mm: page_frag: add a test module for page_frag Yunsheng Lin
@ 2024-04-15 13:19 ` Yunsheng Lin
  2024-04-15 13:19 ` [PATCH net-next v2 04/15] mm: move the page fragment allocator from page_alloc into its own file Yunsheng Lin
                   ` (8 subsequent siblings)
  10 siblings, 0 replies; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-15 13:19 UTC (permalink / raw)
  To: davem, kuba, pabeni
  Cc: netdev, linux-kernel, Yunsheng Lin, Andrew Morton, linux-mm

free_the_page(), as used by page_frag, calls free_unref_page() or
__free_pages_ok() depending on pcp_allowed_order(). As the max order
of a page allocated for page_frag is 3, the check in
pcp_allowed_order() is unnecessary.

So call free_unref_page() directly to free a page_frag page and
avoid the unnecessary check.

As free_the_page() is a static function in page_alloc.c, calling
free_unref_page() directly also allows moving the page_frag related
code to a new file in the next patch.

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
 mm/page_alloc.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 14d39f34d336..7adb29f8f364 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4693,6 +4693,9 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
 	gfp_t gfp = gfp_mask;
 
 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+	/* Ensure free_unref_page() can be used to free the page fragment */
+	BUILD_BUG_ON(PAGE_FRAG_CACHE_MAX_ORDER > PAGE_ALLOC_COSTLY_ORDER);
+
 	gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) |  __GFP_COMP |
 		   __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
 	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
@@ -4722,7 +4725,7 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
 
 	if (page_ref_sub_and_test(page, count))
-		free_the_page(page, compound_order(page));
+		free_unref_page(page, compound_order(page));
 }
 EXPORT_SYMBOL(__page_frag_cache_drain);
 
@@ -4763,7 +4766,7 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
 			goto refill;
 
 		if (unlikely(nc->pfmemalloc)) {
-			free_the_page(page, compound_order(page));
+			free_unref_page(page, compound_order(page));
 			goto refill;
 		}
 
@@ -4807,7 +4810,7 @@ void page_frag_free(void *addr)
 	struct page *page = virt_to_head_page(addr);
 
 	if (unlikely(put_page_testzero(page)))
-		free_the_page(page, compound_order(page));
+		free_unref_page(page, compound_order(page));
 }
 EXPORT_SYMBOL(page_frag_free);
 
-- 
2.33.0




* [PATCH net-next v2 04/15] mm: move the page fragment allocator from page_alloc into its own file
       [not found] <20240415131941.51153-1-linyunsheng@huawei.com>
  2024-04-15 13:19 ` [PATCH net-next v2 01/15] mm: page_frag: add a test module for page_frag Yunsheng Lin
  2024-04-15 13:19 ` [PATCH net-next v2 03/15] mm: page_frag: use free_unref_page() to free page fragment Yunsheng Lin
@ 2024-04-15 13:19 ` Yunsheng Lin
  2024-04-15 13:19 ` [PATCH net-next v2 05/15] mm: page_frag: use initial zero offset for page_frag_alloc_align() Yunsheng Lin
                   ` (7 subsequent siblings)
  10 siblings, 0 replies; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-15 13:19 UTC (permalink / raw)
  To: davem, kuba, pabeni
  Cc: netdev, linux-kernel, Yunsheng Lin, David Howells, Andrew Morton,
	Alexander Duyck, linux-mm

Inspired by [1], move the page fragment allocator from page_alloc
into its own .c file and header file, as we are about to make more
changes to it so that it can replace the other page_frag
implementation in sock.c.

1. https://lore.kernel.org/all/20230411160902.4134381-3-dhowells@redhat.com/

CC: David Howells <dhowells@redhat.com>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
 include/linux/gfp.h             |  22 -----
 include/linux/mm_types.h        |  18 ----
 include/linux/page_frag_cache.h |  47 ++++++++++
 include/linux/skbuff.h          |   1 +
 mm/Makefile                     |   1 +
 mm/page_alloc.c                 | 139 ------------------------------
 mm/page_frag_cache.c            | 147 ++++++++++++++++++++++++++++++++
 mm/page_frag_test.c             |   1 +
 8 files changed, 197 insertions(+), 179 deletions(-)
 create mode 100644 include/linux/page_frag_cache.h
 create mode 100644 mm/page_frag_cache.c

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index c775ea3c6015..5afeab2b906f 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -310,28 +310,6 @@ __meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __al
 extern void __free_pages(struct page *page, unsigned int order);
 extern void free_pages(unsigned long addr, unsigned int order);
 
-struct page_frag_cache;
-void page_frag_cache_drain(struct page_frag_cache *nc);
-extern void __page_frag_cache_drain(struct page *page, unsigned int count);
-void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
-			      gfp_t gfp_mask, unsigned int align_mask);
-
-static inline void *page_frag_alloc_align(struct page_frag_cache *nc,
-					  unsigned int fragsz, gfp_t gfp_mask,
-					  unsigned int align)
-{
-	WARN_ON_ONCE(!is_power_of_2(align));
-	return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align);
-}
-
-static inline void *page_frag_alloc(struct page_frag_cache *nc,
-			     unsigned int fragsz, gfp_t gfp_mask)
-{
-	return __page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
-}
-
-extern void page_frag_free(void *addr);
-
 #define __free_page(page) __free_pages((page), 0)
 #define free_page(addr) free_pages((addr), 0)
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5240bd7bca33..78a92b4475a7 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -504,9 +504,6 @@ static_assert(sizeof(struct ptdesc) <= sizeof(struct page));
  */
 #define STRUCT_PAGE_MAX_SHIFT	(order_base_2(sizeof(struct page)))
 
-#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
-#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)
-
 /*
  * page_private can be used on tail pages.  However, PagePrivate is only
  * checked by the VM on the head page.  So page_private on the tail pages
@@ -525,21 +522,6 @@ static inline void *folio_get_private(struct folio *folio)
 	return folio->private;
 }
 
-struct page_frag_cache {
-	void * va;
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
-	__u16 offset;
-	__u16 size;
-#else
-	__u32 offset;
-#endif
-	/* we maintain a pagecount bias, so that we dont dirty cache line
-	 * containing page->_refcount every time we allocate a fragment.
-	 */
-	unsigned int		pagecnt_bias;
-	bool pfmemalloc;
-};
-
 typedef unsigned long vm_flags_t;
 
 /*
diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
new file mode 100644
index 000000000000..04810d8d6a7d
--- /dev/null
+++ b/include/linux/page_frag_cache.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_PAGE_FRAG_CACHE_H
+#define _LINUX_PAGE_FRAG_CACHE_H
+
+#include <linux/gfp.h>
+
+#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
+#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)
+
+struct page_frag_cache {
+	void *va;
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+	__u16 offset;
+	__u16 size;
+#else
+	__u32 offset;
+#endif
+	/* we maintain a pagecount bias, so that we dont dirty cache line
+	 * containing page->_refcount every time we allocate a fragment.
+	 */
+	unsigned int		pagecnt_bias;
+	bool pfmemalloc;
+};
+
+void page_frag_cache_drain(struct page_frag_cache *nc);
+void __page_frag_cache_drain(struct page *page, unsigned int count);
+void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
+			      gfp_t gfp_mask, unsigned int align_mask);
+
+static inline void *page_frag_alloc_align(struct page_frag_cache *nc,
+					  unsigned int fragsz, gfp_t gfp_mask,
+					  unsigned int align)
+{
+	WARN_ON_ONCE(!is_power_of_2(align));
+	return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align);
+}
+
+static inline void *page_frag_alloc(struct page_frag_cache *nc,
+				    unsigned int fragsz, gfp_t gfp_mask)
+{
+	return page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
+}
+
+void page_frag_free(void *addr);
+
+#endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4072a7ee3859..f2dc1f735c79 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -31,6 +31,7 @@
 #include <linux/in6.h>
 #include <linux/if_packet.h>
 #include <linux/llist.h>
+#include <linux/page_frag_cache.h>
 #include <net/flow.h>
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #include <linux/netfilter/nf_conntrack_common.h>
diff --git a/mm/Makefile b/mm/Makefile
index 5a14e6992f44..8b62f5de48a7 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -59,6 +59,7 @@ page-alloc-$(CONFIG_SHUFFLE_PAGE_ALLOCATOR) += shuffle.o
 memory-hotplug-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 
 obj-y += page-alloc.o
+obj-y += page_frag_cache.o
 obj-y += init-mm.o
 obj-y += memblock.o
 obj-y += $(memory-hotplug-y)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7adb29f8f364..2308360d78eb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4675,145 +4675,6 @@ void free_pages(unsigned long addr, unsigned int order)
 
 EXPORT_SYMBOL(free_pages);
 
-/*
- * Page Fragment:
- *  An arbitrary-length arbitrary-offset area of memory which resides
- *  within a 0 or higher order page.  Multiple fragments within that page
- *  are individually refcounted, in the page's reference counter.
- *
- * The page_frag functions below provide a simple allocation framework for
- * page fragments.  This is used by the network stack and network device
- * drivers to provide a backing region of memory for use as either an
- * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
- */
-static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
-					     gfp_t gfp_mask)
-{
-	struct page *page = NULL;
-	gfp_t gfp = gfp_mask;
-
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
-	/* Ensure free_unref_page() can be used to free the page fragment */
-	BUILD_BUG_ON(PAGE_FRAG_CACHE_MAX_ORDER > PAGE_ALLOC_COSTLY_ORDER);
-
-	gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) |  __GFP_COMP |
-		   __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
-	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
-				PAGE_FRAG_CACHE_MAX_ORDER);
-	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
-#endif
-	if (unlikely(!page))
-		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
-
-	nc->va = page ? page_address(page) : NULL;
-
-	return page;
-}
-
-void page_frag_cache_drain(struct page_frag_cache *nc)
-{
-	if (!nc->va)
-		return;
-
-	__page_frag_cache_drain(virt_to_head_page(nc->va), nc->pagecnt_bias);
-	nc->va = NULL;
-}
-EXPORT_SYMBOL(page_frag_cache_drain);
-
-void __page_frag_cache_drain(struct page *page, unsigned int count)
-{
-	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
-
-	if (page_ref_sub_and_test(page, count))
-		free_unref_page(page, compound_order(page));
-}
-EXPORT_SYMBOL(__page_frag_cache_drain);
-
-void *__page_frag_alloc_align(struct page_frag_cache *nc,
-			      unsigned int fragsz, gfp_t gfp_mask,
-			      unsigned int align_mask)
-{
-	unsigned int size = PAGE_SIZE;
-	struct page *page;
-	int offset;
-
-	if (unlikely(!nc->va)) {
-refill:
-		page = __page_frag_cache_refill(nc, gfp_mask);
-		if (!page)
-			return NULL;
-
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
-		/* if size can vary use size else just use PAGE_SIZE */
-		size = nc->size;
-#endif
-		/* Even if we own the page, we do not use atomic_set().
-		 * This would break get_page_unless_zero() users.
-		 */
-		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
-
-		/* reset page count bias and offset to start of new frag */
-		nc->pfmemalloc = page_is_pfmemalloc(page);
-		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
-		nc->offset = size;
-	}
-
-	offset = nc->offset - fragsz;
-	if (unlikely(offset < 0)) {
-		page = virt_to_page(nc->va);
-
-		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
-			goto refill;
-
-		if (unlikely(nc->pfmemalloc)) {
-			free_unref_page(page, compound_order(page));
-			goto refill;
-		}
-
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
-		/* if size can vary use size else just use PAGE_SIZE */
-		size = nc->size;
-#endif
-		/* OK, page count is 0, we can safely set it */
-		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
-
-		/* reset page count bias and offset to start of new frag */
-		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
-		offset = size - fragsz;
-		if (unlikely(offset < 0)) {
-			/*
-			 * The caller is trying to allocate a fragment
-			 * with fragsz > PAGE_SIZE but the cache isn't big
-			 * enough to satisfy the request, this may
-			 * happen in low memory conditions.
-			 * We don't release the cache page because
-			 * it could make memory pressure worse
-			 * so we simply return NULL here.
-			 */
-			return NULL;
-		}
-	}
-
-	nc->pagecnt_bias--;
-	offset &= align_mask;
-	nc->offset = offset;
-
-	return nc->va + offset;
-}
-EXPORT_SYMBOL(__page_frag_alloc_align);
-
-/*
- * Frees a page fragment allocated out of either a compound or order 0 page.
- */
-void page_frag_free(void *addr)
-{
-	struct page *page = virt_to_head_page(addr);
-
-	if (unlikely(put_page_testzero(page)))
-		free_unref_page(page, compound_order(page));
-}
-EXPORT_SYMBOL(page_frag_free);
-
 static void *make_alloc_exact(unsigned long addr, unsigned int order,
 		size_t size)
 {
diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
new file mode 100644
index 000000000000..64993b5d1243
--- /dev/null
+++ b/mm/page_frag_cache.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Page fragment allocator
+ *
+ * Page Fragment:
+ *  An arbitrary-length arbitrary-offset area of memory which resides within a
+ *  0 or higher order page.  Multiple fragments within that page are
+ *  individually refcounted, in the page's reference counter.
+ *
+ * The page_frag functions provide a simple allocation framework for page
+ * fragments.  This is used by the network stack and network device drivers to
+ * provide a backing region of memory for use as either an sk_buff->head, or to
+ * be used in the "frags" portion of skb_shared_info.
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/page_frag_cache.h>
+#include "internal.h"
+
+static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
+					     gfp_t gfp_mask)
+{
+	struct page *page = NULL;
+	gfp_t gfp = gfp_mask;
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+	/* Ensure free_unref_page() can be used to free the page fragment */
+	BUILD_BUG_ON(PAGE_FRAG_CACHE_MAX_ORDER > PAGE_ALLOC_COSTLY_ORDER);
+
+	gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) |  __GFP_COMP |
+		   __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
+	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
+				PAGE_FRAG_CACHE_MAX_ORDER);
+	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
+#endif
+	if (unlikely(!page))
+		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
+
+	nc->va = page ? page_address(page) : NULL;
+
+	return page;
+}
+
+void page_frag_cache_drain(struct page_frag_cache *nc)
+{
+	if (!nc->va)
+		return;
+
+	__page_frag_cache_drain(virt_to_head_page(nc->va), nc->pagecnt_bias);
+	nc->va = NULL;
+}
+EXPORT_SYMBOL(page_frag_cache_drain);
+
+void __page_frag_cache_drain(struct page *page, unsigned int count)
+{
+	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
+
+	if (page_ref_sub_and_test(page, count))
+		free_unref_page(page, compound_order(page));
+}
+EXPORT_SYMBOL(__page_frag_cache_drain);
+
+void *__page_frag_alloc_align(struct page_frag_cache *nc,
+			      unsigned int fragsz, gfp_t gfp_mask,
+			      unsigned int align_mask)
+{
+	unsigned int size = PAGE_SIZE;
+	struct page *page;
+	int offset;
+
+	if (unlikely(!nc->va)) {
+refill:
+		page = __page_frag_cache_refill(nc, gfp_mask);
+		if (!page)
+			return NULL;
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+		/* if size can vary use size else just use PAGE_SIZE */
+		size = nc->size;
+#endif
+		/* Even if we own the page, we do not use atomic_set().
+		 * This would break get_page_unless_zero() users.
+		 */
+		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
+
+		/* reset page count bias and offset to start of new frag */
+		nc->pfmemalloc = page_is_pfmemalloc(page);
+		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
+		nc->offset = size;
+	}
+
+	offset = nc->offset - fragsz;
+	if (unlikely(offset < 0)) {
+		page = virt_to_page(nc->va);
+
+		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
+			goto refill;
+
+		if (unlikely(nc->pfmemalloc)) {
+			free_unref_page(page, compound_order(page));
+			goto refill;
+		}
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+		/* if size can vary use size else just use PAGE_SIZE */
+		size = nc->size;
+#endif
+		/* OK, page count is 0, we can safely set it */
+		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
+
+		/* reset page count bias and offset to start of new frag */
+		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
+		offset = size - fragsz;
+		if (unlikely(offset < 0)) {
+			/*
+			 * The caller is trying to allocate a fragment
+			 * with fragsz > PAGE_SIZE but the cache isn't big
+			 * enough to satisfy the request, this may
+			 * happen in low memory conditions.
+			 * We don't release the cache page because
+			 * it could make memory pressure worse
+			 * so we simply return NULL here.
+			 */
+			return NULL;
+		}
+	}
+
+	nc->pagecnt_bias--;
+	offset &= align_mask;
+	nc->offset = offset;
+
+	return nc->va + offset;
+}
+EXPORT_SYMBOL(__page_frag_alloc_align);
+
+/*
+ * Frees a page fragment allocated out of either a compound or order 0 page.
+ */
+void page_frag_free(void *addr)
+{
+	struct page *page = virt_to_head_page(addr);
+
+	if (unlikely(put_page_testzero(page)))
+		free_unref_page(page, compound_order(page));
+}
+EXPORT_SYMBOL(page_frag_free);
diff --git a/mm/page_frag_test.c b/mm/page_frag_test.c
index 6743db672dad..ebfd1c3dae8f 100644
--- a/mm/page_frag_test.c
+++ b/mm/page_frag_test.c
@@ -15,6 +15,7 @@
 #include <linux/log2.h>
 #include <linux/completion.h>
 #include <linux/kthread.h>
+#include <linux/page_frag_cache.h>
 
 #define OBJPOOL_NR_OBJECT_MAX	BIT(24)
 
-- 
2.33.0




* [PATCH net-next v2 05/15] mm: page_frag: use initial zero offset for page_frag_alloc_align()
       [not found] <20240415131941.51153-1-linyunsheng@huawei.com>
                   ` (2 preceding siblings ...)
  2024-04-15 13:19 ` [PATCH net-next v2 04/15] mm: move the page fragment allocator from page_alloc into its own file Yunsheng Lin
@ 2024-04-15 13:19 ` Yunsheng Lin
  2024-04-15 23:55   ` Alexander H Duyck
  2024-04-15 13:19 ` [PATCH net-next v2 06/15] mm: page_frag: change page_frag_alloc_* API to accept align param Yunsheng Lin
                   ` (6 subsequent siblings)
  10 siblings, 1 reply; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-15 13:19 UTC (permalink / raw)
  To: davem, kuba, pabeni
  Cc: netdev, linux-kernel, Yunsheng Lin, Alexander Duyck,
	Andrew Morton, linux-mm

We are about to use the page_frag_alloc_*() API to allocate memory
not just for skb->data but for skb frags too. Currently the
page_frag implementation in the mm subsystem runs the offset as a
countdown rather than a count-up value. There may be several
advantages to that, as mentioned in [1], but it also has some
disadvantages: for example, it defeats skb frag coalescing and works
against the forward access pattern that cache prefetching favors.

We have a trade-off to make in order to have a unified
implementation and API for page_frag, so use an initial zero offset
in this patch; the following patch will try to optimize away the
disadvantages as much as possible.

1. https://lore.kernel.org/all/f4abe71b3439b39d17a6fb2d410180f367cadf5c.camel@gmail.com/
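
To illustrate with hypothetical numbers (not from the patch): three
1024-byte fragments carved out of a fresh 4K page land at these
offsets under the two schemes:

	countdown (before):	3072, 2048, 1024
	count-up (this patch):	0, 1024, 2048

With the countdown scheme each new fragment ends where the previous
one begins, the reverse of what skb frag coalescing needs (a new
fragment starting where the previous one ends) and the reverse of
the forward direction hardware prefetchers are tuned for.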

CC: Alexander Duyck <alexander.duyck@gmail.com>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
 mm/page_frag_cache.c | 31 ++++++++++++++-----------------
 1 file changed, 14 insertions(+), 17 deletions(-)

diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
index 64993b5d1243..dc864ee09536 100644
--- a/mm/page_frag_cache.c
+++ b/mm/page_frag_cache.c
@@ -65,9 +65,8 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
 			      unsigned int fragsz, gfp_t gfp_mask,
 			      unsigned int align_mask)
 {
-	unsigned int size = PAGE_SIZE;
+	unsigned int size, offset;
 	struct page *page;
-	int offset;
 
 	if (unlikely(!nc->va)) {
 refill:
@@ -75,10 +74,6 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
 		if (!page)
 			return NULL;
 
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
-		/* if size can vary use size else just use PAGE_SIZE */
-		size = nc->size;
-#endif
 		/* Even if we own the page, we do not use atomic_set().
 		 * This would break get_page_unless_zero() users.
 		 */
@@ -87,11 +82,18 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
 		/* reset page count bias and offset to start of new frag */
 		nc->pfmemalloc = page_is_pfmemalloc(page);
 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
-		nc->offset = size;
+		nc->offset = 0;
 	}
 
-	offset = nc->offset - fragsz;
-	if (unlikely(offset < 0)) {
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+	/* if size can vary use size else just use PAGE_SIZE */
+	size = nc->size;
+#else
+	size = PAGE_SIZE;
+#endif
+
+	offset = ALIGN(nc->offset, -align_mask);
+	if (unlikely(offset + fragsz > size)) {
 		page = virt_to_page(nc->va);
 
 		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
@@ -102,17 +104,13 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
 			goto refill;
 		}
 
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
-		/* if size can vary use size else just use PAGE_SIZE */
-		size = nc->size;
-#endif
 		/* OK, page count is 0, we can safely set it */
 		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
 
 		/* reset page count bias and offset to start of new frag */
 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
-		offset = size - fragsz;
-		if (unlikely(offset < 0)) {
+		offset = 0;
+		if (unlikely(fragsz > size)) {
 			/*
 			 * The caller is trying to allocate a fragment
 			 * with fragsz > PAGE_SIZE but the cache isn't big
@@ -127,8 +125,7 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
 	}
 
 	nc->pagecnt_bias--;
-	offset &= align_mask;
-	nc->offset = offset;
+	nc->offset = offset + fragsz;
 
 	return nc->va + offset;
 }
-- 
2.33.0




* [PATCH net-next v2 06/15] mm: page_frag: change page_frag_alloc_* API to accept align param
       [not found] <20240415131941.51153-1-linyunsheng@huawei.com>
                   ` (3 preceding siblings ...)
  2024-04-15 13:19 ` [PATCH net-next v2 05/15] mm: page_frag: use initial zero offset for page_frag_alloc_align() Yunsheng Lin
@ 2024-04-15 13:19 ` Yunsheng Lin
  2024-04-16 16:08   ` Alexander Duyck
  2024-04-15 13:19 ` [PATCH net-next v2 07/15] mm: page_frag: add '_va' suffix to page_frag API Yunsheng Lin
                   ` (5 subsequent siblings)
  10 siblings, 1 reply; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-15 13:19 UTC (permalink / raw)
  To: davem, kuba, pabeni
  Cc: netdev, linux-kernel, Yunsheng Lin, Alexander Duyck,
	Andrew Morton, Eric Dumazet, David Howells, Marc Dionne,
	linux-mm, linux-afs

When the page_frag_alloc_* API doesn't need data alignment, the
ALIGN() operation is unnecessary, so change the page_frag_alloc_*
API to accept an align param instead of an align_mask param and do
the ALIGN()'ing in the inline helper only when needed.
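
The mask and the ALIGN() round in opposite directions, each matching
the direction its offset moves; for example, with align = 64 and
offset = 1000 (hypothetical values):

	old, countdown:	offset &= -align;		/* 1000 -> 960, round down */
	new, count-up:	offset = ALIGN(offset, align);	/* 1000 -> 1024, round up */

Callers that need no alignment now reach page_frag_alloc() without
any masking at all, and the constant align of 1 passed by the
netdev/napi wrappers lets the compiler fold the ALIGN() in the
inline helper away.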

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
 include/linux/page_frag_cache.h | 20 ++++++++++++--------
 include/linux/skbuff.h          | 12 ++++++------
 mm/page_frag_cache.c            |  9 ++++-----
 net/core/skbuff.c               | 12 +++++-------
 net/rxrpc/txbuf.c               |  5 +++--
 5 files changed, 30 insertions(+), 28 deletions(-)

diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
index 04810d8d6a7d..cc0ede0912f3 100644
--- a/include/linux/page_frag_cache.h
+++ b/include/linux/page_frag_cache.h
@@ -25,21 +25,25 @@ struct page_frag_cache {
 
 void page_frag_cache_drain(struct page_frag_cache *nc);
 void __page_frag_cache_drain(struct page *page, unsigned int count);
-void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
-			      gfp_t gfp_mask, unsigned int align_mask);
+void *page_frag_alloc(struct page_frag_cache *nc, unsigned int fragsz,
+		      gfp_t gfp_mask);
+
+static inline void *__page_frag_alloc_align(struct page_frag_cache *nc,
+					    unsigned int fragsz, gfp_t gfp_mask,
+					    unsigned int align)
+{
+	nc->offset = ALIGN(nc->offset, align);
+
+	return page_frag_alloc(nc, fragsz, gfp_mask);
+}
 
 static inline void *page_frag_alloc_align(struct page_frag_cache *nc,
 					  unsigned int fragsz, gfp_t gfp_mask,
 					  unsigned int align)
 {
 	WARN_ON_ONCE(!is_power_of_2(align));
-	return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align);
-}
 
-static inline void *page_frag_alloc(struct page_frag_cache *nc,
-				    unsigned int fragsz, gfp_t gfp_mask)
-{
-	return page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
+	return __page_frag_alloc_align(nc, fragsz, gfp_mask, align);
 }
 
 void page_frag_free(void *addr);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f2dc1f735c79..43c704589deb 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3268,7 +3268,7 @@ static inline void skb_queue_purge(struct sk_buff_head *list)
 unsigned int skb_rbtree_purge(struct rb_root *root);
 void skb_errqueue_purge(struct sk_buff_head *list);
 
-void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
+void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align);
 
 /**
  * netdev_alloc_frag - allocate a page fragment
@@ -3279,14 +3279,14 @@ void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
  */
 static inline void *netdev_alloc_frag(unsigned int fragsz)
 {
-	return __netdev_alloc_frag_align(fragsz, ~0u);
+	return __netdev_alloc_frag_align(fragsz, 1u);
 }
 
 static inline void *netdev_alloc_frag_align(unsigned int fragsz,
 					    unsigned int align)
 {
 	WARN_ON_ONCE(!is_power_of_2(align));
-	return __netdev_alloc_frag_align(fragsz, -align);
+	return __netdev_alloc_frag_align(fragsz, align);
 }
 
 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
@@ -3346,18 +3346,18 @@ static inline void skb_free_frag(void *addr)
 	page_frag_free(addr);
 }
 
-void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
+void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align);
 
 static inline void *napi_alloc_frag(unsigned int fragsz)
 {
-	return __napi_alloc_frag_align(fragsz, ~0u);
+	return __napi_alloc_frag_align(fragsz, 1u);
 }
 
 static inline void *napi_alloc_frag_align(unsigned int fragsz,
 					  unsigned int align)
 {
 	WARN_ON_ONCE(!is_power_of_2(align));
-	return __napi_alloc_frag_align(fragsz, -align);
+	return __napi_alloc_frag_align(fragsz, align);
 }
 
 struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int length);
diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
index dc864ee09536..b4408187e1ab 100644
--- a/mm/page_frag_cache.c
+++ b/mm/page_frag_cache.c
@@ -61,9 +61,8 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
 }
 EXPORT_SYMBOL(__page_frag_cache_drain);
 
-void *__page_frag_alloc_align(struct page_frag_cache *nc,
-			      unsigned int fragsz, gfp_t gfp_mask,
-			      unsigned int align_mask)
+void *page_frag_alloc(struct page_frag_cache *nc, unsigned int fragsz,
+		      gfp_t gfp_mask)
 {
 	unsigned int size, offset;
 	struct page *page;
@@ -92,7 +91,7 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
 	size = PAGE_SIZE;
 #endif
 
-	offset = ALIGN(nc->offset, -align_mask);
+	offset = nc->offset;
 	if (unlikely(offset + fragsz > size)) {
 		page = virt_to_page(nc->va);
 
@@ -129,7 +128,7 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
 
 	return nc->va + offset;
 }
-EXPORT_SYMBOL(__page_frag_alloc_align);
+EXPORT_SYMBOL(page_frag_alloc);
 
 /*
  * Frees a page fragment allocated out of either a compound or order 0 page.
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ea052fa710d8..676e2d857f02 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -306,18 +306,17 @@ void napi_get_frags_check(struct napi_struct *napi)
 	local_bh_enable();
 }
 
-void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
+void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 
 	fragsz = SKB_DATA_ALIGN(fragsz);
 
-	return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
-				       align_mask);
+	return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align);
 }
 EXPORT_SYMBOL(__napi_alloc_frag_align);
 
-void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
+void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align)
 {
 	void *data;
 
@@ -325,15 +324,14 @@ void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
 	if (in_hardirq() || irqs_disabled()) {
 		struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
 
-		data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC,
-					       align_mask);
+		data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align);
 	} else {
 		struct napi_alloc_cache *nc;
 
 		local_bh_disable();
 		nc = this_cpu_ptr(&napi_alloc_cache);
 		data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
-					       align_mask);
+					       align);
 		local_bh_enable();
 	}
 	return data;
diff --git a/net/rxrpc/txbuf.c b/net/rxrpc/txbuf.c
index e0679658d9de..eb640875bf07 100644
--- a/net/rxrpc/txbuf.c
+++ b/net/rxrpc/txbuf.c
@@ -32,9 +32,10 @@ struct rxrpc_txbuf *rxrpc_alloc_data_txbuf(struct rxrpc_call *call, size_t data_
 		hoff = round_up(sizeof(*whdr), data_align) - sizeof(*whdr);
 	total = hoff + sizeof(*whdr) + data_size;
 
+	data_align = max_t(size_t, data_align, L1_CACHE_BYTES);
 	mutex_lock(&call->conn->tx_data_alloc_lock);
-	buf = __page_frag_alloc_align(&call->conn->tx_data_alloc, total, gfp,
-				      ~(data_align - 1) & ~(L1_CACHE_BYTES - 1));
+	buf = page_frag_alloc_align(&call->conn->tx_data_alloc, total, gfp,
+				    data_align);
 	mutex_unlock(&call->conn->tx_data_alloc_lock);
 	if (!buf) {
 		kfree(txb);
-- 
2.33.0




* [PATCH net-next v2 07/15] mm: page_frag: add '_va' suffix to page_frag API
       [not found] <20240415131941.51153-1-linyunsheng@huawei.com>
                   ` (4 preceding siblings ...)
  2024-04-15 13:19 ` [PATCH net-next v2 06/15] mm: page_frag: change page_frag_alloc_* API to accept align param Yunsheng Lin
@ 2024-04-15 13:19 ` Yunsheng Lin
  2024-04-16 16:12   ` Alexander H Duyck
  2024-04-15 13:19 ` [PATCH net-next v2 08/15] mm: page_frag: add two inline helper for " Yunsheng Lin
                   ` (4 subsequent siblings)
  10 siblings, 1 reply; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-15 13:19 UTC (permalink / raw)
  To: davem, kuba, pabeni
  Cc: netdev, linux-kernel, Yunsheng Lin, Jeroen de Borst,
	Praveen Kaligineedi, Shailend Chand, Eric Dumazet,
	Jesse Brandeburg, Tony Nguyen, Sunil Goutham, Geetha sowjanya,
	Subbaraya Sundeep, hariprasad, Felix Fietkau, Sean Wang,
	Mark Lee, Lorenzo Bianconi, Matthias Brugger,
	AngeloGioacchino Del Regno, Keith Busch, Jens Axboe,
	Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni,
	Michael S. Tsirkin, Jason Wang, Alexander Duyck, Andrew Morton,
	Alexei Starovoitov, Daniel Borkmann, Jesper Dangaard Brouer,
	John Fastabend, Andrii Nakryiko, Martin KaFai Lau,
	Eduard Zingerman, Song Liu, Yonghong Song, KP Singh,
	Stanislav Fomichev, Hao Luo, Jiri Olsa, David Howells,
	Marc Dionne, Chuck Lever, Jeff Layton, Neil Brown,
	Olga Kornievskaia, Dai Ngo, Tom Talpey, Trond Myklebust,
	Anna Schumaker, intel-wired-lan, linux-arm-kernel,
	linux-mediatek, linux-nvme, kvm, virtualization, linux-mm, bpf,
	linux-afs, linux-nfs

Currently most of the page_frag API returns a 'virtual address' as
output or expects a 'virtual address' as input. In order to
differentiate the API handling of 'virtual address' from that of
'struct page', add a '_va' suffix to the corresponding APIs,
mirroring the page_pool_alloc_va() API of the page_pool.
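
Read that way, the convention after this patch is (our reading of
the naming scheme, mirroring page_pool):

	void *page_frag_alloc_va(...);		/* hands out a virtual address */
	void page_frag_free_va(void *addr);	/* takes a virtual address back */

which presumably leaves the unsuffixed names free for future
variants that return or accept a struct page instead.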

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
 drivers/net/ethernet/google/gve/gve_rx.c      |  4 ++--
 drivers/net/ethernet/intel/ice/ice_txrx.c     |  2 +-
 drivers/net/ethernet/intel/ice/ice_txrx.h     |  2 +-
 drivers/net/ethernet/intel/ice/ice_txrx_lib.c |  2 +-
 .../net/ethernet/intel/ixgbevf/ixgbevf_main.c |  4 ++--
 .../marvell/octeontx2/nic/otx2_common.c       |  2 +-
 drivers/net/ethernet/mediatek/mtk_wed_wo.c    |  4 ++--
 drivers/nvme/host/tcp.c                       |  8 +++----
 drivers/nvme/target/tcp.c                     | 22 ++++++++---------
 drivers/vhost/net.c                           |  6 ++---
 include/linux/page_frag_cache.h               | 24 ++++++++++---------
 include/linux/skbuff.h                        |  2 +-
 kernel/bpf/cpumap.c                           |  2 +-
 mm/page_frag_cache.c                          | 10 ++++----
 mm/page_frag_test.c                           |  6 ++---
 net/core/skbuff.c                             | 15 ++++++------
 net/core/xdp.c                                |  2 +-
 net/rxrpc/txbuf.c                             | 15 ++++++------
 net/sunrpc/svcsock.c                          |  6 ++---
 19 files changed, 71 insertions(+), 67 deletions(-)

diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index cd727e55ae0f..820874c1c570 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -687,7 +687,7 @@ static int gve_xdp_redirect(struct net_device *dev, struct gve_rx_ring *rx,
 
 	total_len = headroom + SKB_DATA_ALIGN(len) +
 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-	frame = page_frag_alloc(&rx->page_cache, total_len, GFP_ATOMIC);
+	frame = page_frag_alloc_va(&rx->page_cache, total_len, GFP_ATOMIC);
 	if (!frame) {
 		u64_stats_update_begin(&rx->statss);
 		rx->xdp_alloc_fails++;
@@ -700,7 +700,7 @@ static int gve_xdp_redirect(struct net_device *dev, struct gve_rx_ring *rx,
 
 	err = xdp_do_redirect(dev, &new, xdp_prog);
 	if (err)
-		page_frag_free(frame);
+		page_frag_free_va(frame);
 
 	return err;
 }
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 8bb743f78fcb..399b317c509d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -126,7 +126,7 @@ ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
 		dev_kfree_skb_any(tx_buf->skb);
 		break;
 	case ICE_TX_BUF_XDP_TX:
-		page_frag_free(tx_buf->raw_buf);
+		page_frag_free_va(tx_buf->raw_buf);
 		break;
 	case ICE_TX_BUF_XDP_XMIT:
 		xdp_return_frame(tx_buf->xdpf);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index feba314a3fe4..6379f57d8228 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -148,7 +148,7 @@ static inline int ice_skb_pad(void)
  * @ICE_TX_BUF_DUMMY: dummy Flow Director packet, unmap and kfree()
  * @ICE_TX_BUF_FRAG: mapped skb OR &xdp_buff frag, only unmap DMA
  * @ICE_TX_BUF_SKB: &sk_buff, unmap and consume_skb(), update stats
- * @ICE_TX_BUF_XDP_TX: &xdp_buff, unmap and page_frag_free(), stats
+ * @ICE_TX_BUF_XDP_TX: &xdp_buff, unmap and page_frag_free_va(), stats
  * @ICE_TX_BUF_XDP_XMIT: &xdp_frame, unmap and xdp_return_frame(), stats
  * @ICE_TX_BUF_XSK_TX: &xdp_buff on XSk queue, xsk_buff_free(), stats
  */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index df072ce767b1..c34cc02ad578 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -288,7 +288,7 @@ ice_clean_xdp_tx_buf(struct device *dev, struct ice_tx_buf *tx_buf,
 
 	switch (tx_buf->type) {
 	case ICE_TX_BUF_XDP_TX:
-		page_frag_free(tx_buf->raw_buf);
+		page_frag_free_va(tx_buf->raw_buf);
 		break;
 	case ICE_TX_BUF_XDP_XMIT:
 		xdp_return_frame_bulk(tx_buf->xdpf, bq);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 3161a13079fe..c35b8f675b48 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -303,7 +303,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 
 		/* free the skb */
 		if (ring_is_xdp(tx_ring))
-			page_frag_free(tx_buffer->data);
+			page_frag_free_va(tx_buffer->data);
 		else
 			napi_consume_skb(tx_buffer->skb, napi_budget);
 
@@ -2413,7 +2413,7 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
 
 		/* Free all the Tx ring sk_buffs */
 		if (ring_is_xdp(tx_ring))
-			page_frag_free(tx_buffer->data);
+			page_frag_free_va(tx_buffer->data);
 		else
 			dev_kfree_skb_any(tx_buffer->skb);
 
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index a85ac039d779..8eb5820b8a70 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -553,7 +553,7 @@ static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
 	*dma = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
 				    DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
 	if (unlikely(dma_mapping_error(pfvf->dev, *dma))) {
-		page_frag_free(buf);
+		page_frag_free_va(buf);
 		return -ENOMEM;
 	}
 
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
index 7063c78bd35f..c4228719f8a4 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -142,8 +142,8 @@ mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
 		dma_addr_t addr;
 		void *buf;
 
-		buf = page_frag_alloc(&q->cache, q->buf_size,
-				      GFP_ATOMIC | GFP_DMA32);
+		buf = page_frag_alloc_va(&q->cache, q->buf_size,
+					 GFP_ATOMIC | GFP_DMA32);
 		if (!buf)
 			break;
 
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index fdbcdcedcee9..79eddd74bfbb 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -500,7 +500,7 @@ static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
 {
 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
 
-	page_frag_free(req->pdu);
+	page_frag_free_va(req->pdu);
 }
 
 static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
@@ -514,7 +514,7 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
 	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
 	u8 hdgst = nvme_tcp_hdgst_len(queue);
 
-	req->pdu = page_frag_alloc(&queue->pf_cache,
+	req->pdu = page_frag_alloc_va(&queue->pf_cache,
 		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
 		GFP_KERNEL | __GFP_ZERO);
 	if (!req->pdu)
@@ -1331,7 +1331,7 @@ static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
 {
 	struct nvme_tcp_request *async = &ctrl->async_req;
 
-	page_frag_free(async->pdu);
+	page_frag_free_va(async->pdu);
 }
 
 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
@@ -1340,7 +1340,7 @@ static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
 	struct nvme_tcp_request *async = &ctrl->async_req;
 	u8 hdgst = nvme_tcp_hdgst_len(queue);
 
-	async->pdu = page_frag_alloc(&queue->pf_cache,
+	async->pdu = page_frag_alloc_va(&queue->pf_cache,
 		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
 		GFP_KERNEL | __GFP_ZERO);
 	if (!async->pdu)
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index a5422e2c979a..ea356ce22672 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1462,24 +1462,24 @@ static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
 	c->queue = queue;
 	c->req.port = queue->port->nport;
 
-	c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
+	c->cmd_pdu = page_frag_alloc_va(&queue->pf_cache,
 			sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
 	if (!c->cmd_pdu)
 		return -ENOMEM;
 	c->req.cmd = &c->cmd_pdu->cmd;
 
-	c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
+	c->rsp_pdu = page_frag_alloc_va(&queue->pf_cache,
 			sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
 	if (!c->rsp_pdu)
 		goto out_free_cmd;
 	c->req.cqe = &c->rsp_pdu->cqe;
 
-	c->data_pdu = page_frag_alloc(&queue->pf_cache,
+	c->data_pdu = page_frag_alloc_va(&queue->pf_cache,
 			sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
 	if (!c->data_pdu)
 		goto out_free_rsp;
 
-	c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
+	c->r2t_pdu = page_frag_alloc_va(&queue->pf_cache,
 			sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
 	if (!c->r2t_pdu)
 		goto out_free_data;
@@ -1494,20 +1494,20 @@ static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
 
 	return 0;
 out_free_data:
-	page_frag_free(c->data_pdu);
+	page_frag_free_va(c->data_pdu);
 out_free_rsp:
-	page_frag_free(c->rsp_pdu);
+	page_frag_free_va(c->rsp_pdu);
 out_free_cmd:
-	page_frag_free(c->cmd_pdu);
+	page_frag_free_va(c->cmd_pdu);
 	return -ENOMEM;
 }
 
 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
 {
-	page_frag_free(c->r2t_pdu);
-	page_frag_free(c->data_pdu);
-	page_frag_free(c->rsp_pdu);
-	page_frag_free(c->cmd_pdu);
+	page_frag_free_va(c->r2t_pdu);
+	page_frag_free_va(c->data_pdu);
+	page_frag_free_va(c->rsp_pdu);
+	page_frag_free_va(c->cmd_pdu);
 }
 
 static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index c64ded183f8d..96d5ca299552 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -682,8 +682,8 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
 		return -ENOSPC;
 
 	buflen += SKB_DATA_ALIGN(len + pad);
-	buf = page_frag_alloc_align(&net->pf_cache, buflen, GFP_KERNEL,
-				    SMP_CACHE_BYTES);
+	buf = page_frag_alloc_va_align(&net->pf_cache, buflen, GFP_KERNEL,
+				       SMP_CACHE_BYTES);
 	if (unlikely(!buf))
 		return -ENOMEM;
 
@@ -730,7 +730,7 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
 	return 0;
 
 err:
-	page_frag_free(buf);
+	page_frag_free_va(buf);
 	return ret;
 }
 
diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
index cc0ede0912f3..9d5d86b2d3ab 100644
--- a/include/linux/page_frag_cache.h
+++ b/include/linux/page_frag_cache.h
@@ -25,27 +25,29 @@ struct page_frag_cache {
 
 void page_frag_cache_drain(struct page_frag_cache *nc);
 void __page_frag_cache_drain(struct page *page, unsigned int count);
-void *page_frag_alloc(struct page_frag_cache *nc, unsigned int fragsz,
-		      gfp_t gfp_mask);
+void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
+			 gfp_t gfp_mask);
 
-static inline void *__page_frag_alloc_align(struct page_frag_cache *nc,
-					    unsigned int fragsz, gfp_t gfp_mask,
-					    unsigned int align)
+static inline void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
+					       unsigned int fragsz,
+					       gfp_t gfp_mask,
+					       unsigned int align)
 {
 	nc->offset = ALIGN(nc->offset, align);
 
-	return page_frag_alloc(nc, fragsz, gfp_mask);
+	return page_frag_alloc_va(nc, fragsz, gfp_mask);
 }
 
-static inline void *page_frag_alloc_align(struct page_frag_cache *nc,
-					  unsigned int fragsz, gfp_t gfp_mask,
-					  unsigned int align)
+static inline void *page_frag_alloc_va_align(struct page_frag_cache *nc,
+					     unsigned int fragsz,
+					     gfp_t gfp_mask,
+					     unsigned int align)
 {
 	WARN_ON_ONCE(!is_power_of_2(align));
 
-	return __page_frag_alloc_align(nc, fragsz, gfp_mask, align);
+	return __page_frag_alloc_va_align(nc, fragsz, gfp_mask, align);
 }
 
-void page_frag_free(void *addr);
+void page_frag_free_va(void *addr);
 
 #endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 43c704589deb..cc80600dcedf 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3343,7 +3343,7 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
 
 static inline void skb_free_frag(void *addr)
 {
-	page_frag_free(addr);
+	page_frag_free_va(addr);
 }
 
 void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align);
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index a8e34416e960..3a6a237e7dd3 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -322,7 +322,7 @@ static int cpu_map_kthread_run(void *data)
 
 			/* Bring struct page memory area to curr CPU. Read by
 			 * build_skb_around via page_is_pfmemalloc(), and when
-			 * freed written by page_frag_free call.
+			 * freed written by page_frag_free_va call.
 			 */
 			prefetchw(page);
 		}
diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
index b4408187e1ab..50511d8522d0 100644
--- a/mm/page_frag_cache.c
+++ b/mm/page_frag_cache.c
@@ -61,8 +61,8 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
 }
 EXPORT_SYMBOL(__page_frag_cache_drain);
 
-void *page_frag_alloc(struct page_frag_cache *nc, unsigned int fragsz,
-		      gfp_t gfp_mask)
+void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
+			 gfp_t gfp_mask)
 {
 	unsigned int size, offset;
 	struct page *page;
@@ -128,16 +128,16 @@ void *page_frag_alloc(struct page_frag_cache *nc, unsigned int fragsz,
 
 	return nc->va + offset;
 }
-EXPORT_SYMBOL(page_frag_alloc);
+EXPORT_SYMBOL(page_frag_alloc_va);
 
 /*
  * Frees a page fragment allocated out of either a compound or order 0 page.
  */
-void page_frag_free(void *addr)
+void page_frag_free_va(void *addr)
 {
 	struct page *page = virt_to_head_page(addr);
 
 	if (unlikely(put_page_testzero(page)))
 		free_unref_page(page, compound_order(page));
 }
-EXPORT_SYMBOL(page_frag_free);
+EXPORT_SYMBOL(page_frag_free_va);
diff --git a/mm/page_frag_test.c b/mm/page_frag_test.c
index ebfd1c3dae8f..cab05b8a2e77 100644
--- a/mm/page_frag_test.c
+++ b/mm/page_frag_test.c
@@ -260,7 +260,7 @@ static int page_frag_pop_thread(void *arg)
 
 		if (obj) {
 			nr--;
-			page_frag_free(obj);
+			page_frag_free_va(obj);
 		} else {
 			cond_resched();
 		}
@@ -289,13 +289,13 @@ static int page_frag_push_thread(void *arg)
 		int ret;
 
 		size = clamp(size, 4U, 4096U);
-		va = page_frag_alloc(&test_frag, size, GFP_KERNEL);
+		va = page_frag_alloc_va(&test_frag, size, GFP_KERNEL);
 		if (!va)
 			continue;
 
 		ret = objpool_push(va, pool);
 		if (ret) {
-			page_frag_free(va);
+			page_frag_free_va(va);
 			cond_resched();
 		} else {
 			nr--;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 676e2d857f02..139a193853cc 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -312,7 +312,7 @@ void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align)
 
 	fragsz = SKB_DATA_ALIGN(fragsz);
 
-	return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align);
+	return __page_frag_alloc_va_align(&nc->page, fragsz, GFP_ATOMIC, align);
 }
 EXPORT_SYMBOL(__napi_alloc_frag_align);
 
@@ -324,14 +324,15 @@ void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align)
 	if (in_hardirq() || irqs_disabled()) {
 		struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
 
-		data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align);
+		data = __page_frag_alloc_va_align(nc, fragsz, GFP_ATOMIC,
+						  align);
 	} else {
 		struct napi_alloc_cache *nc;
 
 		local_bh_disable();
 		nc = this_cpu_ptr(&napi_alloc_cache);
-		data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
-					       align);
+		data = __page_frag_alloc_va_align(&nc->page, fragsz, GFP_ATOMIC,
+						  align);
 		local_bh_enable();
 	}
 	return data;
@@ -741,12 +742,12 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
 
 	if (in_hardirq() || irqs_disabled()) {
 		nc = this_cpu_ptr(&netdev_alloc_cache);
-		data = page_frag_alloc(nc, len, gfp_mask);
+		data = page_frag_alloc_va(nc, len, gfp_mask);
 		pfmemalloc = nc->pfmemalloc;
 	} else {
 		local_bh_disable();
 		nc = this_cpu_ptr(&napi_alloc_cache.page);
-		data = page_frag_alloc(nc, len, gfp_mask);
+		data = page_frag_alloc_va(nc, len, gfp_mask);
 		pfmemalloc = nc->pfmemalloc;
 		local_bh_enable();
 	}
@@ -834,7 +835,7 @@ struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
 	} else {
 		len = SKB_HEAD_ALIGN(len);
 
-		data = page_frag_alloc(&nc->page, len, gfp_mask);
+		data = page_frag_alloc_va(&nc->page, len, gfp_mask);
 		pfmemalloc = nc->page.pfmemalloc;
 	}
 
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 41693154e426..245a2d011aeb 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -391,7 +391,7 @@ void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
 		page_pool_put_full_page(page->pp, page, napi_direct);
 		break;
 	case MEM_TYPE_PAGE_SHARED:
-		page_frag_free(data);
+		page_frag_free_va(data);
 		break;
 	case MEM_TYPE_PAGE_ORDER0:
 		page = virt_to_page(data); /* Assumes order0 page*/
diff --git a/net/rxrpc/txbuf.c b/net/rxrpc/txbuf.c
index eb640875bf07..f2fa98360789 100644
--- a/net/rxrpc/txbuf.c
+++ b/net/rxrpc/txbuf.c
@@ -34,8 +34,8 @@ struct rxrpc_txbuf *rxrpc_alloc_data_txbuf(struct rxrpc_call *call, size_t data_
 
 	data_align = max_t(size_t, data_align, L1_CACHE_BYTES);
 	mutex_lock(&call->conn->tx_data_alloc_lock);
-	buf = page_frag_alloc_align(&call->conn->tx_data_alloc, total, gfp,
-				    data_align);
+	buf = page_frag_alloc_va_align(&call->conn->tx_data_alloc, total, gfp,
+				       data_align);
 	mutex_unlock(&call->conn->tx_data_alloc_lock);
 	if (!buf) {
 		kfree(txb);
@@ -97,17 +97,18 @@ struct rxrpc_txbuf *rxrpc_alloc_ack_txbuf(struct rxrpc_call *call, size_t sack_s
 	if (!txb)
 		return NULL;
 
-	buf = page_frag_alloc(&call->local->tx_alloc,
-			      sizeof(*whdr) + sizeof(*ack) + 1 + 3 + sizeof(*trailer), gfp);
+	buf = page_frag_alloc_va(&call->local->tx_alloc,
+				 sizeof(*whdr) + sizeof(*ack) + 1 + 3 + sizeof(*trailer), gfp);
 	if (!buf) {
 		kfree(txb);
 		return NULL;
 	}
 
 	if (sack_size) {
-		buf2 = page_frag_alloc(&call->local->tx_alloc, sack_size, gfp);
+		buf2 = page_frag_alloc_va(&call->local->tx_alloc, sack_size,
+					  gfp);
 		if (!buf2) {
-			page_frag_free(buf);
+			page_frag_free_va(buf);
 			kfree(txb);
 			return NULL;
 		}
@@ -181,7 +182,7 @@ static void rxrpc_free_txbuf(struct rxrpc_txbuf *txb)
 			  rxrpc_txbuf_free);
 	for (i = 0; i < txb->nr_kvec; i++)
 		if (txb->kvec[i].iov_base)
-			page_frag_free(txb->kvec[i].iov_base);
+			page_frag_free_va(txb->kvec[i].iov_base);
 	kfree(txb);
 	atomic_dec(&rxrpc_nr_txbuf);
 }
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 6b3f01beb294..42d20412c1c3 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1222,8 +1222,8 @@ static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
 	/* The stream record marker is copied into a temporary page
 	 * fragment buffer so that it can be included in rq_bvec.
 	 */
-	buf = page_frag_alloc(&svsk->sk_frag_cache, sizeof(marker),
-			      GFP_KERNEL);
+	buf = page_frag_alloc_va(&svsk->sk_frag_cache, sizeof(marker),
+				 GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 	memcpy(buf, &marker, sizeof(marker));
@@ -1235,7 +1235,7 @@ static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
 	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
 		      1 + count, sizeof(marker) + rqstp->rq_res.len);
 	ret = sock_sendmsg(svsk->sk_sock, &msg);
-	page_frag_free(buf);
+	page_frag_free_va(buf);
 	if (ret < 0)
 		return ret;
 	*sentp += ret;
-- 
2.33.0



^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH net-next v2 08/15] mm: page_frag: add two inline helper for page_frag API
       [not found] <20240415131941.51153-1-linyunsheng@huawei.com>
                   ` (5 preceding siblings ...)
  2024-04-15 13:19 ` [PATCH net-next v2 07/15] mm: page_frag: add '_va' suffix to page_frag API Yunsheng Lin
@ 2024-04-15 13:19 ` Yunsheng Lin
  2024-04-15 13:19 ` [PATCH net-next v2 09/15] mm: page_frag: reuse MSB of 'size' field for pfmemalloc Yunsheng Lin
                   ` (3 subsequent siblings)
  10 siblings, 0 replies; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-15 13:19 UTC (permalink / raw)
  To: davem, kuba, pabeni
  Cc: netdev, linux-kernel, Yunsheng Lin, Alexander Duyck,
	Andrew Morton, Eric Dumazet, linux-mm

Add two inline helpers for the page_frag API so that callers do
not have to access the fields of 'struct page_frag_cache' directly.
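
A minimal usage sketch of the two helpers (error handling elided; the
page_frag_cache is assumed to be owned by the caller):

	struct page_frag_cache cache;
	void *va;

	page_frag_cache_init(&cache);
	va = page_frag_alloc_va(&cache, 256, GFP_KERNEL);
	if (va && page_frag_cache_is_pfmemalloc(&cache)) {
		/* e.g. propagate pfmemalloc to the skb */
	}
	page_frag_cache_drain(&cache);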

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
 include/linux/page_frag_cache.h | 10 ++++++++++
 mm/page_frag_test.c             |  2 +-
 net/core/skbuff.c               |  4 ++--
 3 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
index 9d5d86b2d3ab..fe5faa80b6c3 100644
--- a/include/linux/page_frag_cache.h
+++ b/include/linux/page_frag_cache.h
@@ -23,6 +23,16 @@ struct page_frag_cache {
 	bool pfmemalloc;
 };
 
+static inline void page_frag_cache_init(struct page_frag_cache *nc)
+{
+	nc->va = NULL;
+}
+
+static inline bool page_frag_cache_is_pfmemalloc(struct page_frag_cache *nc)
+{
+	return !!nc->pfmemalloc;
+}
+
 void page_frag_cache_drain(struct page_frag_cache *nc);
 void __page_frag_cache_drain(struct page *page, unsigned int count);
 void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
diff --git a/mm/page_frag_test.c b/mm/page_frag_test.c
index cab05b8a2e77..20756b28df4a 100644
--- a/mm/page_frag_test.c
+++ b/mm/page_frag_test.c
@@ -318,7 +318,7 @@ static int __init page_frag_test_init(void)
 	u64 duration;
 	int ret;
 
-	test_frag.va = NULL;
+	page_frag_cache_init(&test_frag);
 	atomic_set(&nthreads, 2);
 	init_completion(&wait);
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 139a193853cc..cdbfdf651001 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -743,12 +743,12 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
 	if (in_hardirq() || irqs_disabled()) {
 		nc = this_cpu_ptr(&netdev_alloc_cache);
 		data = page_frag_alloc_va(nc, len, gfp_mask);
-		pfmemalloc = nc->pfmemalloc;
+		pfmemalloc = page_frag_cache_is_pfmemalloc(nc);
 	} else {
 		local_bh_disable();
 		nc = this_cpu_ptr(&napi_alloc_cache.page);
 		data = page_frag_alloc_va(nc, len, gfp_mask);
-		pfmemalloc = nc->pfmemalloc;
+		pfmemalloc = page_frag_cache_is_pfmemalloc(nc);
 		local_bh_enable();
 	}
 
-- 
2.33.0



^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH net-next v2 09/15] mm: page_frag: reuse MSB of 'size' field for pfmemalloc
       [not found] <20240415131941.51153-1-linyunsheng@huawei.com>
                   ` (6 preceding siblings ...)
  2024-04-15 13:19 ` [PATCH net-next v2 08/15] mm: page_frag: add two inline helper for " Yunsheng Lin
@ 2024-04-15 13:19 ` Yunsheng Lin
  2024-04-16 16:22   ` Alexander H Duyck
  2024-04-15 13:19 ` [PATCH net-next v2 10/15] mm: page_frag: reuse existing bit field of 'va' for pagecnt_bias Yunsheng Lin
                   ` (2 subsequent siblings)
  10 siblings, 1 reply; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-15 13:19 UTC (permalink / raw)
  To: davem, kuba, pabeni
  Cc: netdev, linux-kernel, Yunsheng Lin, Alexander Duyck,
	Andrew Morton, linux-mm

The '(PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)' case covers systems
with a page size less than 32KB; 32KB is 0x8000 bytes and would
require 16 bits of space. Change 'size' to 'size_mask' to avoid
using the MSB, and change the 'pfmemalloc' field to reuse that
MSB, so that the original space needed by 'pfmemalloc' is removed.

For the other case, the MSB of 'offset' is reused for 'pfmemalloc'.
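
To illustrate with concrete numbers (a sketch, assuming
PAGE_FRAG_CACHE_MAX_SIZE == 32768):

	/*
	 * size     == 0x8000 -> storing it needs 16 bits (bit 15 set)
	 * size - 1 == 0x7fff -> fits in 15 bits
	 *
	 * So 'size_mask = size - 1' frees the MSB of the __u16 for
	 * 'pfmemalloc', and the full size is recovered as
	 * 'nc->size_mask + 1' when needed.
	 */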

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
 include/linux/page_frag_cache.h | 13 ++++++++-----
 mm/page_frag_cache.c            |  5 +++--
 2 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
index fe5faa80b6c3..40a7d6da9ef0 100644
--- a/include/linux/page_frag_cache.h
+++ b/include/linux/page_frag_cache.h
@@ -12,15 +12,16 @@ struct page_frag_cache {
 	void *va;
 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
 	__u16 offset;
-	__u16 size;
+	__u16 size_mask:15;
+	__u16 pfmemalloc:1;
 #else
-	__u32 offset;
+	__u32 offset:31;
+	__u32 pfmemalloc:1;
 #endif
 	/* we maintain a pagecount bias, so that we dont dirty cache line
 	 * containing page->_refcount every time we allocate a fragment.
 	 */
 	unsigned int		pagecnt_bias;
-	bool pfmemalloc;
 };
 
 static inline void page_frag_cache_init(struct page_frag_cache *nc)
@@ -43,7 +44,9 @@ static inline void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
 					       gfp_t gfp_mask,
 					       unsigned int align)
 {
-	nc->offset = ALIGN(nc->offset, align);
+	unsigned int offset = nc->offset;
+
+	nc->offset = ALIGN(offset, align);
 
 	return page_frag_alloc_va(nc, fragsz, gfp_mask);
 }
@@ -53,7 +56,7 @@ static inline void *page_frag_alloc_va_align(struct page_frag_cache *nc,
 					     gfp_t gfp_mask,
 					     unsigned int align)
 {
-	WARN_ON_ONCE(!is_power_of_2(align));
+	WARN_ON_ONCE(!is_power_of_2(align) || align >= PAGE_SIZE);
 
 	return __page_frag_alloc_va_align(nc, fragsz, gfp_mask, align);
 }
diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
index 50511d8522d0..8d93029116e1 100644
--- a/mm/page_frag_cache.c
+++ b/mm/page_frag_cache.c
@@ -32,7 +32,8 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
 		   __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
 	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
 				PAGE_FRAG_CACHE_MAX_ORDER);
-	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
+	nc->size_mask = page ? PAGE_FRAG_CACHE_MAX_SIZE - 1 : PAGE_SIZE - 1;
+	VM_BUG_ON(page && nc->size_mask != PAGE_FRAG_CACHE_MAX_SIZE - 1);
 #endif
 	if (unlikely(!page))
 		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
@@ -86,7 +87,7 @@ void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
 
 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
 	/* if size can vary use size else just use PAGE_SIZE */
-	size = nc->size;
+	size = nc->size_mask + 1;
 #else
 	size = PAGE_SIZE;
 #endif
-- 
2.33.0



^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH net-next v2 10/15] mm: page_frag: reuse existing bit field of 'va' for pagecnt_bias
       [not found] <20240415131941.51153-1-linyunsheng@huawei.com>
                   ` (7 preceding siblings ...)
  2024-04-15 13:19 ` [PATCH net-next v2 09/15] mm: page_frag: reuse MSB of 'size' field for pfmemalloc Yunsheng Lin
@ 2024-04-15 13:19 ` Yunsheng Lin
  2024-04-16 16:33   ` Alexander H Duyck
  2024-04-15 13:19 ` [PATCH net-next v2 12/15] mm: page_frag: introduce prepare/commit API for page_frag Yunsheng Lin
  2024-04-15 13:19 ` [PATCH net-next v2 14/15] mm: page_frag: update documentation " Yunsheng Lin
  10 siblings, 1 reply; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-15 13:19 UTC (permalink / raw)
  To: davem, kuba, pabeni
  Cc: netdev, linux-kernel, Yunsheng Lin, Alexander Duyck,
	Andrew Morton, linux-mm

As 'va' is always aligned to the order of the page allocated, we
can reuse its LSB bits for the pagecount bias and remove the
original space needed by 'pagecnt_bias'. Also limit 'fragsz' to be
at least the size of 'unsigned int' to match the now-limited
pagecnt_bias.
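
As a sketch of the encoding (illustrative, not the exact kernel code):
with an order-3 (32KB) cache the 'va' is 32KB-aligned, so its low 15
bits are always zero and can hold the bias:

	unsigned long word = nc->pagecnt_bias; /* same storage as nc->va */
	void *va = (void *)(word & ~(unsigned long)size_mask);
	unsigned long bias = word & size_mask;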

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
 include/linux/page_frag_cache.h | 20 +++++++----
 mm/page_frag_cache.c            | 63 +++++++++++++++++++--------------
 2 files changed, 50 insertions(+), 33 deletions(-)

diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
index 40a7d6da9ef0..a97a1ac017d6 100644
--- a/include/linux/page_frag_cache.h
+++ b/include/linux/page_frag_cache.h
@@ -9,7 +9,18 @@
 #define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)
 
 struct page_frag_cache {
-	void *va;
+	union {
+		void *va;
+		/* we maintain a pagecount bias, so that we dont dirty cache
+		 * line containing page->_refcount every time we allocate a
+		 * fragment. As 'va' is always aligned with the order of the
+		 * page allocated, we can reuse the LSB bits for the pagecount
+		 * bias, and its bit width happens to be indicated by the
+		 * 'size_mask' below.
+		 */
+		unsigned long pagecnt_bias;
+
+	};
 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
 	__u16 offset;
 	__u16 size_mask:15;
@@ -18,10 +29,6 @@ struct page_frag_cache {
 	__u32 offset:31;
 	__u32 pfmemalloc:1;
 #endif
-	/* we maintain a pagecount bias, so that we dont dirty cache line
-	 * containing page->_refcount every time we allocate a fragment.
-	 */
-	unsigned int		pagecnt_bias;
 };
 
 static inline void page_frag_cache_init(struct page_frag_cache *nc)
@@ -56,7 +63,8 @@ static inline void *page_frag_alloc_va_align(struct page_frag_cache *nc,
 					     gfp_t gfp_mask,
 					     unsigned int align)
 {
-	WARN_ON_ONCE(!is_power_of_2(align) || align >= PAGE_SIZE);
+	WARN_ON_ONCE(!is_power_of_2(align) || align >= PAGE_SIZE ||
+		     fragsz < sizeof(unsigned int));
 
 	return __page_frag_alloc_va_align(nc, fragsz, gfp_mask, align);
 }
diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
index 8d93029116e1..5f7f96c88163 100644
--- a/mm/page_frag_cache.c
+++ b/mm/page_frag_cache.c
@@ -18,8 +18,8 @@
 #include <linux/page_frag_cache.h>
 #include "internal.h"
 
-static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
-					     gfp_t gfp_mask)
+static bool __page_frag_cache_refill(struct page_frag_cache *nc,
+				     gfp_t gfp_mask)
 {
 	struct page *page = NULL;
 	gfp_t gfp = gfp_mask;
@@ -38,9 +38,26 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
 	if (unlikely(!page))
 		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
 
-	nc->va = page ? page_address(page) : NULL;
+	if (unlikely(!page)) {
+		nc->va = NULL;
+		return false;
+	}
+
+	nc->va = page_address(page);
 
-	return page;
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+	VM_BUG_ON(nc->pagecnt_bias & nc->size_mask);
+	page_ref_add(page, nc->size_mask - 1);
+	nc->pagecnt_bias |= nc->size_mask;
+#else
+	VM_BUG_ON(nc->pagecnt_bias & (PAGE_SIZE - 1));
+	page_ref_add(page, PAGE_SIZE - 2);
+	nc->pagecnt_bias |= (PAGE_SIZE - 1);
+#endif
+
+	nc->pfmemalloc = page_is_pfmemalloc(page);
+	nc->offset = 0;
+	return true;
 }
 
 void page_frag_cache_drain(struct page_frag_cache *nc)
@@ -65,38 +82,31 @@ EXPORT_SYMBOL(__page_frag_cache_drain);
 void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
 			 gfp_t gfp_mask)
 {
-	unsigned int size, offset;
+	unsigned long size_mask;
+	unsigned int offset;
 	struct page *page;
+	void *va;
 
 	if (unlikely(!nc->va)) {
 refill:
-		page = __page_frag_cache_refill(nc, gfp_mask);
-		if (!page)
+		if (!__page_frag_cache_refill(nc, gfp_mask))
 			return NULL;
-
-		/* Even if we own the page, we do not use atomic_set().
-		 * This would break get_page_unless_zero() users.
-		 */
-		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
-
-		/* reset page count bias and offset to start of new frag */
-		nc->pfmemalloc = page_is_pfmemalloc(page);
-		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
-		nc->offset = 0;
 	}
 
 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
 	/* if size can vary use size else just use PAGE_SIZE */
-	size = nc->size_mask + 1;
+	size_mask = nc->size_mask;
 #else
-	size = PAGE_SIZE;
+	size_mask = PAGE_SIZE - 1;
 #endif
 
+	va = (void *)((unsigned long)nc->va & ~size_mask);
 	offset = nc->offset;
-	if (unlikely(offset + fragsz > size)) {
-		page = virt_to_page(nc->va);
 
-		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
+	if (unlikely(offset + fragsz > (size_mask + 1))) {
+		page = virt_to_page(va);
+
+		if (!page_ref_sub_and_test(page, nc->pagecnt_bias & size_mask))
 			goto refill;
 
 		if (unlikely(nc->pfmemalloc)) {
@@ -105,12 +115,11 @@ void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
 		}
 
 		/* OK, page count is 0, we can safely set it */
-		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
+		set_page_count(page, size_mask);
+		nc->pagecnt_bias |= size_mask;
 
-		/* reset page count bias and offset to start of new frag */
-		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
 		offset = 0;
-		if (unlikely(fragsz > size)) {
+		if (unlikely(fragsz > (size_mask + 1))) {
 			/*
 			 * The caller is trying to allocate a fragment
 			 * with fragsz > PAGE_SIZE but the cache isn't big
@@ -127,7 +136,7 @@ void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
 	nc->pagecnt_bias--;
 	nc->offset = offset + fragsz;
 
-	return nc->va + offset;
+	return va + offset;
 }
 EXPORT_SYMBOL(page_frag_alloc_va);
 
-- 
2.33.0



^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH net-next v2 12/15] mm: page_frag: introduce prepare/commit API for page_frag
       [not found] <20240415131941.51153-1-linyunsheng@huawei.com>
                   ` (8 preceding siblings ...)
  2024-04-15 13:19 ` [PATCH net-next v2 10/15] mm: page_frag: reuse existing bit field of 'va' for pagecnt_bias Yunsheng Lin
@ 2024-04-15 13:19 ` Yunsheng Lin
  2024-04-15 13:19 ` [PATCH net-next v2 14/15] mm: page_frag: update documentation " Yunsheng Lin
  10 siblings, 0 replies; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-15 13:19 UTC (permalink / raw)
  To: davem, kuba, pabeni
  Cc: netdev, linux-kernel, Yunsheng Lin, Alexander Duyck,
	Andrew Morton, linux-mm

There are many use cases that need a minimum amount of memory in
order to make forward progress, but can do better if more memory
is available.

Currently the skb_page_frag_refill() API is used to handle the
above use cases; as mentioned in [1], its implementation is
similar to the one in the mm subsystem.

To unify those two page_frag implementations, introduce a prepare
API that ensures the minimum memory is satisfied and returns how
much memory is actually available to the caller.

The caller can then decide how much memory to use by calling the
commit API, or skip the commit API entirely if it decides not to
use any memory.
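
The intended calling pattern, as a rough sketch (cache setup and error
paths elided; consume() is a hypothetical helper returning the number
of bytes actually used):

	unsigned int offset, size = 32;	/* the minimum needed */
	unsigned int used;
	void *va;

	va = page_frag_alloc_va_prepare(&cache, &offset, &size, GFP_KERNEL);
	if (!va)
		return -ENOMEM;

	/* 'size' now reports the maximum length we may use */
	used = consume(va, size);
	if (used)
		page_frag_alloc_commit(&cache, offset, used);
	/* skipping the commit call consumes no memory at all */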

Note it seems hard to decide which header files are needed for
calling virt_to_page() in an inline helper, so a macro is used
instead of an inline helper to avoid dealing with that.

1. https://lore.kernel.org/all/20240228093013.8263-1-linyunsheng@huawei.com/

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
 include/linux/page_frag_cache.h | 141 +++++++++++++++++++++++++++++++-
 mm/page_frag_cache.c            |  13 ++-
 2 files changed, 144 insertions(+), 10 deletions(-)

diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
index a97a1ac017d6..28185969cd2c 100644
--- a/include/linux/page_frag_cache.h
+++ b/include/linux/page_frag_cache.h
@@ -43,8 +43,25 @@ static inline bool page_frag_cache_is_pfmemalloc(struct page_frag_cache *nc)
 
 void page_frag_cache_drain(struct page_frag_cache *nc);
 void __page_frag_cache_drain(struct page *page, unsigned int count);
-void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
-			 gfp_t gfp_mask);
+void *page_frag_cache_refill(struct page_frag_cache *nc, unsigned int fragsz,
+			     gfp_t gfp_mask);
+
+static inline void *page_frag_alloc_va(struct page_frag_cache *nc,
+				       unsigned int fragsz, gfp_t gfp_mask)
+{
+	unsigned int offset;
+	void *va;
+
+	va = page_frag_cache_refill(nc, fragsz, gfp_mask);
+	if (unlikely(!va))
+		return NULL;
+
+	offset = nc->offset;
+	nc->pagecnt_bias--;
+	nc->offset = offset + fragsz;
+
+	return va + offset;
+}
 
 static inline void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
 					       unsigned int fragsz,
@@ -69,6 +86,126 @@ static inline void *page_frag_alloc_va_align(struct page_frag_cache *nc,
 	return __page_frag_alloc_va_align(nc, fragsz, gfp_mask, align);
 }
 
+static inline void *page_frag_alloc_va_prepare(struct page_frag_cache *nc,
+					       unsigned int *offset,
+					       unsigned int *size,
+					       gfp_t gfp_mask)
+{
+	void *va;
+
+	va = page_frag_cache_refill(nc, *size, gfp_mask);
+	if (unlikely(!va))
+		return NULL;
+
+	*offset = nc->offset;
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+	*size = nc->size_mask - *offset + 1;
+#else
+	*size = PAGE_SIZE - *offset;
+#endif
+
+	return va + *offset;
+}
+
+static inline void *page_frag_alloc_va_prepare_align(struct page_frag_cache *nc,
+						     unsigned int *offset,
+						     unsigned int *size,
+						     unsigned int align,
+						     gfp_t gfp_mask)
+{
+	WARN_ON_ONCE(!is_power_of_2(align) || align >= PAGE_SIZE ||
+		     *size < sizeof(unsigned int));
+
+	*offset = nc->offset;
+	nc->offset = ALIGN(*offset, align);
+	return page_frag_alloc_va_prepare(nc, offset, size, gfp_mask);
+}
+
+static inline void *__page_frag_alloc_pg_prepare(struct page_frag_cache *nc,
+						 unsigned int *offset,
+						 unsigned int *size,
+						 gfp_t gfp_mask)
+{
+	void *va;
+
+	va = page_frag_cache_refill(nc, *size, gfp_mask);
+	if (unlikely(!va))
+		return NULL;
+
+	*offset = nc->offset;
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+	*size = nc->size_mask - *offset + 1;
+#else
+	*size = PAGE_SIZE - *offset;
+#endif
+
+	return va;
+}
+
+#define page_frag_alloc_pg_prepare(nc, offset, size, gfp)		\
+({									\
+	struct page *__page = NULL;					\
+	void *__va;							\
+									\
+	__va = __page_frag_alloc_pg_prepare(nc, offset, size, gfp);	\
+	if (likely(__va))						\
+		__page = virt_to_page(__va);				\
+									\
+	__page;								\
+})
+
+static inline void *__page_frag_alloc_prepare(struct page_frag_cache *nc,
+					      unsigned int *offset,
+					      unsigned int *size,
+					      void **va, gfp_t gfp_mask)
+{
+	void *nc_va;
+
+	nc_va = page_frag_cache_refill(nc, *size, gfp_mask);
+	if (unlikely(!nc_va))
+		return NULL;
+
+	*offset = nc->offset;
+	*va = nc_va + *offset;
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+	*size = nc->size_mask - *offset + 1;
+#else
+	*size = PAGE_SIZE - *offset;
+#endif
+
+	return nc_va;
+}
+
+#define page_frag_alloc_prepare(nc, offset, size, va, gfp)		\
+({									\
+	struct page *__page = NULL;					\
+	void *__va;							\
+									\
+	__va = __page_frag_alloc_prepare(nc, offset, size, va, gfp);	\
+	if (likely(__va))						\
+		__page = virt_to_page(__va);				\
+									\
+	__page;								\
+})
+
+static inline void page_frag_alloc_commit(struct page_frag_cache *nc,
+					  unsigned int offset,
+					  unsigned int size)
+{
+	nc->pagecnt_bias--;
+	nc->offset = offset + size;
+}
+
+static inline void page_frag_alloc_commit_noref(struct page_frag_cache *nc,
+						unsigned int offset,
+						unsigned int size)
+{
+	nc->offset = offset + size;
+}
+
 void page_frag_free_va(void *addr);
 
 #endif
diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
index 5f7f96c88163..8774cb07e630 100644
--- a/mm/page_frag_cache.c
+++ b/mm/page_frag_cache.c
@@ -79,8 +79,8 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
 }
 EXPORT_SYMBOL(__page_frag_cache_drain);
 
-void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
-			 gfp_t gfp_mask)
+void *page_frag_cache_refill(struct page_frag_cache *nc, unsigned int fragsz,
+			     gfp_t gfp_mask)
 {
 	unsigned long size_mask;
 	unsigned int offset;
@@ -118,7 +118,7 @@ void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
 		set_page_count(page, size_mask);
 		nc->pagecnt_bias |= size_mask;
 
-		offset = 0;
+		nc->offset = 0;
 		if (unlikely(fragsz > (size_mask + 1))) {
 			/*
 			 * The caller is trying to allocate a fragment
@@ -133,12 +133,9 @@ void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
 		}
 	}
 
-	nc->pagecnt_bias--;
-	nc->offset = offset + fragsz;
-
-	return va + offset;
+	return va;
 }
-EXPORT_SYMBOL(page_frag_alloc_va);
+EXPORT_SYMBOL(page_frag_cache_refill);
 
 /*
  * Frees a page fragment allocated out of either a compound or order 0 page.
-- 
2.33.0



^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH net-next v2 14/15] mm: page_frag: update documentation for page_frag
       [not found] <20240415131941.51153-1-linyunsheng@huawei.com>
                   ` (9 preceding siblings ...)
  2024-04-15 13:19 ` [PATCH net-next v2 12/15] mm: page_frag: introduce prepare/commit API for page_frag Yunsheng Lin
@ 2024-04-15 13:19 ` Yunsheng Lin
  2024-04-16  6:13   ` Bagas Sanjaya
  10 siblings, 1 reply; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-15 13:19 UTC (permalink / raw)
  To: davem, kuba, pabeni
  Cc: netdev, linux-kernel, Yunsheng Lin, Alexander Duyck,
	Jonathan Corbet, Andrew Morton, linux-mm, linux-doc

Update the documentation about the design, implementation and API
usage of page_frag.

CC: Alexander Duyck <alexander.duyck@gmail.com>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
 Documentation/mm/page_frags.rst | 148 +++++++++++++++++++++++++++++++-
 include/linux/page_frag_cache.h | 133 ++++++++++++++++++++++++++++
 mm/page_frag_cache.c            |   4 +
 3 files changed, 284 insertions(+), 1 deletion(-)

diff --git a/Documentation/mm/page_frags.rst b/Documentation/mm/page_frags.rst
index 503ca6cdb804..ac9dd9e8ee16 100644
--- a/Documentation/mm/page_frags.rst
+++ b/Documentation/mm/page_frags.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 ==============
 Page fragments
 ==============
@@ -40,4 +42,148 @@ page via a single call.  The advantage to doing this is that it allows for
 cleaning up the multiple references that were added to a page in order to
 avoid calling get_page per allocation.
 
-Alexander Duyck, Nov 29, 2016.
+
+Architecture overview
+=====================
+
+.. code-block:: none
+
+                +----------------------+
+                | page_frag API caller |
+                +----------------------+
+                            ^
+                            |
+                            |
+                            |
+                            v
+    +------------------------------------------------+
+    |             request page fragment              |
+    +------------------------------------------------+
+        ^                      ^                   ^
+        |                      | Cache not enough  |
+        | Cache empty          v                   |
+        |             +-----------------+          |
+        |             | drain old cache |          |
+        |             +-----------------+          |
+        |                      ^                   |
+        |                      |                   |
+        v                      v                   |
+    +----------------------------------+           |
+    |  refill cache with order 3 page  |           |
+    +----------------------------------+           |
+     ^                  ^                          |
+     |                  |                          |
+     |                  | Refill failed            |
+     |                  |                          | Cache is enough
+     |                  |                          |
+     |                  v                          |
+     |    +----------------------------------+     |
+     |    |  refill cache with order 0 page  |     |
+     |    +----------------------------------+     |
+     |                       ^                     |
+     | Refill succeed        |                     |
+     |                       | Refill succeed      |
+     |                       |                     |
+     v                       v                     v
+    +------------------------------------------------+
+    |         allocate fragment from cache           |
+    +------------------------------------------------+
+
+API interface
+=============
+As the design and implementation of page_frag API, the allocation side does not
+allow concurrent calling, it is assumed that the caller must ensure there is not
+concurrent alloc calling to the same page_frag_cache instance by using it's own
+lock or rely on some lockless guarantee like NAPI softirq.
+
+Depending on different use cases, callers expecting to deal with va, page or
+both va and page for them may call page_frag_alloc_va*, page_frag_alloc_pg*,
+or page_frag_alloc* API accordingly.
+
+There is also a use case that need minimum memory in order for forward
+progressing, but can do better if there is more memory available. Introduce
+page_frag_alloc_prepare() and page_frag_alloc_commit() related API, the caller
+requests the minimum memory it need and the prepare API will return the maximum
+size of the fragment returned, caller need to report back to the page_frag core
+how much memory it actually use by calling commit API, or not calling the commit
+API if deciding to not use any memory.
+
+.. kernel-doc:: include/linux/page_frag_cache.h
+   :identifiers: page_frag_cache_init page_frag_cache_is_pfmemalloc
+                 page_frag_alloc_va __page_frag_alloc_va_align
+                 page_frag_alloc_va_align page_frag_alloc_va_prepare
+                 page_frag_alloc_va_prepare_align page_frag_alloc_pg_prepare
+                 page_frag_alloc_prepare page_frag_alloc_commit
+                 page_frag_alloc_commit_noref page_frag_free_va
+
+.. kernel-doc:: mm/page_frag_cache.c
+   :identifiers: page_frag_cache_drain
+
+Coding examples
+===============
+
+Init & Drain API
+----------------
+
+.. code-block:: c
+
+   page_frag_cache_init(pfrag);
+   ...
+   page_frag_cache_drain(pfrag);
+
+
+Alloc & Free API
+----------------
+
+.. code-block:: c
+
+    void *va;
+
+    va = page_frag_alloc_va_align(pfrag, size, gfp, align);
+    if (!va)
+        goto do_error;
+
+    err = do_something(va, size);
+    if (err) {
+        page_frag_free_va(va);
+        goto do_error;
+    }
+
+Prepare & Commit API
+--------------------
+
+.. code-block:: c
+
+    unsigned int offset, size;
+    bool merge = true;
+    struct page *page;
+    void *va;
+
+    size = 32U;
+    page = page_frag_alloc_prepare(pfrag, &offset, &size, &va, gfp);
+    if (!page)
+        goto wait_for_space;
+
+    copy = min_t(int, copy, size);
+    if (!skb_can_coalesce(skb, i, page, offset)) {
+        if (i >= max_skb_frags)
+            goto new_segment;
+
+        merge = false;
+    }
+
+    copy = mem_schedule(copy);
+    if (!copy)
+        goto wait_for_space;
+
+    err = copy_from_iter_full_nocache(va, copy, iter);
+    if (err)
+        goto do_error;
+
+    if (merge) {
+        skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
+        page_frag_alloc_commit_noref(pfrag, offset, copy);
+    } else {
+        skb_fill_page_desc(skb, i, page, offset, copy);
+        page_frag_alloc_commit(pfrag, offset, copy);
+    }
diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
index 28185969cd2c..529e7c040dad 100644
--- a/include/linux/page_frag_cache.h
+++ b/include/linux/page_frag_cache.h
@@ -31,11 +31,28 @@ struct page_frag_cache {
 #endif
 };
 
+/**
+ * page_frag_cache_init() - Init page_frag cache.
+ * @nc: page_frag cache from which to init
+ *
+ * Inline helper to init the page_frag cache.
+ */
 static inline void page_frag_cache_init(struct page_frag_cache *nc)
 {
 	nc->va = NULL;
 }
 
+/**
+ * page_frag_cache_is_pfmemalloc() - Check for pfmemalloc.
+ * @nc: page_frag cache from which to check
+ *
+ * Used to check if the current page in page_frag cache is pfmemalloc'ed.
+ * It has the same calling context expectation as the alloc API.
+ *
+ * Return:
+ * Return true if the current page in page_frag cache is pfmemalloc'ed,
+ * otherwise return false.
+ */
 static inline bool page_frag_cache_is_pfmemalloc(struct page_frag_cache *nc)
 {
 	return !!nc->pfmemalloc;
@@ -46,6 +63,17 @@ void __page_frag_cache_drain(struct page *page, unsigned int count);
 void *page_frag_cache_refill(struct page_frag_cache *nc, unsigned int fragsz,
 			     gfp_t gfp_mask);
 
+/**
+ * page_frag_alloc_va() - Alloc a page fragment.
+ * @nc: page_frag cache from which to allocate
+ * @fragsz: the requested fragment size
+ * @gfp_mask: the allocation gfp to use when the cache needs to be refilled
+ *
+ * Get a page fragment from page_frag cache.
+ *
+ * Return:
+ * Return va of the page fragment, otherwise return NULL.
+ */
 static inline void *page_frag_alloc_va(struct page_frag_cache *nc,
 				       unsigned int fragsz, gfp_t gfp_mask)
 {
@@ -63,6 +91,19 @@ static inline void *page_frag_alloc_va(struct page_frag_cache *nc,
 	return va + offset;
 }
 
+/**
+ * __page_frag_alloc_va_align() - Alloc a page fragment with an alignment
+ * requirement.
+ * @nc: page_frag cache from which to allocate
+ * @fragsz: the requested fragment size
+ * @gfp_mask: the allocation gfp to use when the cache needs to be refilled
+ * @align: the requested alignment
+ *
+ * Get a page fragment from the page_frag cache with an alignment requirement.
+ *
+ * Return:
+ * Return va of the page fragment, otherwise return NULL.
+ */
 static inline void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
 					       unsigned int fragsz,
 					       gfp_t gfp_mask,
@@ -75,6 +116,19 @@ static inline void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
 	return page_frag_alloc_va(nc, fragsz, gfp_mask);
 }
 
+/**
+ * page_frag_alloc_va_align() - Alloc a page fragment with an alignment requirement.
+ * @nc: page_frag cache from which to allocate
+ * @fragsz: the requested fragment size
+ * @gfp_mask: the allocation gfp to use when the cache needs to be refilled
+ * @align: the requested alignment
+ *
+ * WARN_ON_ONCE() checks align and fragsz before getting a page fragment
+ * from the page_frag cache with an alignment requirement.
+ *
+ * Return:
+ * Return va of the page fragment, otherwise return NULL.
+ */
 static inline void *page_frag_alloc_va_align(struct page_frag_cache *nc,
 					     unsigned int fragsz,
 					     gfp_t gfp_mask,
@@ -86,6 +140,19 @@ static inline void *page_frag_alloc_va_align(struct page_frag_cache *nc,
 	return __page_frag_alloc_va_align(nc, fragsz, gfp_mask, align);
 }
 
+/**
+ * page_frag_alloc_va_prepare() - Prepare allocating a page fragment.
+ * @nc: page_frag cache from which to prepare
+ * @offset: out as the offset of the page fragment
+ * @size: in as the requested size, out as the available size
+ * @gfp_mask: the allocation gfp to use when the cache needs to be refilled
+ *
+ * Prepare a page fragment with a minimum size of 'size'; 'size' is also used
+ * to report the maximum size of the page fragment the caller can use.
+ *
+ * Return:
+ * Return va of the page fragment, otherwise return NULL.
+ */
 static inline void *page_frag_alloc_va_prepare(struct page_frag_cache *nc,
 					       unsigned int *offset,
 					       unsigned int *size,
@@ -108,6 +175,21 @@ static inline void *page_frag_alloc_va_prepare(struct page_frag_cache *nc,
 	return va + *offset;
 }
 
+/**
+ * page_frag_alloc_va_prepare_align() - Prepare allocating a page fragment
+ * with an alignment requirement.
+ * @nc: page_frag cache from which to prepare
+ * @offset: out as the offset of the page fragment
+ * @size: in as the requested size, out as the available size
+ * @align: the requested alignment
+ * @gfp_mask: the allocation gfp to use when the cache needs to be refilled
+ *
+ * Prepare an aligned page fragment with a minimum size of 'size'; 'size' is
+ * also used to report the maximum size of the page fragment the caller can use.
+ *
+ * Return:
+ * Return va of the page fragment, otherwise return NULL.
+ */
 static inline void *page_frag_alloc_va_prepare_align(struct page_frag_cache *nc,
 						     unsigned int *offset,
 						     unsigned int *size,
@@ -144,6 +226,19 @@ static inline void *__page_frag_alloc_pg_prepare(struct page_frag_cache *nc,
 	return va;
 }
 
+/**
+ * page_frag_alloc_pg_prepare - Prepare allocating a page fragment.
+ * @nc: page_frag cache from which to prepare
+ * @offset: out as the offset of the page fragment
+ * @size: in as the requested size, out as the available size
+ * @gfp: the allocation gfp to use when the cache needs to be refilled
+ *
+ * Prepare a page fragment with a minimum size of 'size'; 'size' is also used
+ * to report the maximum size of the page fragment the caller can use.
+ *
+ * Return:
+ * Return the page fragment, otherwise return NULL.
+ */
 #define page_frag_alloc_pg_prepare(nc, offset, size, gfp)		\
 ({									\
 	struct page *__page = NULL;					\
@@ -179,6 +274,21 @@ static inline void *__page_frag_alloc_prepare(struct page_frag_cache *nc,
 	return nc_va;
 }
 
+/**
+ * page_frag_alloc_prepare - Prepare allocating a page fragment.
+ * @nc: page_frag cache from which to prepare
+ * @offset: out as the offset of the page fragment
+ * @size: in as the requested size, out as the available size
+ * @va: out as the va of the returned page fragment
+ * @gfp: the allocation gfp to use when cache need to be refilled
+ *
+ * Prepare a page fragment with a minimum size of 'size'; 'size' is also used
+ * to report the maximum size of the page fragment. Return both 'page' and
+ * 'va' of the fragment to the caller.
+ *
+ * Return:
+ * Return the page fragment, otherwise return NULL.
+ */
 #define page_frag_alloc_prepare(nc, offset, size, va, gfp)		\
 ({									\
 	struct page *__page = NULL;					\
@@ -191,6 +301,14 @@ static inline void *__page_frag_alloc_prepare(struct page_frag_cache *nc,
 	__page;								\
 })
 
+/**
+ * page_frag_alloc_commit - Commit allocating a page fragment.
+ * @nc: page_frag cache from which to commit
+ * @offset: offset of the page fragment
+ * @size: size of the page fragment that has been used
+ *
+ * Commit the prepared allocation by passing the offset and actual used size.
+ */
 static inline void page_frag_alloc_commit(struct page_frag_cache *nc,
 					  unsigned int offset,
 					  unsigned int size)
@@ -199,6 +317,17 @@ static inline void page_frag_alloc_commit(struct page_frag_cache *nc,
 	nc->offset = offset + size;
 }
 
+/**
+ * page_frag_alloc_commit_noref - Commit allocating a page fragment without
+ * taking a page refcount.
+ * @nc: page_frag cache from which to commit
+ * @offset: offset of the page fragment
+ * @size: size of the page fragment that has been used
+ *
+ * Commit the prepared allocation by passing the offset and actual used size,
+ * but without taking a page refcount. Mostly used for the fragment coalescing
+ * case, when the current fragment can share the same refcount as the previous
+ * fragment.
+ */
 static inline void page_frag_alloc_commit_noref(struct page_frag_cache *nc,
 						unsigned int offset,
 						unsigned int size)
@@ -206,6 +335,10 @@ static inline void page_frag_alloc_commit_noref(struct page_frag_cache *nc,
 	nc->offset = offset + size;
 }
 
+/**
+ * page_frag_free_va - Free a page fragment by va.
+ * @addr: va of page fragment to be freed
+ */
 void page_frag_free_va(void *addr);
 
 #endif
diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
index 8774cb07e630..8b1d35aafcc1 100644
--- a/mm/page_frag_cache.c
+++ b/mm/page_frag_cache.c
@@ -60,6 +60,10 @@ static bool __page_frag_cache_refill(struct page_frag_cache *nc,
 	return true;
 }
 
+/**
+ * page_frag_cache_drain - Drain the current page from page_frag cache.
+ * @nc: page_frag cache from which to drain
+ */
 void page_frag_cache_drain(struct page_frag_cache *nc)
 {
 	if (!nc->va)
-- 
2.33.0



^ permalink raw reply related	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 05/15] mm: page_frag: use initial zero offset for page_frag_alloc_align()
  2024-04-15 13:19 ` [PATCH net-next v2 05/15] mm: page_frag: use initial zero offset for page_frag_alloc_align() Yunsheng Lin
@ 2024-04-15 23:55   ` Alexander H Duyck
  2024-04-16 13:11     ` Yunsheng Lin
  0 siblings, 1 reply; 32+ messages in thread
From: Alexander H Duyck @ 2024-04-15 23:55 UTC (permalink / raw)
  To: Yunsheng Lin, davem, kuba, pabeni
  Cc: netdev, linux-kernel, Andrew Morton, linux-mm

On Mon, 2024-04-15 at 21:19 +0800, Yunsheng Lin wrote:
> We are about to use the page_frag_alloc_*() API to not just
> allocate memory for skb->data, but also to do the memory
> allocation for skb frags. Currently the implementation of
> page_frag in the mm subsystem runs the offset as a countdown
> rather than a count-up value; there may be several advantages
> to that as mentioned in [1], but it also has some disadvantages,
> for example, it may prevent skb frag coalescing and correct
> cache prefetching.
> 
> We have a trade-off to make in order to have a unified
> implementation and API for page_frag, so use an initial zero
> offset in this patch, and the following patch will try to
> mitigate the disadvantages as much as possible.
> 
> 1. https://lore.kernel.org/all/f4abe71b3439b39d17a6fb2d410180f367cadf5c.camel@gmail.com/
> 
> CC: Alexander Duyck <alexander.duyck@gmail.com>
> Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
> ---
>  mm/page_frag_cache.c | 31 ++++++++++++++-----------------
>  1 file changed, 14 insertions(+), 17 deletions(-)
> 
> diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
> index 64993b5d1243..dc864ee09536 100644
> --- a/mm/page_frag_cache.c
> +++ b/mm/page_frag_cache.c
> @@ -65,9 +65,8 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
>  			      unsigned int fragsz, gfp_t gfp_mask,
>  			      unsigned int align_mask)
>  {
> -	unsigned int size = PAGE_SIZE;
> +	unsigned int size, offset;
>  	struct page *page;
> -	int offset;
>  
>  	if (unlikely(!nc->va)) {
>  refill:
> @@ -75,10 +74,6 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
>  		if (!page)
>  			return NULL;
>  
> -#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
> -		/* if size can vary use size else just use PAGE_SIZE */
> -		size = nc->size;
> -#endif
>  		/* Even if we own the page, we do not use atomic_set().
>  		 * This would break get_page_unless_zero() users.
>  		 */
> @@ -87,11 +82,18 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
>  		/* reset page count bias and offset to start of new frag */
>  		nc->pfmemalloc = page_is_pfmemalloc(page);
>  		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
> -		nc->offset = size;
> +		nc->offset = 0;
>  	}
>  
> -	offset = nc->offset - fragsz;
> -	if (unlikely(offset < 0)) {
> +#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
> +	/* if size can vary use size else just use PAGE_SIZE */
> +	size = nc->size;
> +#else
> +	size = PAGE_SIZE;
> +#endif
> +
> +	offset = ALIGN(nc->offset, -align_mask);

I am not sure if using -align_mask here with the ALIGN macro is really
to your benefit. I would be curious what the compiler is generating.

Again, I think you would be much better off with:
	offset = __ALIGN_KERNEL_MASK(nc->offset, ~align_mask);

That will save you a number of conversions as the use of the ALIGN
macro gives you:
	offset = (nc->offset + (-align_mask - 1)) & ~(-align_mask - 1);

whereas what I am suggesting gives you:
	offset = (nc->offset + ~align_mask) & ~(~align_mask));

My main concern is that I am not sure the compiler will optimize around
the combination of bit operations and arithmetic operations. It seems
much cleaner to me to stick to the bitwise operations for the alignment
than to force this into the vhost approach which requires a power of 2
aligned mask.
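
A worked example, assuming the caller passes align_mask == -align as
today: with align = 64 we have ~align_mask = 63, and

	ALIGN(13, -align_mask)               = (13 + 63) & ~63 = 64
	__ALIGN_KERNEL_MASK(13, ~align_mask) = (13 + 63) & ~63 = 64

Both produce the same value; the open question is only whether the
compiler folds the '-align_mask - 1' arithmetic back into a plain
bitmask.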

Also the old code was aligning on the combination of offset AND fragsz.
This new logic is aligning on offset only. Do we run the risk of
overwriting blocks of neighbouring fragments if two users of
napi_alloc_frag_align end up passing arguments that have different
alignment values?

> +	if (unlikely(offset + fragsz > size)) {
>  		page = virt_to_page(nc->va);
>  
>  		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
> @@ -102,17 +104,13 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
>  			goto refill;
>  		}
>  
> -#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
> -		/* if size can vary use size else just use PAGE_SIZE */
> -		size = nc->size;
> -#endif
>  		/* OK, page count is 0, we can safely set it */
>  		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
>  
>  		/* reset page count bias and offset to start of new frag */
>  		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
> -		offset = size - fragsz;
> -		if (unlikely(offset < 0)) {
> +		offset = 0;
> +		if (unlikely(fragsz > size)) {

This check can probably be moved now. It was placed here to optimize
things as a check of offset < 0 was a single jump command based on the
signed flag being set as a result of the offset calculation.

It might make sense to pull this out of here and instead place it at
the start of this block after the initial check with offset + fragsz >
size since that would shorten the need to carry the size variable.

>  			/*
>  			 * The caller is trying to allocate a fragment
>  			 * with fragsz > PAGE_SIZE but the cache isn't big
> @@ -127,8 +125,7 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
>  	}
>  
>  	nc->pagecnt_bias--;
> -	offset &= align_mask;
> -	nc->offset = offset;
> +	nc->offset = offset + fragsz;
>  
>  	return nc->va + offset;
>  }



^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 14/15] mm: page_frag: update documentation for page_frag
  2024-04-15 13:19 ` [PATCH net-next v2 14/15] mm: page_frag: update documentation " Yunsheng Lin
@ 2024-04-16  6:13   ` Bagas Sanjaya
  2024-04-16 13:11     ` Yunsheng Lin
  0 siblings, 1 reply; 32+ messages in thread
From: Bagas Sanjaya @ 2024-04-16  6:13 UTC (permalink / raw)
  To: Yunsheng Lin, davem, kuba, pabeni
  Cc: netdev, linux-kernel, Alexander Duyck, Jonathan Corbet,
	Andrew Morton, linux-mm, linux-doc

On Mon, Apr 15, 2024 at 09:19:39PM +0800, Yunsheng Lin wrote:
> +API interface
> +=============
> +As the design and implementation of page_frag API, the allocation side does not
                                        "... implies, the allocation side ..."
> +allow concurrent calling, it is assumed that the caller must ensure there is not
                      "... . Instead, it is assumed that ...:
> +concurrent alloc calling to the same page_frag_cache instance by using it's own
                                                            "... by using its own ..."
> +lock or rely on some lockless guarantee like NAPI softirq.
> +
> +Depending on different use cases, callers expecting to deal with va, page or
> +both va and page for them may call page_frag_alloc_va*, page_frag_alloc_pg*,
> +or page_frag_alloc* API accordingly.
> +
> +There is also a use case that need minimum memory in order for forward
> +progressing, but can do better if there is more memory available. Introduce
Did you mean "... but more performant if more memory is available"?
> +page_frag_alloc_prepare() and page_frag_alloc_commit() related API, the caller
s/Introduce/Using/
> +requests the minimum memory it need and the prepare API will return the maximum
> +size of the fragment returned, caller need to report back to the page_frag core
                                  "The caller needs to either call the commit API ..."
> +how much memory it actually use by calling commit API, or not calling the commit
"... to report how much memory it actually uses ..."
> +API if deciding to not use any memory.
"... or not do so if deciding to not use any memory."

Thanks.

-- 
An old man doll... just what I always wanted! - Clara

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 05/15] mm: page_frag: use initial zero offset for page_frag_alloc_align()
  2024-04-15 23:55   ` Alexander H Duyck
@ 2024-04-16 13:11     ` Yunsheng Lin
  2024-04-16 15:51       ` Alexander H Duyck
  0 siblings, 1 reply; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-16 13:11 UTC (permalink / raw)
  To: Alexander H Duyck, davem, kuba, pabeni
  Cc: netdev, linux-kernel, Andrew Morton, linux-mm

On 2024/4/16 7:55, Alexander H Duyck wrote:
> On Mon, 2024-04-15 at 21:19 +0800, Yunsheng Lin wrote:
>> We are about to use the page_frag_alloc_*() API to not just
>> allocate memory for skb->data, but also to do the memory
>> allocation for skb frags. Currently the implementation of
>> page_frag in the mm subsystem runs the offset as a countdown
>> rather than a count-up value; there may be several advantages
>> to that as mentioned in [1], but it also has some disadvantages,
>> for example, it may prevent skb frag coalescing and correct
>> cache prefetching.
>>
>> We have a trade-off to make in order to have a unified
>> implementation and API for page_frag, so use an initial zero
>> offset in this patch, and the following patch will try to
>> mitigate the disadvantages as much as possible.
>>
>> 1. https://lore.kernel.org/all/f4abe71b3439b39d17a6fb2d410180f367cadf5c.camel@gmail.com/
>>
>> CC: Alexander Duyck <alexander.duyck@gmail.com>
>> Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
>> ---
>>  mm/page_frag_cache.c | 31 ++++++++++++++-----------------
>>  1 file changed, 14 insertions(+), 17 deletions(-)
>>
>> diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
>> index 64993b5d1243..dc864ee09536 100644
>> --- a/mm/page_frag_cache.c
>> +++ b/mm/page_frag_cache.c
>> @@ -65,9 +65,8 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
>>  			      unsigned int fragsz, gfp_t gfp_mask,
>>  			      unsigned int align_mask)
>>  {
>> -	unsigned int size = PAGE_SIZE;
>> +	unsigned int size, offset;
>>  	struct page *page;
>> -	int offset;
>>  
>>  	if (unlikely(!nc->va)) {
>>  refill:
>> @@ -75,10 +74,6 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
>>  		if (!page)
>>  			return NULL;
>>  
>> -#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
>> -		/* if size can vary use size else just use PAGE_SIZE */
>> -		size = nc->size;
>> -#endif
>>  		/* Even if we own the page, we do not use atomic_set().
>>  		 * This would break get_page_unless_zero() users.
>>  		 */
>> @@ -87,11 +82,18 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
>>  		/* reset page count bias and offset to start of new frag */
>>  		nc->pfmemalloc = page_is_pfmemalloc(page);
>>  		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
>> -		nc->offset = size;
>> +		nc->offset = 0;
>>  	}
>>  
>> -	offset = nc->offset - fragsz;
>> -	if (unlikely(offset < 0)) {
>> +#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
>> +	/* if size can vary use size else just use PAGE_SIZE */
>> +	size = nc->size;
>> +#else
>> +	size = PAGE_SIZE;
>> +#endif
>> +
>> +	offset = ALIGN(nc->offset, -align_mask);
> 
> I am not sure if using -align_mask here with the ALIGN macro is really
> to your benefit. I would be curious what the compiler is generating.
> 
> Again, I think you would be much better off with:
> 	offset = __ALIGN_KERNEL_MASK(nc->offset, ~align_mask);
> 
> That will save you a number of conversions as the use of the ALIGN
> macro gives you:
> 	offset = (nc->offset + (-align_mask - 1)) & ~(-align_mask - 1);
> 
> whereas what I am suggesting gives you:
> 	offset = (nc->offset + ~align_mask) & ~(~align_mask));
> 
> My main concern is that I am not sure the compiler will optimize around
> the combination of bit operations and arithmetic operations. It seems
> much cleaner to me to stick to the bitwise operations for the alignment
> than to force this into the vhost approach which requires a power of 2
> aligned mask.

My argument about the above is in [1]. But since you seem not to have
worked through the next patch yet, I might just do it as you suggested
in the next version so that I don't have to repeat my argument again :(

1. https://lore.kernel.org/all/df826acf-8867-7eb6-e7f0-962c106bc28b@huawei.com/

> 
> Also the old code was aligning on the combination of offset AND fragsz.
> This new logic is aligning on offset only. Do we run the risk of
> overwriting blocks of neighbouring fragments if two users of
> napi_alloc_frag_align end up passing arguments that have different
> alignment values?

I am not sure I understand the question here.
To my understanding, both the old code and the new code align on
the offset, and both might have space reserved before the offset
due to the alignment. The memory returned to the caller is in the
range of [offset, offset + fragsz). Am I missing something obvious here?
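
For example (a sketch): with nc->offset == 13 and align == 8,

	offset = ALIGN(13, 8);	/* == 16 */

bytes [13, 16) are the reserved hole, and the caller gets
[offset, offset + fragsz) == [16, 16 + fragsz).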

> 
>> +	if (unlikely(offset + fragsz > size)) {
>>  		page = virt_to_page(nc->va);
>>  
>>  		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
>> @@ -102,17 +104,13 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
>>  			goto refill;
>>  		}
>>  
>> -#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
>> -		/* if size can vary use size else just use PAGE_SIZE */
>> -		size = nc->size;
>> -#endif
>>  		/* OK, page count is 0, we can safely set it */
>>  		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
>>  
>>  		/* reset page count bias and offset to start of new frag */
>>  		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
>> -		offset = size - fragsz;
>> -		if (unlikely(offset < 0)) {
>> +		offset = 0;
>> +		if (unlikely(fragsz > size)) {
> 
> This check can probably be moved now. It was placed here to optimize
> things as a check of offset < 0 was a single jump command based on the
> signed flag being set as a result of the offset calculation.
> 
> It might make sense to pull this out of here and instead place it at
> the start of this block after the initial check with offset + fragsz >
> size since that would shorten the need to carry the size variable.

Yes, that is better.

But does it make more sense to just do the 'fragsz > PAGE_SIZE' checking
alongside the alignment checking, as we have a better chance of
succeeding in allocating an order-0 page than an order-3 page, and it
seems the caller is not allowed to pass a fragsz bigger than PAGE_SIZE anyway?

> 
>>  			/*
>>  			 * The caller is trying to allocate a fragment
>>  			 * with fragsz > PAGE_SIZE but the cache isn't big
>> @@ -127,8 +125,7 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
>>  	}
>>  
>>  	nc->pagecnt_bias--;
>> -	offset &= align_mask;
>> -	nc->offset = offset;
>> +	nc->offset = offset + fragsz;
>>  
>>  	return nc->va + offset;
>>  }
> 
> .
> 


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 14/15] mm: page_frag: update documentation for page_frag
  2024-04-16  6:13   ` Bagas Sanjaya
@ 2024-04-16 13:11     ` Yunsheng Lin
  0 siblings, 0 replies; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-16 13:11 UTC (permalink / raw)
  To: Bagas Sanjaya, davem, kuba, pabeni
  Cc: netdev, linux-kernel, Alexander Duyck, Jonathan Corbet,
	Andrew Morton, linux-mm, linux-doc

On 2024/4/16 14:13, Bagas Sanjaya wrote:
> On Mon, Apr 15, 2024 at 09:19:39PM +0800, Yunsheng Lin wrote:
>> +API interface
>> +=============
>> +As the design and implementation of page_frag API, the allocation side does not
>                                         "... implies, the allocation side ..."
>> +allow concurrent calling, it is assumed that the caller must ensure there is not
>                       "... . Instead, it is assumed that ...:
>> +concurrent alloc calling to the same page_frag_cache instance by using it's own
>                                                             "... by using its own ..."
>> +lock or rely on some lockless guarantee like NAPI softirq.
>> +
>> +Depending on different use cases, callers expecting to deal with va, page or
>> +both va and page for them may call page_frag_alloc_va*, page_frag_alloc_pg*,
>> +or page_frag_alloc* API accordingly.
>> +
>> +There is also a use case that need minimum memory in order for forward
>> +progressing, but can do better if there is more memory available. Introduce
> Did you mean "... but more performant if more memory is available"?
>> +page_frag_alloc_prepare() and page_frag_alloc_commit() related API, the caller
> s/Introduce/Using/
>> +requests the minimum memory it need and the prepare API will return the maximum
>> +size of the fragment returned, caller need to report back to the page_frag core
>                                   "The caller needs to either call the commit API ..."
>> +how much memory it actually use by calling commit API, or not calling the commit
> "... to report how much memory it actually uses ..."
>> +API if deciding to not use any memory.
> "... or not do so if deciding to not use any memory."

Thanks.
Your wording seems better than mine, will update it accordingly.

> 
> Thanks.
> 


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 05/15] mm: page_frag: use initial zero offset for page_frag_alloc_align()
  2024-04-16 13:11     ` Yunsheng Lin
@ 2024-04-16 15:51       ` Alexander H Duyck
  2024-04-17 13:17         ` Yunsheng Lin
  0 siblings, 1 reply; 32+ messages in thread
From: Alexander H Duyck @ 2024-04-16 15:51 UTC (permalink / raw)
  To: Yunsheng Lin, davem, kuba, pabeni
  Cc: netdev, linux-kernel, Andrew Morton, linux-mm

On Tue, 2024-04-16 at 21:11 +0800, Yunsheng Lin wrote:
> On 2024/4/16 7:55, Alexander H Duyck wrote:
> > On Mon, 2024-04-15 at 21:19 +0800, Yunsheng Lin wrote:
> > > We are about to use the page_frag_alloc_*() API to not just
> > > allocate memory for skb->data, but also to do the memory
> > > allocation for skb frags too. Currently the implementation of
> > > page_frag in the mm subsystem runs the offset as a countdown
> > > rather than a count-up value; there may be several advantages
> > > to that as mentioned in [1], but it also has some disadvantages,
> > > for example, it may prevent skb frag coalescing and correct
> > > cache prefetching.
> > >
> > > We have a trade-off to make in order to have a unified
> > > implementation and API for page_frag, so use an initial zero
> > > offset in this patch, and the following patch will try to
> > > optimize away the disadvantages as much as possible.
> > > 
> > > 1. https://lore.kernel.org/all/f4abe71b3439b39d17a6fb2d410180f367cadf5c.camel@gmail.com/
> > > 
> > > CC: Alexander Duyck <alexander.duyck@gmail.com>
> > > Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
> > > ---
> > >  mm/page_frag_cache.c | 31 ++++++++++++++-----------------
> > >  1 file changed, 14 insertions(+), 17 deletions(-)
> > > 
> > > diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
> > > index 64993b5d1243..dc864ee09536 100644
> > > --- a/mm/page_frag_cache.c
> > > +++ b/mm/page_frag_cache.c
> > > @@ -65,9 +65,8 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
> > >  			      unsigned int fragsz, gfp_t gfp_mask,
> > >  			      unsigned int align_mask)
> > >  {
> > > -	unsigned int size = PAGE_SIZE;
> > > +	unsigned int size, offset;
> > >  	struct page *page;
> > > -	int offset;
> > >  
> > >  	if (unlikely(!nc->va)) {
> > >  refill:
> > > @@ -75,10 +74,6 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
> > >  		if (!page)
> > >  			return NULL;
> > >  
> > > -#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
> > > -		/* if size can vary use size else just use PAGE_SIZE */
> > > -		size = nc->size;
> > > -#endif
> > >  		/* Even if we own the page, we do not use atomic_set().
> > >  		 * This would break get_page_unless_zero() users.
> > >  		 */
> > > @@ -87,11 +82,18 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
> > >  		/* reset page count bias and offset to start of new frag */
> > >  		nc->pfmemalloc = page_is_pfmemalloc(page);
> > >  		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
> > > -		nc->offset = size;
> > > +		nc->offset = 0;
> > >  	}
> > >  
> > > -	offset = nc->offset - fragsz;
> > > -	if (unlikely(offset < 0)) {
> > > +#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
> > > +	/* if size can vary use size else just use PAGE_SIZE */
> > > +	size = nc->size;
> > > +#else
> > > +	size = PAGE_SIZE;
> > > +#endif
> > > +
> > > +	offset = ALIGN(nc->offset, -align_mask);
> > 
> > I am not sure if using -align_mask here with the ALIGN macro is really
> > to your benefit. I would be curious what the compiler is generating.
> > 
> > Again, I think you would be much better off with:
> > 	offset = __ALIGN_KERNEL_MASK(nc->offset, ~align_mask);
> > 
> > That will save you a number of conversions as the use of the ALIGN
> > macro gives you:
> > 	offset = (nc->offset + (-align_mask - 1)) & ~(-align_mask -
> > 1);
> > 
> > whereas what I am suggesting gives you:
> > 	offset = (nc->offset + ~align_mask) & ~(~align_mask));
> > 
> > My main concern is that I am not sure the compiler will optimize around
> > the combination of bit operations and arithmetic operations. It seems
> > much cleaner to me to stick to the bitwise operations for the alignment
> > than to force this into the vhost approach which requires a power of 2
> > aligned mask.
> 
>> My argument about the above is in [1]. But since you seem not to have worked
>> through the next patch yet, I might just do it as you suggested in the next
>> version so that I don't have to repeat my argument again:(
> 
> 1. https://lore.kernel.org/all/df826acf-8867-7eb6-e7f0-962c106bc28b@huawei.com/

Sorry, I didn't have time to go digging through the mailing list to
review all the patches from the last set. I was only Cced on a few of
them as I recall. As you know, I also have the fbnic patches I have
been trying to get pushed out, so that was my primary focus the last
couple of weeks.

That said, this just goes back to my earlier complaints. You are now
optimizing for the non-aligned paths. There are few callers that are
asking for this to provide non-aligned segments. In most cases they are
at least cache aligned. Specifically the __netdev_alloc_frag_align and
__napi_alloc_frag_align are aligning things at a minimum to
SMP_CACHE_BYTES by aligning the fragsz argument using SKB_DATA_ALIGN.
Perhaps it would be better to actually incorporate that alignment
guarantee into the calls themselves by doing an &= with the align_mask
request for those two functions to make this more transparent.

> > 
> > Also the old code was aligning on the combination of offset AND fragsz.
> > This new logic is aligning on offset only. Do we run the risk of
> > overwriting blocks of neighbouring fragments if two users of
> > napi_alloc_frag_align end up passing arguments that have different
> > alignment values?
> 
>> I am not sure I understand the question here.
>> As I understand it, both the old code and the new code align on
>> the offset, and both might have space reserved before the offset
>> due to alignment. The memory returned to the caller is in the range
>> of [offset, offset + fragsz). Am I missing something obvious here?

My main concern is that by aligning offset - fragsz by the alignment
mask we were taking care of all our variables immediately ourselves. If
we didn't provide a correct value it was all traceable to one call as
the assumption was that fragsz would be a multiple of the alignment
value.

With your change the alignment is done in the following call. So now it
splits up the alignment of fragsz from the alignment of the offset. As
such we will probably need to add additional overhead to guarantee
fragsz is a multiple of the alignment.

> > 
> > > +	if (unlikely(offset + fragsz > size)) {
> > >  		page = virt_to_page(nc->va);
> > >  
> > >  		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
> > > @@ -102,17 +104,13 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
> > >  			goto refill;
> > >  		}
> > >  
> > > -#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
> > > -		/* if size can vary use size else just use PAGE_SIZE */
> > > -		size = nc->size;
> > > -#endif
> > >  		/* OK, page count is 0, we can safely set it */
> > >  		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
> > >  
> > >  		/* reset page count bias and offset to start of new frag */
> > >  		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
> > > -		offset = size - fragsz;
> > > -		if (unlikely(offset < 0)) {
> > > +		offset = 0;
> > > +		if (unlikely(fragsz > size)) {
> > 
> > This check can probably be moved now. It was placed here to optimize
> > things as a check of offset < 0 was a single jump command based on the
> > signed flag being set as a result of the offset calculation.
> > 
> > It might make sense to pull this out of here and instead place it at
> > the start of this block after the initial check with offset + fragsz >
> > size since that would shorten the need to carry the size variable.
> 
> Yes, that is better.
> 
>> But does it make more sense to just do the 'fragsz > PAGE_SIZE' checking
>> alongside the alignment checking, as we have a better chance of
>> succeeding in allocating an order-0 page than an order-3 page, and it
>> seems the caller is not allowed to pass a fragsz bigger than PAGE_SIZE anyway?

Yeah, that should be fine.

> > 
> > >  			/*
> > >  			 * The caller is trying to allocate a fragment
> > >  			 * with fragsz > PAGE_SIZE but the cache isn't big
> > > @@ -127,8 +125,7 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
> > >  	}
> > >  
> > >  	nc->pagecnt_bias--;
> > > -	offset &= align_mask;
> > > -	nc->offset = offset;
> > > +	nc->offset = offset + fragsz;
> > >  
> > >  	return nc->va + offset;
> > >  }
> > 
> > .
> > 



^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 06/15] mm: page_frag: change page_frag_alloc_* API to accept align param
  2024-04-15 13:19 ` [PATCH net-next v2 06/15] mm: page_frag: change page_frag_alloc_* API to accept align param Yunsheng Lin
@ 2024-04-16 16:08   ` Alexander Duyck
  2024-04-17 13:18     ` Yunsheng Lin
  0 siblings, 1 reply; 32+ messages in thread
From: Alexander Duyck @ 2024-04-16 16:08 UTC (permalink / raw)
  To: Yunsheng Lin
  Cc: davem, kuba, pabeni, netdev, linux-kernel, Andrew Morton,
	Eric Dumazet, David Howells, Marc Dionne, linux-mm, linux-afs

On Mon, Apr 15, 2024 at 6:22 AM Yunsheng Lin <linyunsheng@huawei.com> wrote:
>
> When page_frag_alloc_* API doesn't need data alignment, the
> ALIGN() operation is unnecessary, so change page_frag_alloc_*
> API to accept align param instead of align_mask param, and do
> the ALIGN()'ing in the inline helper when needed.
>
> Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>

The vast majority of callers are using this aligned one way or
another. If anything, with your recent changes we should probably make
sure to align the fragsz as well as the offset, since most callers
were relying only on the alignment of fragsz to get their alignment.

My main concern is that this change implies that most are using an
unaligned setup when it is in fact quite the opposite.

> ---
>  include/linux/page_frag_cache.h | 20 ++++++++++++--------
>  include/linux/skbuff.h          | 12 ++++++------
>  mm/page_frag_cache.c            |  9 ++++-----
>  net/core/skbuff.c               | 12 +++++-------
>  net/rxrpc/txbuf.c               |  5 +++--
>  5 files changed, 30 insertions(+), 28 deletions(-)
>
> diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
> index 04810d8d6a7d..cc0ede0912f3 100644
> --- a/include/linux/page_frag_cache.h
> +++ b/include/linux/page_frag_cache.h
> @@ -25,21 +25,25 @@ struct page_frag_cache {
>
>  void page_frag_cache_drain(struct page_frag_cache *nc);
>  void __page_frag_cache_drain(struct page *page, unsigned int count);
> -void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
> -                             gfp_t gfp_mask, unsigned int align_mask);
> +void *page_frag_alloc(struct page_frag_cache *nc, unsigned int fragsz,
> +                     gfp_t gfp_mask);
> +
> +static inline void *__page_frag_alloc_align(struct page_frag_cache *nc,
> +                                           unsigned int fragsz, gfp_t gfp_mask,
> +                                           unsigned int align)
> +{
> +       nc->offset = ALIGN(nc->offset, align);
> +
> +       return page_frag_alloc(nc, fragsz, gfp_mask);
> +}
>

I would rather not have us breaking up the alignment into another
function. It makes this much more difficult to work with. In addition,
you are advancing the offset without actually allocating from the
page, which makes this seem exploitable. Basically, just pass an
alignment value of 32K and you are forcing a page eviction regardless.
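
A minimal sketch of that failure mode with made-up numbers; this is not
kernel code, just the helper's arithmetic replayed in user space:

#include <stdio.h>

#define MY_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int size = 32768;	/* PAGE_FRAG_CACHE_MAX_SIZE */
	unsigned int offset = 1;	/* one byte already consumed */

	/* the inline helper's nc->offset = ALIGN(nc->offset, align) */
	offset = MY_ALIGN(offset, 32768);

	/* offset + fragsz > size now holds for any fragsz, so the
	 * next page_frag_alloc() refills even though nothing was
	 * handed out from the rest of the page */
	printf("offset = %u, refill forced: %d\n",
	       offset, offset + 1 > size);
	return 0;
}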

>  static inline void *page_frag_alloc_align(struct page_frag_cache *nc,
>                                           unsigned int fragsz, gfp_t gfp_mask,
>                                           unsigned int align)
>  {
>         WARN_ON_ONCE(!is_power_of_2(align));
> -       return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align);
> -}
>
> -static inline void *page_frag_alloc(struct page_frag_cache *nc,
> -                                   unsigned int fragsz, gfp_t gfp_mask)
> -{
> -       return page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
> +       return __page_frag_alloc_align(nc, fragsz, gfp_mask, align);
>  }
>
>  void page_frag_free(void *addr);
> diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
> index f2dc1f735c79..43c704589deb 100644
> --- a/include/linux/skbuff.h
> +++ b/include/linux/skbuff.h
> @@ -3268,7 +3268,7 @@ static inline void skb_queue_purge(struct sk_buff_head *list)
>  unsigned int skb_rbtree_purge(struct rb_root *root);
>  void skb_errqueue_purge(struct sk_buff_head *list);
>
> -void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
> +void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align);
>
>  /**
>   * netdev_alloc_frag - allocate a page fragment
> @@ -3279,14 +3279,14 @@ void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
>   */
>  static inline void *netdev_alloc_frag(unsigned int fragsz)
>  {
> -       return __netdev_alloc_frag_align(fragsz, ~0u);
> +       return __netdev_alloc_frag_align(fragsz, 1u);
>  }
>
>  static inline void *netdev_alloc_frag_align(unsigned int fragsz,
>                                             unsigned int align)
>  {
>         WARN_ON_ONCE(!is_power_of_2(align));
> -       return __netdev_alloc_frag_align(fragsz, -align);
> +       return __netdev_alloc_frag_align(fragsz, align);
>  }
>
>  struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
> @@ -3346,18 +3346,18 @@ static inline void skb_free_frag(void *addr)
>         page_frag_free(addr);
>  }
>
> -void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
> +void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align);
>
>  static inline void *napi_alloc_frag(unsigned int fragsz)
>  {
> -       return __napi_alloc_frag_align(fragsz, ~0u);
> +       return __napi_alloc_frag_align(fragsz, 1u);
>  }
>
>  static inline void *napi_alloc_frag_align(unsigned int fragsz,
>                                           unsigned int align)
>  {
>         WARN_ON_ONCE(!is_power_of_2(align));
> -       return __napi_alloc_frag_align(fragsz, -align);
> +       return __napi_alloc_frag_align(fragsz, align);
>  }
>
>  struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int length);
> diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
> index dc864ee09536..b4408187e1ab 100644
> --- a/mm/page_frag_cache.c
> +++ b/mm/page_frag_cache.c
> @@ -61,9 +61,8 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
>  }
>  EXPORT_SYMBOL(__page_frag_cache_drain);
>
> -void *__page_frag_alloc_align(struct page_frag_cache *nc,
> -                             unsigned int fragsz, gfp_t gfp_mask,
> -                             unsigned int align_mask)
> +void *page_frag_alloc(struct page_frag_cache *nc, unsigned int fragsz,
> +                     gfp_t gfp_mask)
>  {
>         unsigned int size, offset;
>         struct page *page;
> @@ -92,7 +91,7 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
>         size = PAGE_SIZE;
>  #endif
>
> -       offset = ALIGN(nc->offset, -align_mask);
> +       offset = nc->offset;
>         if (unlikely(offset + fragsz > size)) {
>                 page = virt_to_page(nc->va);
>
> @@ -129,7 +128,7 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
>
>         return nc->va + offset;
>  }
> -EXPORT_SYMBOL(__page_frag_alloc_align);
> +EXPORT_SYMBOL(page_frag_alloc);
>
>  /*
>   * Frees a page fragment allocated out of either a compound or order 0 page.
> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
> index ea052fa710d8..676e2d857f02 100644
> --- a/net/core/skbuff.c
> +++ b/net/core/skbuff.c
> @@ -306,18 +306,17 @@ void napi_get_frags_check(struct napi_struct *napi)
>         local_bh_enable();
>  }
>
> -void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
> +void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align)
>  {
>         struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
>
>         fragsz = SKB_DATA_ALIGN(fragsz);
>

So this is a perfect example. This caller is aligning the size by
SMP_CACHE_BYTES. This is the most typical case. Either this or
L1_CACHE_BYTES. As such all requests should be aligned to at least
that. I would prefer it if we didn't strip the alignment code out of
our main allocating function. If anything, maybe we should make it
more specific that the expectation is that fragsz is a multiple of the
alignment.

> -       return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
> -                                      align_mask);
> +       return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align);
>  }
>  EXPORT_SYMBOL(__napi_alloc_frag_align);
>
> -void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
> +void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align)
>  {
>         void *data;
>
> @@ -325,15 +324,14 @@ void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
>         if (in_hardirq() || irqs_disabled()) {
>                 struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
>
> -               data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC,
> -                                              align_mask);
> +               data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align);
>         } else {
>                 struct napi_alloc_cache *nc;
>
>                 local_bh_disable();
>                 nc = this_cpu_ptr(&napi_alloc_cache);
>                 data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
> -                                              align_mask);
> +                                              align);
>                 local_bh_enable();
>         }
>         return data;
> diff --git a/net/rxrpc/txbuf.c b/net/rxrpc/txbuf.c
> index e0679658d9de..eb640875bf07 100644
> --- a/net/rxrpc/txbuf.c
> +++ b/net/rxrpc/txbuf.c
> @@ -32,9 +32,10 @@ struct rxrpc_txbuf *rxrpc_alloc_data_txbuf(struct rxrpc_call *call, size_t data_
>                 hoff = round_up(sizeof(*whdr), data_align) - sizeof(*whdr);
>         total = hoff + sizeof(*whdr) + data_size;
>
> +       data_align = max_t(size_t, data_align, L1_CACHE_BYTES);
>         mutex_lock(&call->conn->tx_data_alloc_lock);
> -       buf = __page_frag_alloc_align(&call->conn->tx_data_alloc, total, gfp,
> -                                     ~(data_align - 1) & ~(L1_CACHE_BYTES - 1));
> +       buf = page_frag_alloc_align(&call->conn->tx_data_alloc, total, gfp,
> +                                   data_align);
>         mutex_unlock(&call->conn->tx_data_alloc_lock);
>         if (!buf) {
>                 kfree(txb);
> --
> 2.33.0
>


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 07/15] mm: page_frag: add '_va' suffix to page_frag API
  2024-04-15 13:19 ` [PATCH net-next v2 07/15] mm: page_frag: add '_va' suffix to page_frag API Yunsheng Lin
@ 2024-04-16 16:12   ` Alexander H Duyck
  2024-04-17 13:18     ` Yunsheng Lin
  0 siblings, 1 reply; 32+ messages in thread
From: Alexander H Duyck @ 2024-04-16 16:12 UTC (permalink / raw)
  To: Yunsheng Lin, davem, kuba, pabeni
  Cc: netdev, linux-kernel, Jeroen de Borst, Praveen Kaligineedi,
	Shailend Chand, Eric Dumazet, Jesse Brandeburg, Tony Nguyen,
	Sunil Goutham, Geetha sowjanya, Subbaraya Sundeep, hariprasad,
	Felix Fietkau, Sean Wang, Mark Lee, Lorenzo Bianconi,
	Matthias Brugger, AngeloGioacchino Del Regno, Keith Busch,
	Jens Axboe, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni,
	Michael S. Tsirkin, Jason Wang, Andrew Morton,
	Alexei Starovoitov, Daniel Borkmann, Jesper Dangaard Brouer,
	John Fastabend, Andrii Nakryiko, Martin KaFai Lau,
	Eduard Zingerman, Song Liu, Yonghong Song, KP Singh,
	Stanislav Fomichev, Hao Luo, Jiri Olsa, David Howells,
	Marc Dionne, Chuck Lever, Jeff Layton, Neil Brown,
	Olga Kornievskaia, Dai Ngo, Tom Talpey, Trond Myklebust,
	Anna Schumaker, intel-wired-lan, linux-arm-kernel,
	linux-mediatek, linux-nvme, kvm, virtualization, linux-mm, bpf,
	linux-afs, linux-nfs

On Mon, 2024-04-15 at 21:19 +0800, Yunsheng Lin wrote:
> Currently most of the page_frag API returns a 'virtual address'
> as output or expects a 'virtual address' as input. In order to
> differentiate the API handling between 'virtual address' and
> 'struct page', add a '_va' suffix to the corresponding APIs,
> mirroring the page_pool_alloc_va() API of the page_pool.
> 
> Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>

This patch is a total waste of time. By that logic we should be
renaming __get_free_pages since it essentially does the same thing.

This just seems like more code changes for the sake of adding code
changes rather than fixing anything. In my opinion it should be dropped
from the set.



^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 09/15] mm: page_frag: reuse MSB of 'size' field for pfmemalloc
  2024-04-15 13:19 ` [PATCH net-next v2 09/15] mm: page_frag: reuse MSB of 'size' field for pfmemalloc Yunsheng Lin
@ 2024-04-16 16:22   ` Alexander H Duyck
  2024-04-17 13:19     ` Yunsheng Lin
  0 siblings, 1 reply; 32+ messages in thread
From: Alexander H Duyck @ 2024-04-16 16:22 UTC (permalink / raw)
  To: Yunsheng Lin, davem, kuba, pabeni
  Cc: netdev, linux-kernel, Andrew Morton, linux-mm

On Mon, 2024-04-15 at 21:19 +0800, Yunsheng Lin wrote:
> The '(PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)' case is for a
> system with page size less than 32KB, which is 0x8000 bytes and
> requires 16 bits of space. Change 'size' to 'size_mask' to avoid
> using the MSB, and change the 'pfmemalloc' field to reuse that
> MSB, so that we remove the original space needed by 'pfmemalloc'.
> 
> For another case, the MSB of 'offset' is reused for 'pfmemalloc'.
> 
> Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
> ---
>  include/linux/page_frag_cache.h | 13 ++++++++-----
>  mm/page_frag_cache.c            |  5 +++--
>  2 files changed, 11 insertions(+), 7 deletions(-)
> 
> diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
> index fe5faa80b6c3..40a7d6da9ef0 100644
> --- a/include/linux/page_frag_cache.h
> +++ b/include/linux/page_frag_cache.h
> @@ -12,15 +12,16 @@ struct page_frag_cache {
>  	void *va;
>  #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
>  	__u16 offset;
> -	__u16 size;
> +	__u16 size_mask:15;
> +	__u16 pfmemalloc:1;
>  #else
> -	__u32 offset;
> +	__u32 offset:31;
> +	__u32 pfmemalloc:1;
>  #endif

This seems like a really bad idea. Using a bit-field like this seems
like a waste as it means that all the accesses now have to add
additional operations to access either offset or size. It wasn't as if
this is an oversized struct, or one that we are allocating a ton of. As
such I am not sure why we need to optimize for size like this.

>  	/* we maintain a pagecount bias, so that we dont dirty cache line
>  	 * containing page->_refcount every time we allocate a fragment.
>  	 */
>  	unsigned int		pagecnt_bias;
> -	bool pfmemalloc;
>  };
>  
>  static inline void page_frag_cache_init(struct page_frag_cache *nc)
> @@ -43,7 +44,9 @@ static inline void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
>  					       gfp_t gfp_mask,
>  					       unsigned int align)
>  {
> -	nc->offset = ALIGN(nc->offset, align);
> +	unsigned int offset = nc->offset;
> +
> +	nc->offset = ALIGN(offset, align);
>  
>  	return page_frag_alloc_va(nc, fragsz, gfp_mask);
>  }
> @@ -53,7 +56,7 @@ static inline void *page_frag_alloc_va_align(struct page_frag_cache *nc,
>  					     gfp_t gfp_mask,
>  					     unsigned int align)
>  {
> -	WARN_ON_ONCE(!is_power_of_2(align));
> +	WARN_ON_ONCE(!is_power_of_2(align) || align >= PAGE_SIZE);

The "align >= PAGE_SIZE" fix should probably go with your change that
reversed the direction.

>  
>  	return __page_frag_alloc_va_align(nc, fragsz, gfp_mask, align);
>  }
> diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
> index 50511d8522d0..8d93029116e1 100644
> --- a/mm/page_frag_cache.c
> +++ b/mm/page_frag_cache.c
> @@ -32,7 +32,8 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
>  		   __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
>  	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
>  				PAGE_FRAG_CACHE_MAX_ORDER);
> -	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
> +	nc->size_mask = page ? PAGE_FRAG_CACHE_MAX_SIZE - 1 : PAGE_SIZE - 1;
> +	VM_BUG_ON(page && nc->size_mask != PAGE_FRAG_CACHE_MAX_SIZE - 1);
>  #endif
>  	if (unlikely(!page))
>  		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
> @@ -86,7 +87,7 @@ void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
>  
>  #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
>  	/* if size can vary use size else just use PAGE_SIZE */
> -	size = nc->size;
> +	size = nc->size_mask + 1;
>  #else
>  	size = PAGE_SIZE;
>  #endif

So now we are having to add arithmetic operations to the size, in
addition to having to mask in order to read the values. That just
seems like that much more overhead.



^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 10/15] mm: page_frag: reuse existing bit field of 'va' for pagecnt_bias
  2024-04-15 13:19 ` [PATCH net-next v2 10/15] mm: page_frag: reuse existing bit field of 'va' for pagecnt_bias Yunsheng Lin
@ 2024-04-16 16:33   ` Alexander H Duyck
  2024-04-17 13:23     ` Yunsheng Lin
  0 siblings, 1 reply; 32+ messages in thread
From: Alexander H Duyck @ 2024-04-16 16:33 UTC (permalink / raw)
  To: Yunsheng Lin, davem, kuba, pabeni
  Cc: netdev, linux-kernel, Andrew Morton, linux-mm

On Mon, 2024-04-15 at 21:19 +0800, Yunsheng Lin wrote:
> As 'va' is always aligned with the order of the page allocated,
> we can reuse its LSB bits for the pagecount bias and remove the
> original space needed by 'pagecnt_bias'. Also limit 'fragsz' to be
> at least the size of 'unsigned int' to match the limited
> pagecnt_bias.
> 
> Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>

What is the point of this? You are trading off space for size on a data
structure that is only something like 24B in size and only allocated a
few times.

> ---
>  include/linux/page_frag_cache.h | 20 +++++++----
>  mm/page_frag_cache.c            | 63 +++++++++++++++++++--------------
>  2 files changed, 50 insertions(+), 33 deletions(-)
> 
> diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
> index 40a7d6da9ef0..a97a1ac017d6 100644
> --- a/include/linux/page_frag_cache.h
> +++ b/include/linux/page_frag_cache.h
> @@ -9,7 +9,18 @@
>  #define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)
>  
>  struct page_frag_cache {
> -	void *va;
> +	union {
> +		void *va;
> +		/* we maintain a pagecount bias, so that we dont dirty cache
> +		 * line containing page->_refcount every time we allocate a
> +		 * fragment. As 'va' is always aligned with the order of the
> +		 * page allocated, we can reuse the LSB bits for the pagecount
> +		 * bias, and its bit width happens to be indicated by the
> +		 * 'size_mask' below.
> +		 */
> +		unsigned long pagecnt_bias;
> +
> +	};

Both va and pagecnt_bias are frequently accessed items. If pagecnt_bias
somehow ends up exceeding the alignment of the page we run the risk of
corrupting data or creating a page fault.

In my opinion this is not worth the risk especially since with the
previous change your new change results in 0 size savings on 64b
systems as the structure will be aligned to the size of the pointer.

>  #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
>  	__u16 offset;
>  	__u16 size_mask:15;
> @@ -18,10 +29,6 @@ struct page_frag_cache {
>  	__u32 offset:31;
>  	__u32 pfmemalloc:1;
>  #endif
> -	/* we maintain a pagecount bias, so that we dont dirty cache line
> -	 * containing page->_refcount every time we allocate a fragment.
> -	 */
> -	unsigned int		pagecnt_bias;
>  };
>  
>  static inline void page_frag_cache_init(struct page_frag_cache *nc)
> @@ -56,7 +63,8 @@ static inline void *page_frag_alloc_va_align(struct page_frag_cache *nc,
>  					     gfp_t gfp_mask,
>  					     unsigned int align)
>  {
> -	WARN_ON_ONCE(!is_power_of_2(align) || align >= PAGE_SIZE);
> +	WARN_ON_ONCE(!is_power_of_2(align) || align >= PAGE_SIZE ||
> +		     fragsz < sizeof(unsigned int));

What is the reason for this change? Seems like it is to account for an
issue somewhere.

>  
>  	return __page_frag_alloc_va_align(nc, fragsz, gfp_mask, align);
>  }
> diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
> index 8d93029116e1..5f7f96c88163 100644
> --- a/mm/page_frag_cache.c
> +++ b/mm/page_frag_cache.c
> @@ -18,8 +18,8 @@
>  #include <linux/page_frag_cache.h>
>  #include "internal.h"
>  
> -static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
> -					     gfp_t gfp_mask)
> +static bool __page_frag_cache_refill(struct page_frag_cache *nc,
> +				     gfp_t gfp_mask)
>  {
>  	struct page *page = NULL;
>  	gfp_t gfp = gfp_mask;
> @@ -38,9 +38,26 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
>  	if (unlikely(!page))
>  		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
>  
> -	nc->va = page ? page_address(page) : NULL;
> +	if (unlikely(!page)) {
> +		nc->va = NULL;
> +		return false;
> +	}
> +
> +	nc->va = page_address(page);
>  
> -	return page;
> +#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
> +	VM_BUG_ON(nc->pagecnt_bias & nc->size_mask);
> +	page_ref_add(page, nc->size_mask - 1);
> +	nc->pagecnt_bias |= nc->size_mask;
> +#else
> +	VM_BUG_ON(nc->pagecnt_bias & (PAGE_SIZE - 1));
> +	page_ref_add(page, PAGE_SIZE - 2);
> +	nc->pagecnt_bias |= (PAGE_SIZE - 1);
> +#endif
> +
> +	nc->pfmemalloc = page_is_pfmemalloc(page);
> +	nc->offset = 0;
> +	return true;
>  }
>  
>  void page_frag_cache_drain(struct page_frag_cache *nc)
> @@ -65,38 +82,31 @@ EXPORT_SYMBOL(__page_frag_cache_drain);
>  void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
>  			 gfp_t gfp_mask)
>  {
> -	unsigned int size, offset;
> +	unsigned long size_mask;
> +	unsigned int offset;
>  	struct page *page;
> +	void *va;
>  
>  	if (unlikely(!nc->va)) {
>  refill:
> -		page = __page_frag_cache_refill(nc, gfp_mask);
> -		if (!page)
> +		if (!__page_frag_cache_refill(nc, gfp_mask))
>  			return NULL;
> -
> -		/* Even if we own the page, we do not use atomic_set().
> -		 * This would break get_page_unless_zero() users.
> -		 */
> -		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
> -
> -		/* reset page count bias and offset to start of new frag */
> -		nc->pfmemalloc = page_is_pfmemalloc(page);
> -		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
> -		nc->offset = 0;
>  	}
>  
>  #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
>  	/* if size can vary use size else just use PAGE_SIZE */
> -	size = nc->size_mask + 1;
> +	size_mask = nc->size_mask;
>  #else
> -	size = PAGE_SIZE;
> +	size_mask = PAGE_SIZE - 1;
>  #endif
>  
> +	va = (void *)((unsigned long)nc->va & ~size_mask);
>  	offset = nc->offset;
> -	if (unlikely(offset + fragsz > size)) {
> -		page = virt_to_page(nc->va);
>  
> -		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
> +	if (unlikely(offset + fragsz > (size_mask + 1))) {
> +		page = virt_to_page(va);
> +
> +		if (!page_ref_sub_and_test(page, nc->pagecnt_bias & size_mask))
>  			goto refill;
>  
>  		if (unlikely(nc->pfmemalloc)) {
> @@ -105,12 +115,11 @@ void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
>  		}
>  
>  		/* OK, page count is 0, we can safely set it */
> -		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
> +		set_page_count(page, size_mask);
> +		nc->pagecnt_bias |= size_mask;
>  
> -		/* reset page count bias and offset to start of new frag */
> -		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
>  		offset = 0;
> -		if (unlikely(fragsz > size)) {
> +		if (unlikely(fragsz > (size_mask + 1))) {
>  			/*
>  			 * The caller is trying to allocate a fragment
>  			 * with fragsz > PAGE_SIZE but the cache isn't big
> @@ -127,7 +136,7 @@ void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
>  	nc->pagecnt_bias--;
>  	nc->offset = offset + fragsz;
>  
> -	return nc->va + offset;
> +	return va + offset;
>  }
>  EXPORT_SYMBOL(page_frag_alloc_va);
>  

The rest of this seems like unnecessary obfuscation and change.
Basically it is adding more overhead to page allocation for no reward.



^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 05/15] mm: page_frag: use initial zero offset for page_frag_alloc_align()
  2024-04-16 15:51       ` Alexander H Duyck
@ 2024-04-17 13:17         ` Yunsheng Lin
  0 siblings, 0 replies; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-17 13:17 UTC (permalink / raw)
  To: Alexander H Duyck, davem, kuba, pabeni
  Cc: netdev, linux-kernel, Andrew Morton, linux-mm

On 2024/4/16 23:51, Alexander H Duyck wrote:
> On Tue, 2024-04-16 at 21:11 +0800, Yunsheng Lin wrote:
>> On 2024/4/16 7:55, Alexander H Duyck wrote:
>>> On Mon, 2024-04-15 at 21:19 +0800, Yunsheng Lin wrote:
>>>> We are about to use the page_frag_alloc_*() API to not just
>>>> allocate memory for skb->data, but also to do the memory
>>>> allocation for skb frags too. Currently the implementation of
>>>> page_frag in the mm subsystem runs the offset as a countdown
>>>> rather than a count-up value; there may be several advantages
>>>> to that as mentioned in [1], but it also has some disadvantages,
>>>> for example, it may prevent skb frag coalescing and correct
>>>> cache prefetching.
>>>>
>>>> We have a trade-off to make in order to have a unified
>>>> implementation and API for page_frag, so use an initial zero
>>>> offset in this patch, and the following patch will try to
>>>> optimize away the disadvantages as much as possible.
>>>>
>>>> 1. https://lore.kernel.org/all/f4abe71b3439b39d17a6fb2d410180f367cadf5c.camel@gmail.com/
>>>>
>>>> CC: Alexander Duyck <alexander.duyck@gmail.com>
>>>> Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
>>>> ---
>>>>  mm/page_frag_cache.c | 31 ++++++++++++++-----------------
>>>>  1 file changed, 14 insertions(+), 17 deletions(-)
>>>>
>>>> diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
>>>> index 64993b5d1243..dc864ee09536 100644
>>>> --- a/mm/page_frag_cache.c
>>>> +++ b/mm/page_frag_cache.c
>>>> @@ -65,9 +65,8 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
>>>>  			      unsigned int fragsz, gfp_t gfp_mask,
>>>>  			      unsigned int align_mask)
>>>>  {
>>>> -	unsigned int size = PAGE_SIZE;
>>>> +	unsigned int size, offset;
>>>>  	struct page *page;
>>>> -	int offset;
>>>>  
>>>>  	if (unlikely(!nc->va)) {
>>>>  refill:
>>>> @@ -75,10 +74,6 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
>>>>  		if (!page)
>>>>  			return NULL;
>>>>  
>>>> -#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
>>>> -		/* if size can vary use size else just use PAGE_SIZE */
>>>> -		size = nc->size;
>>>> -#endif
>>>>  		/* Even if we own the page, we do not use atomic_set().
>>>>  		 * This would break get_page_unless_zero() users.
>>>>  		 */
>>>> @@ -87,11 +82,18 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
>>>>  		/* reset page count bias and offset to start of new frag */
>>>>  		nc->pfmemalloc = page_is_pfmemalloc(page);
>>>>  		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
>>>> -		nc->offset = size;
>>>> +		nc->offset = 0;
>>>>  	}
>>>>  
>>>> -	offset = nc->offset - fragsz;
>>>> -	if (unlikely(offset < 0)) {
>>>> +#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
>>>> +	/* if size can vary use size else just use PAGE_SIZE */
>>>> +	size = nc->size;
>>>> +#else
>>>> +	size = PAGE_SIZE;
>>>> +#endif
>>>> +
>>>> +	offset = ALIGN(nc->offset, -align_mask);
>>>
>>> I am not sure if using -align_mask here with the ALIGN macro is really
>>> to your benefit. I would be curious what the compiler is generating.
>>>
>>> Again, I think you would be much better off with:
>>> 	offset = __ALIGN_KERNEL_MASK(nc->offset, ~align_mask);
>>>
>>> That will save you a number of conversions as the use of the ALIGN
>>> macro gives you:
>>> 	offset = (nc->offset + (-align_mask - 1)) & ~(-align_mask -
>>> 1);
>>>
>>> whereas what I am suggesting gives you:
>>> 	offset = (nc->offset + ~align_mask) & ~(~align_mask));
>>>
>>> My main concern is that I am not sure the compiler will optimize around
>>> the combination of bit operations and arithmetic operations. It seems
>>> much cleaner to me to stick to the bitwise operations for the alignment
>>> than to force this into the vhost approach which requires a power of 2
>>> aligned mask.
>>
>> My argument about the above is in [1]. But since you seems to not be working
>> through the next patch yet, I might just do it as you suggested in the next
>> version so that I don't have to repeat my argument again:(
>>
>> 1. https://lore.kernel.org/all/df826acf-8867-7eb6-e7f0-962c106bc28b@huawei.com/
> 
> Sorry, I didn't have time to go digging through the mailing list to
> review all the patches from the last set. I was only Cced on a few of

I thought adding 'CC: Alexander Duyck <alexander.duyck@gmail.com>' in
the cover letter would make git send-email send all the patches to that
address, but apparently it did not. And it seems I only added that in
the RFC and v1, but forgot to add it in the newest v2 version:(

> them as I recall. As you know I have the fbnic patches I also have been
> trying to get pushed out so that was my primary focus the last couple
> weeks.

Understood.

> 
> That said, this just goes back to my earlier complaints. You are now
> optimizing for the non-aligned paths. There are few callers that are
> asking for this to provide non-aligned segments. In most cases they are

I suppose that 'optimizing for the non-aligned paths' refers to doing
the data alignment in an inline helper for aligned API callers and
avoiding the data alignment for non-aligned API callers in patch 6?

For the existing users, it seems there are more callers of the
non-aligned API than of the aligned API:

Referenced in 13 files:
https://elixir.bootlin.com/linux/v6.7-rc8/A/ident/napi_alloc_frag

Referenced in 2 files:
https://elixir.bootlin.com/linux/v6.7-rc8/A/ident/napi_alloc_frag_align

Referenced in 15 files:
https://elixir.bootlin.com/linux/v6.7-rc8/A/ident/netdev_alloc_frag

No references found in the database
https://elixir.bootlin.com/linux/v6.7-rc8/A/ident/netdev_alloc_frag_align

Referenced in 6 files:
https://elixir.bootlin.com/linux/v6.7-rc8/A/ident/page_frag_alloc

Referenced in 3 files:
https://elixir.bootlin.com/linux/v6.7-rc8/A/ident/page_frag_alloc_align

And we are adding new users mostly asking for non-aligned segments in
patch 13.

I would argue that it is not about optimizing for the non-aligned paths;
it is about avoiding the data alignment operation for the non-aligned API.

> at least cache aligned. Specifically the __netdev_alloc_frag_align and
> __napi_alloc_frag_align are aligning things at a minimum to
> SMP_CACHE_BYTES by aligning the fragsz argument using SKB_DATA_ALIGN.

It seems the above does the alignment operation for fragsz; most
callers call __netdev_alloc_frag_align() and __napi_alloc_frag_align()
with align_mask being ~0u.

> Perhaps it would be better to actually incorporate that alignment
> guarantee into the calls themselves by doing an &= with the align_mask
> request for those two functions to make this more transparent.

Did you mean doing something like below for fragsz too?
fragsz = __ALIGN_KERNEL_MASK(fragsz, ~align_mask);

> 
>>>
>>> Also the old code was aligning on the combination of offset AND fragsz.
>>> This new logic is aligning on offset only. Do we run the risk of
>>> overwriting blocks of neighbouring fragments if two users of
>>> napi_alloc_frag_align end up passing arguments that have different
>>> alignment values?
>>
>> I am not sure I understand the question here.
>> As my understanding, both the old code and new code is aligning on
>> the offset, and both might have space reserved before the offset
>> due to aligning. The memory returned to the caller is in the range
>> of [offset, offset + fragsz). Am I missing something obvious here?
> 
> My main concern is that by aligning offset - fragsz by the alignment
> mask we were taking care of all our variables immediately ourselves. If
> we didn't provide a correct value it was all traceable to one call as
> the assumption was that fragsz would be a multiple of the alignment
> value.
> 
> With your change the alignment is done in the following call. So now it
> splits up the alignment of fragsz from the alignment of the offset. As
> such we will probably need to add additional overhead to guarantee
> fragsz is a multiple of the alignment.

I have not yet thought through how the above will affect the API
callers. If different callers pass different alignments for the same
'page_frag_cache' instance, does it cause some cache-line bouncing or
a DMA issue if used for DMA?

I suppose it depends on what alignment semantics we are providing
here:
1. Ensure alignment for both offset and fragsz.
2. Ensure alignment for offset only.
3. Ensure alignment for fragsz only.

It seems you are in favour of option 1?
I suppose it is a balance between performance and API flexibility
here. Would it be possible to enforce that callers use the same
alignment for the same 'page_frag_cache' instance, and give a warning
if they do not? Then we would only need to ensure alignment for offset
or fragsz, but not both of them.

I am not sure there is a strong use case for aligning both offset and
fragsz; we might create a new API for it if such a use case turns up.
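
For concreteness, a rough sketch of what option 1 could look like; this
is a hypothetical helper built on the page_frag_alloc() signature from
this series, not code from the posted patches:

/* Option 1: align fragsz as well as the cached offset, so every
 * fragment both starts on an aligned boundary and occupies a whole
 * number of aligned blocks.
 */
static inline void *page_frag_alloc_both_aligned(struct page_frag_cache *nc,
						 unsigned int fragsz,
						 gfp_t gfp_mask,
						 unsigned int align)
{
	fragsz = ALIGN(fragsz, align);		/* option 1's extra step */
	nc->offset = ALIGN(nc->offset, align);

	return page_frag_alloc(nc, fragsz, gfp_mask);
}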


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 06/15] mm: page_frag: change page_frag_alloc_* API to accept align param
  2024-04-16 16:08   ` Alexander Duyck
@ 2024-04-17 13:18     ` Yunsheng Lin
  0 siblings, 0 replies; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-17 13:18 UTC (permalink / raw)
  To: Alexander Duyck
  Cc: davem, kuba, pabeni, netdev, linux-kernel, Andrew Morton,
	Eric Dumazet, David Howells, Marc Dionne, linux-mm, linux-afs

On 2024/4/17 0:08, Alexander Duyck wrote:
> On Mon, Apr 15, 2024 at 6:22 AM Yunsheng Lin <linyunsheng@huawei.com> wrote:
>>
>> When page_frag_alloc_* API doesn't need data alignment, the
>> ALIGN() operation is unnecessary, so change page_frag_alloc_*
>> API to accept align param instead of align_mask param, and do
>> the ALIGN()'ing in the inline helper when needed.
>>
>> Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
> 
> The vast majority of callers are using this aligned one way or
> another. If anything, with your recent changes we should probably make
> sure to align the fragsz as well as the offset, since most callers
> were relying only on the alignment of fragsz to get their alignment.
> 
> My main concern is that this change implies that most are using an
> unaligned setup when it is in fact quite the opposite.

I think the above depends on whether we are talking about 'offset
unaligned' or 'fragsz unaligned'.

'offset unaligned' seems like the most common case here.

> 
>> ---
>>  include/linux/page_frag_cache.h | 20 ++++++++++++--------
>>  include/linux/skbuff.h          | 12 ++++++------
>>  mm/page_frag_cache.c            |  9 ++++-----
>>  net/core/skbuff.c               | 12 +++++-------
>>  net/rxrpc/txbuf.c               |  5 +++--
>>  5 files changed, 30 insertions(+), 28 deletions(-)
>>
>> diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
>> index 04810d8d6a7d..cc0ede0912f3 100644
>> --- a/include/linux/page_frag_cache.h
>> +++ b/include/linux/page_frag_cache.h
>> @@ -25,21 +25,25 @@ struct page_frag_cache {
>>
>>  void page_frag_cache_drain(struct page_frag_cache *nc);
>>  void __page_frag_cache_drain(struct page *page, unsigned int count);
>> -void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
>> -                             gfp_t gfp_mask, unsigned int align_mask);
>> +void *page_frag_alloc(struct page_frag_cache *nc, unsigned int fragsz,
>> +                     gfp_t gfp_mask);
>> +
>> +static inline void *__page_frag_alloc_align(struct page_frag_cache *nc,
>> +                                           unsigned int fragsz, gfp_t gfp_mask,
>> +                                           unsigned int align)
>> +{
>> +       nc->offset = ALIGN(nc->offset, align);
>> +
>> +       return page_frag_alloc(nc, fragsz, gfp_mask);
>> +}
>>
> 
> I would rather not have us breaking up the alignment into another
> function. It makes this much more difficult to work with. In addition,
> you are advancing the offset without actually allocating from the
> page, which makes this seem exploitable. Basically, just pass an
> alignment value of 32K and you are forcing a page eviction regardless.

Yes, as you mentioned in patch 9:
> The "align >= PAGE_SIZE" fix should probably go with your change that
> reversed the direction.


> 
>>  static inline void *page_frag_alloc_align(struct page_frag_cache *nc,
>>                                           unsigned int fragsz, gfp_t gfp_mask,
>>                                           unsigned int align)
>>  {
>>         WARN_ON_ONCE(!is_power_of_2(align));
>> -       return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align);
>> -}
>>
>> -static inline void *page_frag_alloc(struct page_frag_cache *nc,
>> -                                   unsigned int fragsz, gfp_t gfp_mask)
>> -{
>> -       return page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
>> +       return __page_frag_alloc_align(nc, fragsz, gfp_mask, align);
>>  }
>>

...

>>  /*
>>   * Frees a page fragment allocated out of either a compound or order 0 page.
>> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
>> index ea052fa710d8..676e2d857f02 100644
>> --- a/net/core/skbuff.c
>> +++ b/net/core/skbuff.c
>> @@ -306,18 +306,17 @@ void napi_get_frags_check(struct napi_struct *napi)
>>         local_bh_enable();
>>  }
>>
>> -void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
>> +void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align)
>>  {
>>         struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
>>
>>         fragsz = SKB_DATA_ALIGN(fragsz);
>>
> 
> So this is a perfect example. This caller is aligning the size by
> SMP_CACHE_BYTES. This is the most typical case. Either this or
> L1_CACHE_BYTES. As such all requests should be aligned to at least
> that. I would prefer it if we didn't strip the alignment code out of
> our main allocating function. If anything, maybe we should make it
> more specific that the expectation is that fragsz is a multiple of the
> alignment.

Let's discuss the above in patch 5.


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 07/15] mm: page_frag: add '_va' suffix to page_frag API
  2024-04-16 16:12   ` Alexander H Duyck
@ 2024-04-17 13:18     ` Yunsheng Lin
  0 siblings, 0 replies; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-17 13:18 UTC (permalink / raw)
  To: Alexander H Duyck, davem, kuba, pabeni
  Cc: netdev, linux-kernel, Jeroen de Borst, Praveen Kaligineedi,
	Shailend Chand, Eric Dumazet, Jesse Brandeburg, Tony Nguyen,
	Sunil Goutham, Geetha sowjanya, Subbaraya Sundeep, hariprasad,
	Felix Fietkau, Sean Wang, Mark Lee, Lorenzo Bianconi,
	Matthias Brugger, AngeloGioacchino Del Regno, Keith Busch,
	Jens Axboe, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni,
	Michael S. Tsirkin, Jason Wang, Andrew Morton,
	Alexei Starovoitov, Daniel Borkmann, Jesper Dangaard Brouer,
	John Fastabend, Andrii Nakryiko, Martin KaFai Lau,
	Eduard Zingerman, Song Liu, Yonghong Song, KP Singh,
	Stanislav Fomichev, Hao Luo, Jiri Olsa, David Howells,
	Marc Dionne, Chuck Lever, Jeff Layton, Neil Brown,
	Olga Kornievskaia, Dai Ngo, Tom Talpey, Trond Myklebust,
	Anna Schumaker, intel-wired-lan, linux-arm-kernel,
	linux-mediatek, linux-nvme, kvm, virtualization, linux-mm, bpf,
	linux-afs, linux-nfs

On 2024/4/17 0:12, Alexander H Duyck wrote:
> On Mon, 2024-04-15 at 21:19 +0800, Yunsheng Lin wrote:
>> Currently most of the page_frag API returns a 'virtual address'
>> as output or expects a 'virtual address' as input. In order to
>> differentiate the API handling between 'virtual address' and
>> 'struct page', add a '_va' suffix to the corresponding APIs,
>> mirroring the page_pool_alloc_va() API of the page_pool.
>>
>> Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
> 
> This patch is a total waste of time. By that logic we should be
> renaming __get_free_pages since it essentially does the same thing.
> 
> This just seems like more code changes for the sake of adding code
> changes rather than fixing anything. In my opinion it should be dropped
> from the set.

The rename is to support different use cases, as mentioned below in
patch 14:
"Depending on different use cases, callers expecting to deal with va, page or
both va and page for them may call page_frag_alloc_va*, page_frag_alloc_pg*,
or page_frag_alloc* API accordingly."

Naming is hard anyway, I am open to better API naming for the above use cases.

> 
> .
> 


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 09/15] mm: page_frag: reuse MSB of 'size' field for pfmemalloc
  2024-04-16 16:22   ` Alexander H Duyck
@ 2024-04-17 13:19     ` Yunsheng Lin
  2024-04-17 15:11       ` Alexander H Duyck
  0 siblings, 1 reply; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-17 13:19 UTC (permalink / raw)
  To: Alexander H Duyck, davem, kuba, pabeni
  Cc: netdev, linux-kernel, Andrew Morton, linux-mm

On 2024/4/17 0:22, Alexander H Duyck wrote:
> On Mon, 2024-04-15 at 21:19 +0800, Yunsheng Lin wrote:
>> The '(PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)' case is for a
>> system with page size less than 32KB, which is 0x8000 bytes and
>> requires 16 bits of space. Change 'size' to 'size_mask' to avoid
>> using the MSB, and change the 'pfmemalloc' field to reuse that
>> MSB, so that we remove the original space needed by 'pfmemalloc'.
>>
>> For another case, the MSB of 'offset' is reused for 'pfmemalloc'.
>>
>> Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
>> ---
>>  include/linux/page_frag_cache.h | 13 ++++++++-----
>>  mm/page_frag_cache.c            |  5 +++--
>>  2 files changed, 11 insertions(+), 7 deletions(-)
>>
>> diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
>> index fe5faa80b6c3..40a7d6da9ef0 100644
>> --- a/include/linux/page_frag_cache.h
>> +++ b/include/linux/page_frag_cache.h
>> @@ -12,15 +12,16 @@ struct page_frag_cache {
>>  	void *va;
>>  #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
>>  	__u16 offset;
>> -	__u16 size;
>> +	__u16 size_mask:15;
>> +	__u16 pfmemalloc:1;
>>  #else
>> -	__u32 offset;
>> +	__u32 offset:31;
>> +	__u32 pfmemalloc:1;
>>  #endif
> 
> This seems like a really bad idea. Using a bit-field like this seems
> like a waste as it means that all the accesses now have to add
> additional operations to access either offset or size. It wasn't as if
> this is an oversized struct, or one that we are allocating a ton of. As
> such I am not sure why we need to optimize for size like this.

For the old 'struct page_frag' use case, there is one 'struct page_frag'
for every socket and task_struct; there may be tens of thousands of
them even on a 32-bit system, which might mean a lot of memory even for
a single-byte saving, see patch 13.
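
A rough user-space model of that saving; these structs are stand-ins
for the two layouts of the PAGE_SIZE < 32KB case, and the exact sizes
depend on the target ABI:

#include <stdbool.h>
#include <stdio.h>

struct frag_cache_before {		/* separate bool pfmemalloc */
	void *va;
	unsigned short offset;
	unsigned short size;
	unsigned int pagecnt_bias;
	bool pfmemalloc;
};

struct frag_cache_after {		/* pfmemalloc packed into the MSB */
	void *va;
	unsigned short offset;
	unsigned short size_mask:15;
	unsigned short pfmemalloc:1;
	unsigned int pagecnt_bias;
};

int main(void)
{
	/* typically 16 vs 12 bytes on 32-bit, 24 vs 16 on 64-bit */
	printf("before: %zu, after: %zu\n",
	       sizeof(struct frag_cache_before),
	       sizeof(struct frag_cache_after));
	return 0;
}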

> 
>>  	/* we maintain a pagecount bias, so that we dont dirty cache line
>>  	 * containing page->_refcount every time we allocate a fragment.
>>  	 */
>>  	unsigned int		pagecnt_bias;
>> -	bool pfmemalloc;
>>  };


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 10/15] mm: page_frag: reuse existing bit field of 'va' for pagecnt_bias
  2024-04-16 16:33   ` Alexander H Duyck
@ 2024-04-17 13:23     ` Yunsheng Lin
  0 siblings, 0 replies; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-17 13:23 UTC (permalink / raw)
  To: Alexander H Duyck, davem, kuba, pabeni
  Cc: netdev, linux-kernel, Andrew Morton, linux-mm

On 2024/4/17 0:33, Alexander H Duyck wrote:
> On Mon, 2024-04-15 at 21:19 +0800, Yunsheng Lin wrote:
>> As 'va' is always aligned with the order of the page allocated,
>> we can reuse its LSB bits for the pagecount bias and remove the
>> original space needed by 'pagecnt_bias'. Also limit 'fragsz' to be
>> at least the size of 'unsigned int' to match the limited
>> pagecnt_bias.
>>
>> Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
> 
> What is the point of this? You are trading off space for size on a data
> structure that is only something like 24B in size and only allocated a
> few times.

As we are going to replace page_frag with page_frag_cache in patch 13,
it is not going to be allocated only a few times, as mentioned.

> 
>> ---
>>  include/linux/page_frag_cache.h | 20 +++++++----
>>  mm/page_frag_cache.c            | 63 +++++++++++++++++++--------------
>>  2 files changed, 50 insertions(+), 33 deletions(-)
>>
>> diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
>> index 40a7d6da9ef0..a97a1ac017d6 100644
>> --- a/include/linux/page_frag_cache.h
>> +++ b/include/linux/page_frag_cache.h
>> @@ -9,7 +9,18 @@
>>  #define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)
>>  
>>  struct page_frag_cache {
>> -	void *va;
>> +	union {
>> +		void *va;
>> +		/* we maintain a pagecount bias, so that we dont dirty cache
>> +		 * line containing page->_refcount every time we allocate a
>> +		 * fragment. As 'va' is always aligned with the order of the
>> +		 * page allocated, we can reuse the LSB bits for the pagecount
>> +		 * bias, and its bit width happens to be indicated by the
>> +		 * 'size_mask' below.
>> +		 */
>> +		unsigned long pagecnt_bias;
>> +
>> +	};
> 
> Both va and pagecnt_bias are frequently accessed items. If pagecnt_bias
> somehow ends up exceeding the alignment of the page, we run the risk of
> corrupting data or creating a page fault.
> 
> In my opinion this is not worth the risk especially since with the
> previous change your new change results in 0 size savings on 64b
> systems as the structure will be aligned to the size of the pointer.

But aren't we going to avoid register usage and a load if we reuse the
lower bits of 'va' on 64b systems? An added benefit is the memory
saving for 32b systems, as mentioned in the previous patch.

> 
>>  #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
>>  	__u16 offset;
>>  	__u16 size_mask:15;
>> @@ -18,10 +29,6 @@ struct page_frag_cache {
>>  	__u32 offset:31;
>>  	__u32 pfmemalloc:1;
>>  #endif
>> -	/* we maintain a pagecount bias, so that we dont dirty cache line
>> -	 * containing page->_refcount every time we allocate a fragment.
>> -	 */
>> -	unsigned int		pagecnt_bias;
>>  };
>>  
>>  static inline void page_frag_cache_init(struct page_frag_cache *nc)
>> @@ -56,7 +63,8 @@ static inline void *page_frag_alloc_va_align(struct page_frag_cache *nc,
>>  					     gfp_t gfp_mask,
>>  					     unsigned int align)
>>  {
>> -	WARN_ON_ONCE(!is_power_of_2(align) || align >= PAGE_SIZE);
>> +	WARN_ON_ONCE(!is_power_of_2(align) || align >= PAGE_SIZE ||
>> +		     fragsz < sizeof(unsigned int));
> 
> What is the reason for this change? Seems like it is to account for an
> issue somewhere.

If fragsz is one, we might not have enough pagecnt_bias bits for it,
as we are using the lower bits of 'va' now.
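
To put rough numbers on it (my own illustration, assuming 4K order-0
pages, not something from the patch itself):

	/* fragsz == 1 would allow up to PAGE_SIZE (4096) fragments per
	 * page, more than the 12 alignment bits of a page-aligned 'va'
	 * can count. fragsz >= sizeof(unsigned int) caps that at 1024
	 * fragments, so pagecnt_bias fits in the reused LSB bits with
	 * room to spare.
	 */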

> 
>>  
>>  	return __page_frag_alloc_va_align(nc, fragsz, gfp_mask, align);


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 09/15] mm: page_frag: reuse MSB of 'size' field for pfmemalloc
  2024-04-17 13:19     ` Yunsheng Lin
@ 2024-04-17 15:11       ` Alexander H Duyck
  2024-04-18  9:39         ` Yunsheng Lin
  0 siblings, 1 reply; 32+ messages in thread
From: Alexander H Duyck @ 2024-04-17 15:11 UTC (permalink / raw)
  To: Yunsheng Lin, davem, kuba, pabeni
  Cc: netdev, linux-kernel, Andrew Morton, linux-mm

On Wed, 2024-04-17 at 21:19 +0800, Yunsheng Lin wrote:
> On 2024/4/17 0:22, Alexander H Duyck wrote:
> > On Mon, 2024-04-15 at 21:19 +0800, Yunsheng Lin wrote:
> > > The '(PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)' case is for systems
> > > with a page size of less than 32KB; 32KB is 0x8000 bytes and needs
> > > 16 bits of space, so change 'size' to 'size_mask' to avoid using
> > > the MSB, and change the 'pfmemalloc' field to reuse that MSB, so
> > > that we remove the original space needed by 'pfmemalloc'.
> > >
> > > In the other case, the MSB of 'offset' is reused for 'pfmemalloc'.
> > > 
> > > Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
> > > ---
> > >  include/linux/page_frag_cache.h | 13 ++++++++-----
> > >  mm/page_frag_cache.c            |  5 +++--
> > >  2 files changed, 11 insertions(+), 7 deletions(-)
> > > 
> > > diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
> > > index fe5faa80b6c3..40a7d6da9ef0 100644
> > > --- a/include/linux/page_frag_cache.h
> > > +++ b/include/linux/page_frag_cache.h
> > > @@ -12,15 +12,16 @@ struct page_frag_cache {
> > >  	void *va;
> > >  #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
> > >  	__u16 offset;
> > > -	__u16 size;
> > > +	__u16 size_mask:15;
> > > +	__u16 pfmemalloc:1;
> > >  #else
> > > -	__u32 offset;
> > > +	__u32 offset:31;
> > > +	__u32 pfmemalloc:1;
> > >  #endif
> > 
> > This seems like a really bad idea. Using a bit-field like this seems
> > like a waste, as it means that all the accesses now have to add
> > additional operations to access either offset or size. It isn't as if
> > this is an oversized struct, or one that we are allocating a ton of. As
> > such, I am not sure why we need to optimize for size like this.
> 
> For the old 'struct page_frag' use case, there is one 'struct page_frag'
> for every socket and task_struct; there may be tens of thousands of
> them even on a 32-bit system, which might mean a lot of memory even for
> a single-byte saving, see patch 13.
> 

Yeah, I finally had time to finish getting through the patch set last
night. Sorry for quick-firing reviews, but lately I haven't had much
time to work on upstream things, and as you mentioned, last time I only
got to 3 patches, so I was trying to speed through.

I get that you are trying to reduce the size but in the next patch you
actually end up overshooting that on x86_64 systems. I am assuming that
is to try to account for the 32b use case? On 64b I am pretty sure you
don't get any gain since a long followed by two u16s and an int will
still be 16B. What we probably need to watch out for is the
optimization for size versus having to add instructions to extract and
insert the data back into the struct.

Anyway, as far as this layout goes, I am not sure it is the best way to go.
You are combining pfmemalloc with either size *OR* offset, and then
combining the pagecnt_bias with the va. I'm wondering if it wouldn't
make more sense to look at putting together the structure something
like:

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
typedef u16 page_frag_bias_t;
#else
typedef u32 page_frag_bias_t;
#endif

struct page_frag_cache {
	/* page address and offset */
	void *va;
	page_frag_bias_t pagecnt_bias;
	u8 pfmemalloc;
	u8 page_frag_order;
}

The basic idea would be that we would be able to replace the size mask
with just a shift value representing the page order of the page being
fragmented. With that we can reduce the size to just a single byte. In
addition, we could probably leave it there regardless of build, as the
order should be initialized to 0 when this is allocated, so it would be
correct even in the case where it isn't used (and there isn't much we
can do about the hole anyway).

In addition, by combining the virtual address with the offset we can
just use the combined result for what we need. The only item that has
to be worked out is how to deal with the end of a page in the count-up
case. However, the combination seems the most logical one since they are
meant to be combined ultimately anyway. It does put limits on when we
can align things, as we don't want to align ourselves into the next
page, but I think it makes more sense than the other limits that had to
be put on allocations and such in order to allow us to squeeze
pagecnt_bias into the virtual address.
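
A minimal sketch of the accessors this layout implies (my own
illustration; the helper names are made up, not from the patches):

static inline unsigned long page_frag_cache_size(struct page_frag_cache *nc)
{
	/* recover the fragmented size from the stored page order */
	return PAGE_SIZE << nc->page_frag_order;
}

static inline unsigned long page_frag_cache_offset(struct page_frag_cache *nc)
{
	/* the offset lives in the low bits of the combined pointer */
	return (unsigned long)nc->va & (page_frag_cache_size(nc) - 1);
}

The offset accessor is also why the end-of-page case needs care: once
va is advanced to the next page boundary, the masked offset wraps back
to 0 and becomes indistinguishable from the start of the page.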

Anyway, I pulled in your patches and plan to do a bit of testing, after
I figure out the nvme disk ID regression I am seeing. My main
concern can be summed up as the NIC driver use case
(netdev/napi_alloc_frag callers) versus the socket/vhost use case. The
main thing in the case of the NIC driver callers is that we have a need
for isolation and guarantees that we won't lose cache line alignment. I
think those are the callers you are missing in your benchmarks, but
arguably that might be something you cannot test, as I don't know what
NICs you have access to and whether any of them are using those calls.


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 09/15] mm: page_frag: reuse MSB of 'size' field for pfmemalloc
  2024-04-17 15:11       ` Alexander H Duyck
@ 2024-04-18  9:39         ` Yunsheng Lin
  2024-04-26  9:38           ` Yunsheng Lin
  0 siblings, 1 reply; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-18  9:39 UTC (permalink / raw)
  To: Alexander H Duyck, davem, kuba, pabeni
  Cc: netdev, linux-kernel, Andrew Morton, linux-mm

On 2024/4/17 23:11, Alexander H Duyck wrote:
> On Wed, 2024-04-17 at 21:19 +0800, Yunsheng Lin wrote:
>> On 2024/4/17 0:22, Alexander H Duyck wrote:
>>> On Mon, 2024-04-15 at 21:19 +0800, Yunsheng Lin wrote:
>>>> The '(PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)' case is for systems
>>>> with a page size of less than 32KB; 32KB is 0x8000 bytes and needs
>>>> 16 bits of space, so change 'size' to 'size_mask' to avoid using
>>>> the MSB, and change the 'pfmemalloc' field to reuse that MSB, so
>>>> that we remove the original space needed by 'pfmemalloc'.
>>>>
>>>> In the other case, the MSB of 'offset' is reused for 'pfmemalloc'.
>>>>
>>>> Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
>>>> ---
>>>>  include/linux/page_frag_cache.h | 13 ++++++++-----
>>>>  mm/page_frag_cache.c            |  5 +++--
>>>>  2 files changed, 11 insertions(+), 7 deletions(-)
>>>>
>>>> diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
>>>> index fe5faa80b6c3..40a7d6da9ef0 100644
>>>> --- a/include/linux/page_frag_cache.h
>>>> +++ b/include/linux/page_frag_cache.h
>>>> @@ -12,15 +12,16 @@ struct page_frag_cache {
>>>>  	void *va;
>>>>  #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
>>>>  	__u16 offset;
>>>> -	__u16 size;
>>>> +	__u16 size_mask:15;
>>>> +	__u16 pfmemalloc:1;
>>>>  #else
>>>> -	__u32 offset;
>>>> +	__u32 offset:31;
>>>> +	__u32 pfmemalloc:1;
>>>>  #endif
>>>
>>> This seems like a really bad idea. Using a bit-field like this seems
>>> like a waste, as it means that all the accesses now have to add
>>> additional operations to access either offset or size. It isn't as if
>>> this is an oversized struct, or one that we are allocating a ton of. As
>>> such, I am not sure why we need to optimize for size like this.
>>
>> For the old 'struct page_frag' use case, there is one 'struct page_frag'
>> for every socket and task_struct; there may be tens of thousands of
>> them even on a 32-bit system, which might mean a lot of memory even for
>> a single-byte saving, see patch 13.
>>
> 
> Yeah, I finally had time to finish getting through the patch set last
> night. Sorry for quick-firing reviews, but lately I haven't had much
> time to work on upstream things, and as you mentioned, last time I only
> got to 3 patches, so I was trying to speed through.
> 
> I get that you are trying to reduce the size but in the next patch you
> actually end up overshooting that on x86_64 systems. I am assuming that
> is to try to account for the 32b use case? On 64b I am pretty sure you
> don't get any gain since a long followed by two u16s and an int will
> still be 16B. What we probably need to watch out for is the
> optimization for size versus having to add instructions to extract and
> insert the data back into the struct.
> 
> Anyway, as far as this layout goes, I am not sure it is the best way to go.
> You are combining pfmemalloc with either size *OR* offset, and then

Does it really matter whether pfmemalloc is combined with size or
offset, as we are using a bitfield for pfmemalloc either way?

> combining the pagecnt_bias with the va. I'm wondering if it wouldn't
> make more sense to look at putting together the structure something
> like:
> 
> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
> typedef u16 page_frag_bias_t;
> #else
> typedef u32 page_frag_bias_t;
> #endif
> 
> struct page_frag_cache {
> 	/* page address and offset */
> 	void *va;

Generally I agree with combining the virtual address with the
offset, for the reason you mentioned below.

> 	page_frag_bias_t pagecnt_bias;
> 	u8 pfmemalloc;
> 	u8 page_frag_order;
> }

The issue I see with 'page_frag_order' is that we might need to do
'PAGE_SIZE << page_frag_order' to get the actual size, and we might
also need to do 'size - 1' to get the size_mask if we want to mask the
offset out of 'va'.

For page_frag_order, we need to do:
size = PAGE_SIZE << page_frag_order
size_mask = size - 1

For size_mask, it seems we only need to do:
size = size_mask + 1

And PAGE_FRAG_CACHE_MAX_SIZE = 32K fits into 15 bits if we use
size_mask instead of size.

Does it make sense to use the below, so that we only need to use a
bitfield for PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE on 32-bit systems?
And 'struct page_frag' uses a similar '(BITS_PER_LONG > 32)' trick.

struct page_frag_cache {
	/* page address and offset */
	void *va;

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
	u16 pagecnt_bias;
	u16 size_mask:15;
	u16 pfmemalloc:1;
#else
	u32 pagecnt_bias;
	u16 size_mask;
	u16 pfmemalloc;
#endif
};

> 
> The basic idea would be that we would be able to replace the size mask
> with just a shift value representing the page order of the page being
> fragmented. With that we can reduce the size to just a single byte. In
> addition, we could probably leave it there regardless of build, as the
> order should be initialized to 0 when this is allocated, so it would be
> correct even in the case where it isn't used (and there isn't much we
> can do about the hole anyway).
> 
> In addition, by combining the virtual address with the offset we can
> just use the combined result for what we need. The only item that has
> to be worked out is how to deal with the end of a page in the count-up
> case. However, the combination seems the most logical one since they are
> meant to be combined ultimately anyway. It does put limits on when we
> can align things, as we don't want to align ourselves into the next

I guess it means we need to mask the offset out of 'va' before doing
the aligning operation and the 'offset + fragsz > size' check, right?
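
Something like below, I suppose (just a sketch of my understanding,
not tested):

	unsigned long page_addr = (unsigned long)va & ~size_mask;
	unsigned long offset = (unsigned long)va & size_mask;

	offset = ALIGN(offset, align);
	if (offset + fragsz > size)
		/* refill or recycle the page */;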

> page, but I think it makes more sense than the other limits that had to
> be put on allocations and such in order to allow us to squeeze
> pagecnt_bias into the virtual address.
> 
> Anyway, I pulled in your patches and plan to do a bit of testing, after
> I figure out the nvme disk ID regression I am seeing. My main
> concern can be summed up as the NIC driver use case
> (netdev/napi_alloc_frag callers) versus the socket/vhost use case. The
> main thing in the case of the NIC driver callers is that we have a need
> for isolation and guarantees that we won't lose cache line alignment. I
> think those are the callers you are missing in your benchmarks, but
> arguably that might be something you cannot test, as I don't know what
> NICs you have access to and whether any of them are using those calls.

I guess we just need to replace the API used by socket/vhost with the
one used by the netdev/napi_alloc_frag callers in mm/page_frag_test.c
in patch 1, which is introduced to test the performance of the
page_frag implementation; see:

https://lore.kernel.org/all/20240415131941.51153-2-linyunsheng@huawei.com/

> .
> 


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 09/15] mm: page_frag: reuse MSB of 'size' field for pfmemalloc
  2024-04-18  9:39         ` Yunsheng Lin
@ 2024-04-26  9:38           ` Yunsheng Lin
  2024-04-29 14:49             ` Alexander Duyck
  0 siblings, 1 reply; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-26  9:38 UTC (permalink / raw)
  To: Alexander H Duyck, davem, kuba, pabeni
  Cc: netdev, linux-kernel, Andrew Morton, linux-mm

On 2024/4/18 17:39, Yunsheng Lin wrote:

...

> 
>> combining the pagecnt_bias with the va. I'm wondering if it wouldn't
>> make more sense to look at putting together the structure something
>> like:
>>
>> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
>> typedef u16 page_frag_bias_t;
>> #else
>> typedef u32 page_frag_bias_t;
>> #endif
>>
>> struct page_frag_cache {
>> 	/* page address and offset */
>> 	void *va;
> 
> Generally I agree with combining the virtual address with the
> offset, for the reason you mentioned below.
> 
>> 	page_frag_bias_t pagecnt_bias;
>> 	u8 pfmemalloc;
>> 	u8 page_frag_order;
>> }
> 
> The issue I see with 'page_frag_order' is that we might need to do
> 'PAGE_SIZE << page_frag_order' to get the actual size, and we might
> also need to do 'size - 1' to get the size_mask if we want to mask the
> offset out of 'va'.
>
> For page_frag_order, we need to do:
> size = PAGE_SIZE << page_frag_order
> size_mask = size - 1
>
> For size_mask, it seems we only need to do:
> size = size_mask + 1
>
> And PAGE_FRAG_CACHE_MAX_SIZE = 32K fits into 15 bits if we use
> size_mask instead of size.
> 
> Does it make sense to use the below, so that we only need to use a
> bitfield for PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE on 32-bit systems?
> And 'struct page_frag' uses a similar '(BITS_PER_LONG > 32)' trick.
> 
> struct page_frag_cache {
> 	/* page address and offset */
> 	void *va;
> 
> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
> 	u16 pagecnt_bias;
> 	u16 size_mask:15;
> 	u16 pfmemalloc:1;
> #else
> 	u32 pagecnt_bias;
> 	u16 size_mask;
> 	u16 pfmemalloc;
> #endif
> };
> 

After considering a few different layouts for 'struct page_frag_cache',
it seems the below is more optimized:

struct page_frag_cache {
	/* page address & pfmemalloc & order */
	void *va;

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
	u16 pagecnt_bias;
	u16 size;
#else
	u32 pagecnt_bias;
	u32 size;
#endif
}

The lower bits of 'va' are OR'ed with the page order & pfmemalloc
instead of offset or pagecnt_bias, so that we don't have to add more
checking to handle not having enough space for offset or pagecnt_bias
when PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE on a 32-bit system. And as
the page address & pfmemalloc & order are unchanged for the same page
in the same 'page_frag_cache' instance, it makes sense to fit them
together.

Also, it seems better to replace 'offset' with 'size', which indicates
the remaining size of the cache in a 'page_frag_cache' instance; we
might then be able to do a single 'size >= fragsz' check for the case
of the cache being sufficient, which should be the fast path if we
ensure size is zero when 'va' == NULL.

Something like below:

#define PAGE_FRAG_CACHE_ORDER_MASK	GENMASK(1, 0)
#define PAGE_FRAG_CACHE_PFMEMALLOC_BIT	BIT(2)

struct page_frag_cache {
	/* page address & pfmemalloc & order */
	void *va;

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
	u16 pagecnt_bias;
	u16 size;
#else
	u32 pagecnt_bias;
	u32 size;
#endif
};


static void *__page_frag_cache_refill(struct page_frag_cache *nc,
				      unsigned int fragsz, gfp_t gfp_mask,
				      unsigned int align_mask)
{
	gfp_t gfp = gfp_mask;
	struct page *page;
	void *va;

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	/* Ensure free_unref_page() can be used to free the page fragment */
	BUILD_BUG_ON(PAGE_FRAG_CACHE_MAX_ORDER > PAGE_ALLOC_COSTLY_ORDER);

	gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) |  __GFP_COMP |
		   __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
				PAGE_FRAG_CACHE_MAX_ORDER);
	if (likely(page)) {
		nc->size = PAGE_FRAG_CACHE_MAX_SIZE - fragsz;
		va = page_address(page);
		nc->va = (void *)((unsigned long)va |
				  PAGE_FRAG_CACHE_MAX_ORDER |
				  (page_is_pfmemalloc(page) ?
				   PAGE_FRAG_CACHE_PFMEMALLOC_BIT : 0));
		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE;
		return va;
	}
#endif
	page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
	if (likely(page)) {
		nc->size = PAGE_SIZE - fragsz;
		va = page_address(page);
		nc->va = (void *)((unsigned long)va |
				  (page_is_pfmemalloc(page) ?
				   PAGE_FRAG_CACHE_PFMEMALLOC_BIT : 0));
		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE;
		return va;
	}

	nc->va = NULL;
	nc->size = 0;
	return NULL;
}

void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
				 unsigned int fragsz, gfp_t gfp_mask,
				 unsigned int align_mask)
{
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	unsigned long page_order;
#endif
	unsigned long page_size;
	unsigned long size;
	struct page *page;
	void *va;

	size = nc->size & align_mask;
	va = nc->va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	page_order = (unsigned long)va & PAGE_FRAG_CACHE_ORDER_MASK;
	page_size = PAGE_SIZE << page_order;
#else
	page_size = PAGE_SIZE;
#endif

	if (unlikely(fragsz > size)) {
		if (unlikely(!va))
			return __page_frag_cache_refill(nc, fragsz, gfp_mask,
							align_mask);

		/* fragsz is not supposed to be bigger than PAGE_SIZE as we are
		 * allowing order 3 page allocation to fail easily under low
		 * memory condition.
		 */
		if (WARN_ON_ONCE(fragsz > PAGE_SIZE))
			return NULL;

		page = virt_to_page(va);
		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
			return __page_frag_cache_refill(nc, fragsz, gfp_mask,
							align_mask);

		if (unlikely((unsigned long)va &
			     PAGE_FRAG_CACHE_PFMEMALLOC_BIT)) {
			free_unref_page(page, compound_order(page));
			return __page_frag_cache_refill(nc, fragsz, gfp_mask,
							align_mask);
		}

		/* OK, page count is 0, we can safely set it */
		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);

		/* reset page count bias and offset to start of new frag */
		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
		size = page_size;
	}

	va = (void *)((unsigned long)va & PAGE_MASK);
	va = va + (page_size - size);
	nc->size = size - fragsz;
	nc->pagecnt_bias--;

	return va;
}
EXPORT_SYMBOL(__page_frag_alloc_va_align);


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 09/15] mm: page_frag: reuse MSB of 'size' field for pfmemalloc
  2024-04-26  9:38           ` Yunsheng Lin
@ 2024-04-29 14:49             ` Alexander Duyck
  2024-04-30 12:05               ` Yunsheng Lin
  0 siblings, 1 reply; 32+ messages in thread
From: Alexander Duyck @ 2024-04-29 14:49 UTC (permalink / raw)
  To: Yunsheng Lin
  Cc: davem, kuba, pabeni, netdev, linux-kernel, Andrew Morton, linux-mm

On Fri, Apr 26, 2024 at 2:38 AM Yunsheng Lin <linyunsheng@huawei.com> wrote:
>
> On 2024/4/18 17:39, Yunsheng Lin wrote:
>
> ...
>
> >
> >> combining the pagecnt_bias with the va. I'm wondering if it wouldn't
> >> make more sense to look at putting together the structure something
> >> like:
> >>
> >> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
> >> typedef u16 page_frag_bias_t;
> >> #else
> >> typedef u32 page_frag_bias_t;
> >> #endif
> >>
> >> struct page_frag_cache {
> >>      /* page address and offset */
> >>      void *va;
> >
> > Generally I agree with combining the virtual address with the
> > offset, for the reason you mentioned below.
> >
> >>      page_frag_bias_t pagecnt_bias;
> >>      u8 pfmemalloc;
> >>      u8 page_frag_order;
> >> }
> >
> > The issue I see with 'page_frag_order' is that we might need to do
> > 'PAGE_SIZE << page_frag_order' to get the actual size, and we might
> > also need to do 'size - 1' to get the size_mask if we want to mask the
> > offset out of 'va'.
> >
> > For page_frag_order, we need to do:
> > size = PAGE_SIZE << page_frag_order
> > size_mask = size - 1
> >
> > For size_mask, it seems we only need to do:
> > size = size_mask + 1
> >
> > And PAGE_FRAG_CACHE_MAX_SIZE = 32K fits into 15 bits if we use
> > size_mask instead of size.
> >
> > Does it make sense to use the below, so that we only need to use a
> > bitfield for PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE on 32-bit systems?
> > And 'struct page_frag' uses a similar '(BITS_PER_LONG > 32)' trick.
> >
> > struct page_frag_cache {
> >       /* page address and offset */
> >       void *va;
> >
> > #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
> >       u16 pagecnt_bias;
> >       u16 size_mask:15;
> >       u16 pfmemalloc:1;
> > #else
> >       u32 pagecnt_bias;
> >       u16 size_mask;
> >       u16 pfmemalloc;
> > #endif
> > };
> >
>
> After considering a few different layouts for 'struct page_frag_cache',
> it seems the below is more optimized:
>
> struct page_frag_cache {
>         /* page address & pfmemalloc & order */
>         void *va;

I see. So basically just pack the much smaller bitfields in here.

>
> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
>         u16 pagecnt_bias;
>         u16 size;
> #else
>         u32 pagecnt_bias;
>         u32 size;
> #endif
> }
>
> The lower bits of 'va' are OR'ed with the page order & pfmemalloc
> instead of offset or pagecnt_bias, so that we don't have to add more
> checking to handle not having enough space for offset or pagecnt_bias
> when PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE on a 32-bit system. And as
> the page address & pfmemalloc & order are unchanged for the same page
> in the same 'page_frag_cache' instance, it makes sense to fit them
> together.
>
> Also, it seems better to replace 'offset' with 'size', which indicates
> the remaining size of the cache in a 'page_frag_cache' instance; we
> might then be able to do a single 'size >= fragsz' check for the case
> of the cache being sufficient, which should be the fast path if we
> ensure size is zero when 'va' == NULL.

I'm not sure the rename to size is called for as it is going to be
confusing. Maybe something like "remaining"?

> Something like below:
>
> #define PAGE_FRAG_CACHE_ORDER_MASK      GENMASK(1, 0)
> #define PAGE_FRAG_CACHE_PFMEMALLOC_BIT  BIT(2)

The only downside is that it is ossifying things so that we can only
ever do order 3 as the maximum cache size. It might be better to do a
full 8 bits, as on x86 it would just mean accessing the low end of a
16b register. Then you can have pfmemalloc at bit 8.

> struct page_frag_cache {
>         /* page address & pfmemalloc & order */
>         void *va;
>

When you start combining things like this we normally would convert va
to an unsigned long.

> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
>         u16 pagecnt_bias;
>         u16 size;
> #else
>         u32 pagecnt_bias;
>         u32 size;
> #endif
> };
>
>
> static void *__page_frag_cache_refill(struct page_frag_cache *nc,
>                                       unsigned int fragsz, gfp_t gfp_mask,
>                                       unsigned int align_mask)
> {
>         gfp_t gfp = gfp_mask;
>         struct page *page;
>         void *va;
>
> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
>         /* Ensure free_unref_page() can be used to free the page fragment */
>         BUILD_BUG_ON(PAGE_FRAG_CACHE_MAX_ORDER > PAGE_ALLOC_COSTLY_ORDER);
>
>         gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) |  __GFP_COMP |
>                    __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
>         page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
>                                 PAGE_FRAG_CACHE_MAX_ORDER);
>         if (likely(page)) {
>                 nc->size = PAGE_FRAG_CACHE_MAX_SIZE - fragsz;

I wouldn't pass fragsz here. Ideally we keep this from having to get
pulled directly into the allocator and can instead treat this as a
pristine page. We can do the subtraction further down in the actual
frag alloc call.

>                 va = page_address(page);
>                 nc->va = (void *)((unsigned long)va |
>                                   PAGE_FRAG_CACHE_MAX_ORDER |
>                                   (page_is_pfmemalloc(page) ?
>                                    PAGE_FRAG_CACHE_PFMEMALLOC_BIT : 0));
>                 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
>                 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE;
>                 return va;
>         }
> #endif
>         page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
>         if (likely(page)) {
>                 nc->size = PAGE_SIZE - fragsz;
>                 va = page_address(page);
>                 nc->va = (void *)((unsigned long)va |
>                                   (page_is_pfmemalloc(page) ?
>                                    PAGE_FRAG_CACHE_PFMEMALLOC_BIT : 0));
>                 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
>                 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE;
>                 return va;
>         }
>
>         nc->va = NULL;
>         nc->size = 0;
>         return NULL;
> }
>
> void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
>                                  unsigned int fragsz, gfp_t gfp_mask,
>                                  unsigned int align_mask)
> {
> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
>         unsigned long page_order;
> #endif
>         unsigned long page_size;
>         unsigned long size;
>         struct page *page;
>         void *va;
>
>         size = nc->size & align_mask;
>         va = nc->va;
> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
>         page_order = (unsigned long)va & PAGE_FRAG_CACHE_ORDER_MASK;
>         page_size = PAGE_SIZE << page_order;
> #else
>         page_size = PAGE_SIZE;
> #endif

So I notice you got rid of the loops within the function. One of the
reasons for structuring it the way it was is to enable better code
caching. By unfolding the loops you are increasing the number of
instructions that have to be fetched and processed in order to
allocate the buffers.

>
>         if (unlikely(fragsz > size)) {
>                 if (unlikely(!va))
>                         return __page_frag_cache_refill(nc, fragsz, gfp_mask,
>                                                         align_mask);
>
>                 /* fragsz is not supposed to be bigger than PAGE_SIZE as we are
>                  * allowing order 3 page allocation to fail easily under low
>                  * memory condition.
>                  */
>                 if (WARN_ON_ONCE(fragsz > PAGE_SIZE))
>                         return NULL;
>
>                 page = virt_to_page(va);
>                 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
>                         return __page_frag_cache_refill(nc, fragsz, gfp_mask,
>                                                         align_mask);
>
>                 if (unlikely((unsigned long)va &
>                              PAGE_FRAG_CACHE_PFMEMALLOC_BIT)) {
>                         free_unref_page(page, compound_order(page));
>                         return __page_frag_cache_refill(nc, fragsz, gfp_mask,
>                                                         align_mask);
>                 }
>
>                 /* OK, page count is 0, we can safely set it */
>                 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
>
>                 /* reset page count bias and offset to start of new frag */
>                 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
>                 size = page_size;
>         }
>
>         va = (void *)((unsigned long)va & PAGE_MASK);
>         va = va + (page_size - size);
>         nc->size = size - fragsz;
>         nc->pagecnt_bias--;
>
>         return va;
> }
> EXPORT_SYMBOL(__page_frag_alloc_va_align);
>


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 09/15] mm: page_frag: reuse MSB of 'size' field for pfmemalloc
  2024-04-29 14:49             ` Alexander Duyck
@ 2024-04-30 12:05               ` Yunsheng Lin
  2024-04-30 14:54                 ` Alexander Duyck
  0 siblings, 1 reply; 32+ messages in thread
From: Yunsheng Lin @ 2024-04-30 12:05 UTC (permalink / raw)
  To: Alexander Duyck
  Cc: davem, kuba, pabeni, netdev, linux-kernel, Andrew Morton, linux-mm

On 2024/4/29 22:49, Alexander Duyck wrote:

...

>>>
>>
>> After considering a few different layouts for 'struct page_frag_cache',
>> it seems the below is more optimized:
>>
>> struct page_frag_cache {
>>         /* page address & pfmemalloc & order */
>>         void *va;
> 
> I see. So basically just pack the much smaller bitfields in here.
> 
>>
>> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
>>         u16 pagecnt_bias;
>>         u16 size;
>> #else
>>         u32 pagecnt_bias;
>>         u32 size;
>> #endif
>> }
>>
>> The lower bits of 'va' are OR'ed with the page order & pfmemalloc
>> instead of offset or pagecnt_bias, so that we don't have to add more
>> checking to handle not having enough space for offset or pagecnt_bias
>> when PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE on a 32-bit system. And as
>> the page address & pfmemalloc & order are unchanged for the same page
>> in the same 'page_frag_cache' instance, it makes sense to fit them
>> together.
>>
>> Also, it seems better to replace 'offset' with 'size', which indicates
>> the remaining size of the cache in a 'page_frag_cache' instance; we
>> might then be able to do a single 'size >= fragsz' check for the case
>> of the cache being sufficient, which should be the fast path if we
>> ensure size is zero when 'va' == NULL.
> 
> I'm not sure the rename to size is called for as it is going to be
> confusing. Maybe something like "remaining"?

Yes, using 'size' for that is a bit confusing.
Besides the above 'remaining', from a bit of searching it seems we
have the below options too:
'residual', 'unused', 'surplus'

> 
>> Something like below:
>>
>> #define PAGE_FRAG_CACHE_ORDER_MASK      GENMASK(1, 0)
>> #define PAGE_FRAG_CACHE_PFMEMALLOC_BIT  BIT(2)
> 
> The only downside is that it is ossifying things so that we can only

There are 12 bits that are always usable; we can always extend
ORDER_MASK to more bits if a larger order number is needed.

> ever do order 3 as the maximum cache size. It might be better to do a
> full 8 bits, as on x86 it would just mean accessing the low end of a
> 16b register. Then you can have pfmemalloc at bit 8.

I am not sure I understand the above, as it seems we have the below
options:
1. Use something like the above ORDER_MASK and PFMEMALLOC_BIT.
2. Use a bitfield, something like below:

unsigned long va:20;	/* or 52 on a 64-bit system */
unsigned long pfmemalloc:1;
unsigned long order:11;

Or is there a better idea in your mind?

> 
>> struct page_frag_cache {
>>         /* page address & pfmemalloc & order */
>>         void *va;
>>
> 
> When you start combining things like this we normally would convert va
> to an unsigned long.

Ack.

> 
>> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
>>         u16 pagecnt_bias;
>>         u16 size;
>> #else
>>         u32 pagecnt_bias;
>>         u32 size;
>> #endif
>> };
>>
>>
>> static void *__page_frag_cache_refill(struct page_frag_cache *nc,
>>                                       unsigned int fragsz, gfp_t gfp_mask,
>>                                       unsigned int align_mask)
>> {
>>         gfp_t gfp = gfp_mask;
>>         struct page *page;
>>         void *va;
>>
>> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
>>         /* Ensure free_unref_page() can be used to free the page fragment */
>>         BUILD_BUG_ON(PAGE_FRAG_CACHE_MAX_ORDER > PAGE_ALLOC_COSTLY_ORDER);
>>
>>         gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) |  __GFP_COMP |
>>                    __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
>>         page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
>>                                 PAGE_FRAG_CACHE_MAX_ORDER);
>>         if (likely(page)) {
>>                 nc->size = PAGE_FRAG_CACHE_MAX_SIZE - fragsz;
> 
> I wouldn't pass fragsz here. Ideally we keep this from having to get
> pulled directly into the allocator and can instead treat this as a
> pristine page. We can do the subtraction further down in the actual
> frag alloc call.

Yes, from the maintainability point of view.
But from the performance point of view, doesn't it make sense to do
the subtraction here, as doing it in the actual frag alloc call may
involve more load/store operations?

> 
>>                 va = page_address(page);
>>                 nc->va = (void *)((unsigned long)va |
>>                                   PAGE_FRAG_CACHE_MAX_ORDER |
>>                                   (page_is_pfmemalloc(page) ?
>>                                    PAGE_FRAG_CACHE_PFMEMALLOC_BIT : 0));
>>                 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
>>                 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE;
>>                 return va;
>>         }
>> #endif
>>         page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
>>         if (likely(page)) {
>>                 nc->size = PAGE_SIZE - fragsz;
>>                 va = page_address(page);
>>                 nc->va = (void *)((unsigned long)va |
>>                                   (page_is_pfmemalloc(page) ?
>>                                    PAGE_FRAG_CACHE_PFMEMALLOC_BIT : 0));
>>                 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
>>                 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE;
>>                 return va;
>>         }
>>
>>         nc->va = NULL;
>>         nc->size = 0;
>>         return NULL;
>> }
>>
>> void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
>>                                  unsigned int fragsz, gfp_t gfp_mask,
>>                                  unsigned int align_mask)
>> {
>> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
>>         unsigned long page_order;
>> #endif
>>         unsigned long page_size;
>>         unsigned long size;
>>         struct page *page;
>>         void *va;
>>
>>         size = nc->size & align_mask;
>>         va = nc->va;
>> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
>>         page_order = (unsigned long)va & PAGE_FRAG_CACHE_ORDER_MASK;
>>         page_size = PAGE_SIZE << page_order;
>> #else
>>         page_size = PAGE_SIZE;
>> #endif
> 
> So I notice you got rid of the loops within the function. One of the
> reasons for structuring it the way it was is to enable better code
> caching. By unfolding the loops you are increasing the number of
> instructions that have to be fetched and processed in order to
> allocate the buffers.

I am not sure I understand what 'the loops' means here, as there is no
'while' or 'for' here. I suppose you are referring to the 'goto'?

> 


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 09/15] mm: page_frag: reuse MSB of 'size' field for pfmemalloc
  2024-04-30 12:05               ` Yunsheng Lin
@ 2024-04-30 14:54                 ` Alexander Duyck
  2024-05-06 12:33                   ` Yunsheng Lin
  0 siblings, 1 reply; 32+ messages in thread
From: Alexander Duyck @ 2024-04-30 14:54 UTC (permalink / raw)
  To: Yunsheng Lin
  Cc: davem, kuba, pabeni, netdev, linux-kernel, Andrew Morton, linux-mm

On Tue, Apr 30, 2024 at 5:06 AM Yunsheng Lin <linyunsheng@huawei.com> wrote:
>
> On 2024/4/29 22:49, Alexander Duyck wrote:
>
> ...
>
> >>>
> >>
> >> After considering a few different layouts for 'struct page_frag_cache',
> >> it seems the below is more optimized:
> >>
> >> struct page_frag_cache {
> >>         /* page address & pfmemalloc & order */
> >>         void *va;
> >
> > I see. So basically just pack the much smaller bitfields in here.
> >
> >>
> >> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
> >>         u16 pagecnt_bias;
> >>         u16 size;
> >> #else
> >>         u32 pagecnt_bias;
> >>         u32 size;
> >> #endif
> >> }
> >>
> >> The lower bits of 'va' is or'ed with the page order & pfmemalloc instead
> >> of offset or pagecnt_bias, so that we don't have to add more checking
> >> for handling the problem of not having enough space for offset or
> >> pagecnt_bias for PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE and 32 bits system.
> >> And page address & pfmemalloc & order is unchanged for the same page
> >> in the same 'page_frag_cache' instance, it makes sense to fit them
> >> together.
> >>
> >> Also, it seems it is better to replace 'offset' with 'size', which indicates
> >> the remaining size for the cache in a 'page_frag_cache' instance, and we
> >> might be able to do a single 'size >= fragsz' checking for the case of cache
> >> being enough, which should be the fast path if we ensure size is zoro when
> >> 'va' == NULL.
> >
> > I'm not sure the rename to size is called for as it is going to be
> > confusing. Maybe something like "remaining"?
>
> Yes, using 'size' for that is a bit confusing.
> Besides the above 'remaining', from a bit of searching it seems we
> have the below options too:
> 'residual', 'unused', 'surplus'
>
> >
> >> Something like below:
> >>
> >> #define PAGE_FRAG_CACHE_ORDER_MASK      GENMASK(1, 0)
> >> #define PAGE_FRAG_CACHE_PFMEMALLOC_BIT  BIT(2)
> >
> > The only downside is that it is ossifying things so that we can only
>
> There are 12 bits that are always usable; we can always extend
> ORDER_MASK to more bits if a larger order number is needed.
>
> > ever do order 3 as the maximum cache size. It might be better to do a
> > full 8 bits, as on x86 it would just mean accessing the low end of a
> > 16b register. Then you can have pfmemalloc at bit 8.
>
> I am not sure I understand the above, as it seems we have the below
> options:
> 1. Use something like the above ORDER_MASK and PFMEMALLOC_BIT.
> 2. Use a bitfield, something like below:
>
> unsigned long va:20;	/* or 52 on a 64-bit system */
> unsigned long pfmemalloc:1;
> unsigned long order:11;
>
> Or is there a better idea in your mind?

All I was suggesting was to make the ORDER_MASK 8 bits. Doing that, the
compiler can just store VA in a register such as RCX and, instead of
having to bother with a mask, it can then just use CL to access the
order. As for the flag bits such as pfmemalloc, the 4 bits starting
at bit 8 would make the most sense, since they don't naturally align to
anything and would have to be masked anyway.

Using a bitfield like you suggest would be problematic, as the compiler
would assume a shift is needed, so you would have to add a shift to
your code to compensate when accessing VA.
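
In macro terms, roughly (my sketch, not code from this series):

#define PAGE_FRAG_CACHE_ORDER_MASK	GENMASK(7, 0)	/* low byte: order */
#define PAGE_FRAG_CACHE_PFMEMALLOC_BIT	BIT(8)		/* first flag bit */

	/* the order is just the low byte of the encoded pointer, so on
	 * x86 it can come straight out of CL with no mask instruction:
	 */
	order = encoded_va & PAGE_FRAG_CACHE_ORDER_MASK;
	pfmemalloc = !!(encoded_va & PAGE_FRAG_CACHE_PFMEMALLOC_BIT);

That still only uses 9 of the 12 alignment bits a 4K-aligned pointer
has free, so the page address remains recoverable with PAGE_MASK.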

> >
> >> struct page_frag_cache {
> >>         /* page address & pfmemalloc & order */
> >>         void *va;
> >>
> >
> > When you start combining things like this we normally would convert va
> > to an unsigned long.
>
> Ack.
>
> >
> >> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
> >>         u16 pagecnt_bias;
> >>         u16 size;
> >> #else
> >>         u32 pagecnt_bias;
> >>         u32 size;
> >> #endif
> >> };
> >>
> >>
> >> static void *__page_frag_cache_refill(struct page_frag_cache *nc,
> >>                                       unsigned int fragsz, gfp_t gfp_mask,
> >>                                       unsigned int align_mask)
> >> {
> >>         gfp_t gfp = gfp_mask;
> >>         struct page *page;
> >>         void *va;
> >>
> >> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
> >>         /* Ensure free_unref_page() can be used to free the page fragment */
> >>         BUILD_BUG_ON(PAGE_FRAG_CACHE_MAX_ORDER > PAGE_ALLOC_COSTLY_ORDER);
> >>
> >>         gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) |  __GFP_COMP |
> >>                    __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
> >>         page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
> >>                                 PAGE_FRAG_CACHE_MAX_ORDER);
> >>         if (likely(page)) {
> >>                 nc->size = PAGE_FRAG_CACHE_MAX_SIZE - fragsz;
> >
> > I wouldn't pass fragsz here. Ideally we keep this from having to get
> > pulled directly into the allocator and can instead treat this as a
> > pristine page. We can do the subtraction further down in the actual
> > frag alloc call.
>
> Yes, from the maintainability point of view.
> But from the performance point of view, doesn't it make sense to do
> the subtraction here, as doing it in the actual frag alloc call may
> involve more load/store operations?

It just means more code paths doing different things. It doesn't add
much here, since what you are doing is juggling more variables in this
function as a result.

> >
> >>                 va = page_address(page);
> >>                 nc->va = (void *)((unsigned long)va |
> >>                                   PAGE_FRAG_CACHE_MAX_ORDER |
> >>                                   (page_is_pfmemalloc(page) ?
> >>                                    PAGE_FRAG_CACHE_PFMEMALLOC_BIT : 0));
> >>                 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
> >>                 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE;
> >>                 return va;
> >>         }
> >> #endif
> >>         page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
> >>         if (likely(page)) {
> >>                 nc->size = PAGE_SIZE - fragsz;
> >>                 va = page_address(page);
> >>                 nc->va = (void *)((unsigned long)va |
> >>                                   (page_is_pfmemalloc(page) ?
> >>                                    PAGE_FRAG_CACHE_PFMEMALLOC_BIT : 0));
> >>                 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
> >>                 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE;
> >>                 return va;
> >>         }
> >>
> >>         nc->va = NULL;
> >>         nc->size = 0;
> >>         return NULL;
> >> }
> >>
> >> void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
> >>                                  unsigned int fragsz, gfp_t gfp_mask,
> >>                                  unsigned int align_mask)
> >> {
> >> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
> >>         unsigned long page_order;
> >> #endif
> >>         unsigned long page_size;
> >>         unsigned long size;
> >>         struct page *page;
> >>         void *va;
> >>
> >>         size = nc->size & align_mask;
> >>         va = nc->va;
> >> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
> >>         page_order = (unsigned long)va & PAGE_FRAG_CACHE_ORDER_MASK;
> >>         page_size = PAGE_SIZE << page_order;
> >> #else
> >>         page_size = PAGE_SIZE;
> >> #endif
> >
> > So I notice you got rid of the loops within the function. One of the
> > reasons for structuring it the way it was is to enable better code
> > caching. By unfolding the loops you are increasing the number of
> > instructions that have to be fetched and processed in order to
> > allocate the buffers.
>
> I am not sure I understand what 'the loops' means here, as there is no
> 'while' or 'for' here. I suppose you are referring to the 'goto'?

So there was logic before that would jump to a label back at the start
of the function. It seems like you got rid of that logic and just
flattened everything out. This is likely resulting in some duplication
in the code and overall an increase in the number of instructions that
need to be fetched to allocate the fragment. As I recall one of the
reasons things were folded up the way they were was to allow it to use
a short jump instruction instead of a longer one. I suspect we may be
losing that with these changes.
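
Roughly this shape, going from memory (made-up helper names, not the
exact current code):

	void *frag_alloc(struct frag_cache *nc, unsigned int fragsz)
	{
		unsigned int offset;

		if (unlikely(!nc->va)) {
	refill:
			if (!frag_cache_refill(nc))
				return NULL;
		}

		offset = nc->offset;
		if (unlikely(offset + fragsz > nc->size)) {
			if (!frag_page_reusable(nc))
				goto refill;	/* short backward jump */
			offset = 0;
		}

		nc->offset = offset + fragsz;
		return nc->va + offset;
	}

The 'goto refill' back into the first block is what kept the slow path
as one short backward jump instead of duplicated fall-through code.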


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH net-next v2 09/15] mm: page_frag: reuse MSB of 'size' field for pfmemalloc
  2024-04-30 14:54                 ` Alexander Duyck
@ 2024-05-06 12:33                   ` Yunsheng Lin
  0 siblings, 0 replies; 32+ messages in thread
From: Yunsheng Lin @ 2024-05-06 12:33 UTC (permalink / raw)
  To: Alexander Duyck
  Cc: davem, kuba, pabeni, netdev, linux-kernel, Andrew Morton, linux-mm

On 2024/4/30 22:54, Alexander Duyck wrote:
> On Tue, Apr 30, 2024 at 5:06 AM Yunsheng Lin <linyunsheng@huawei.com> wrote:
>>
>> On 2024/4/29 22:49, Alexander Duyck wrote:
>>
>> ...
>>
>>>>>
>>>>
>>>> After considering a few different layouts for 'struct page_frag_cache',
>>>> it seems the below is more optimized:
>>>>
>>>> struct page_frag_cache {
>>>>         /* page address & pfmemalloc & order */
>>>>         void *va;
>>>
>>> I see. So basically just pack the much smaller bitfields in here.
>>>
>>>>
>>>> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
>>>>         u16 pagecnt_bias;
>>>>         u16 size;
>>>> #else
>>>>         u32 pagecnt_bias;
>>>>         u32 size;
>>>> #endif
>>>> }
>>>>
>>>> The lower bits of 'va' are OR'ed with the page order & pfmemalloc
>>>> instead of offset or pagecnt_bias, so that we don't have to add more
>>>> checking to handle not having enough space for offset or pagecnt_bias
>>>> when PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE on a 32-bit system. And as
>>>> the page address & pfmemalloc & order are unchanged for the same page
>>>> in the same 'page_frag_cache' instance, it makes sense to fit them
>>>> together.
>>>>
>>>> Also, it seems better to replace 'offset' with 'size', which indicates
>>>> the remaining size of the cache in a 'page_frag_cache' instance; we
>>>> might then be able to do a single 'size >= fragsz' check for the case
>>>> of the cache being sufficient, which should be the fast path if we
>>>> ensure size is zero when 'va' == NULL.
>>>
>>> I'm not sure the rename to size is called for as it is going to be
>>> confusing. Maybe something like "remaining"?
>>
>> Yes, using 'size' for that is a bit confusing.
>> Besides the above 'remaining', from a bit of searching it seems we
>> have the below options too:
>> 'residual', 'unused', 'surplus'
>>
>>>
>>>> Something like below:
>>>>
>>>> #define PAGE_FRAG_CACHE_ORDER_MASK      GENMASK(1, 0)
>>>> #define PAGE_FRAG_CACHE_PFMEMALLOC_BIT  BIT(2)
>>>
>>> The only downside is that it is ossifying things so that we can only
>>
>> There are 12 bits that are always usable; we can always extend
>> ORDER_MASK to more bits if a larger order number is needed.
>>
>>> ever do order 3 as the maximum cache size. It might be better to do a
>>> full 8 bits, as on x86 it would just mean accessing the low end of a
>>> 16b register. Then you can have pfmemalloc at bit 8.
>>
>> I am not sure I understand the above, as it seems we have the below
>> options:
>> 1. Use something like the above ORDER_MASK and PFMEMALLOC_BIT.
>> 2. Use a bitfield, something like below:
>>
>> unsigned long va:20;	/* or 52 on a 64-bit system */
>> unsigned long pfmemalloc:1;
>> unsigned long order:11;
>>
>> Or is there a better idea in your mind?
> 
> All I was suggesting was to make the ORDER_MASK 8 bits. Doing that, the
> compiler can just store VA in a register such as RCX and, instead of
> having to bother with a mask, it can then just use CL to access the
> order. As for the flag bits such as pfmemalloc, the 4 bits starting
> at bit 8 would make the most sense, since they don't naturally align to
> anything and would have to be masked anyway.

Ok.

> 
> Using a bitfield like you suggest would be problematic, as the compiler
> would assume a shift is needed, so you would have to add a shift to
> your code to compensate when accessing VA.
> 
>>>
>>>> struct page_frag_cache {
>>>>         /* page address & pfmemalloc & order */
>>>>         void *va;
>>>>
>>>
>>> When you start combining things like this we normally would convert va
>>> to an unsigned long.
>>
>> Ack.

It seems it makes more sense to convert va to something like 'struct
encoded_va', mirroring 'struct encoded_page' below:

https://elixir.bootlin.com/linux/v6.7-rc8/source/include/linux/mm_types.h#L222
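
i.e. something along the lines of the below sketch (the helper names
are made up by me for illustration):

struct encoded_va {
	unsigned long val;	/* page address | pfmemalloc | order */
};

static inline struct encoded_va encode_aligned_va(void *va,
						  unsigned int order,
						  bool pfmemalloc)
{
	return (struct encoded_va){
		(unsigned long)va | order |
		(pfmemalloc ? PAGE_FRAG_CACHE_PFMEMALLOC_BIT : 0),
	};
}

static inline void *encoded_va_address(struct encoded_va encoded_va)
{
	/* strip the metadata bits to get back the page address */
	return (void *)(encoded_va.val & PAGE_MASK);
}

The wrapper struct keeps anything from dereferencing the tagged
pointer without going through the accessors, the same way
'struct encoded_page' does.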

>>
>>>
>>>> #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
>>>>         u16 pagecnt_bias;
>>>>         u16 size;
>>>> #else
>>>>         u32 pagecnt_bias;
>>>>         u32 size;
>>>> #endif
>>>> };
>>>>
>>>>



^ permalink raw reply	[flat|nested] 32+ messages in thread

end of thread, other threads:[~2024-05-06 12:34 UTC | newest]

Thread overview: 32+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <20240415131941.51153-1-linyunsheng@huawei.com>
2024-04-15 13:19 ` [PATCH net-next v2 01/15] mm: page_frag: add a test module for page_frag Yunsheng Lin
2024-04-15 13:19 ` [PATCH net-next v2 03/15] mm: page_frag: use free_unref_page() to free page fragment Yunsheng Lin
2024-04-15 13:19 ` [PATCH net-next v2 04/15] mm: move the page fragment allocator from page_alloc into its own file Yunsheng Lin
2024-04-15 13:19 ` [PATCH net-next v2 05/15] mm: page_frag: use initial zero offset for page_frag_alloc_align() Yunsheng Lin
2024-04-15 23:55   ` Alexander H Duyck
2024-04-16 13:11     ` Yunsheng Lin
2024-04-16 15:51       ` Alexander H Duyck
2024-04-17 13:17         ` Yunsheng Lin
2024-04-15 13:19 ` [PATCH net-next v2 06/15] mm: page_frag: change page_frag_alloc_* API to accept align param Yunsheng Lin
2024-04-16 16:08   ` Alexander Duyck
2024-04-17 13:18     ` Yunsheng Lin
2024-04-15 13:19 ` [PATCH net-next v2 07/15] mm: page_frag: add '_va' suffix to page_frag API Yunsheng Lin
2024-04-16 16:12   ` Alexander H Duyck
2024-04-17 13:18     ` Yunsheng Lin
2024-04-15 13:19 ` [PATCH net-next v2 08/15] mm: page_frag: add two inline helper for " Yunsheng Lin
2024-04-15 13:19 ` [PATCH net-next v2 09/15] mm: page_frag: reuse MSB of 'size' field for pfmemalloc Yunsheng Lin
2024-04-16 16:22   ` Alexander H Duyck
2024-04-17 13:19     ` Yunsheng Lin
2024-04-17 15:11       ` Alexander H Duyck
2024-04-18  9:39         ` Yunsheng Lin
2024-04-26  9:38           ` Yunsheng Lin
2024-04-29 14:49             ` Alexander Duyck
2024-04-30 12:05               ` Yunsheng Lin
2024-04-30 14:54                 ` Alexander Duyck
2024-05-06 12:33                   ` Yunsheng Lin
2024-04-15 13:19 ` [PATCH net-next v2 10/15] mm: page_frag: reuse existing bit field of 'va' for pagecnt_bias Yunsheng Lin
2024-04-16 16:33   ` Alexander H Duyck
2024-04-17 13:23     ` Yunsheng Lin
2024-04-15 13:19 ` [PATCH net-next v2 12/15] mm: page_frag: introduce prepare/commit API for page_frag Yunsheng Lin
2024-04-15 13:19 ` [PATCH net-next v2 14/15] mm: page_frag: update documentation " Yunsheng Lin
2024-04-16  6:13   ` Bagas Sanjaya
2024-04-16 13:11     ` Yunsheng Lin
