From: Suren Baghdasaryan <surenb@google.com>
To: akpm@linux-foundation.org
Cc: kent.overstreet@linux.dev, mhocko@suse.com, vbabka@suse.cz,
	 hannes@cmpxchg.org, roman.gushchin@linux.dev, mgorman@suse.de,
	 dave@stgolabs.net, willy@infradead.org, liam.howlett@oracle.com,
	 corbet@lwn.net, void@manifault.com, peterz@infradead.org,
	 juri.lelli@redhat.com, catalin.marinas@arm.com, will@kernel.org,
	 arnd@arndb.de, tglx@linutronix.de, mingo@redhat.com,
	 dave.hansen@linux.intel.com, x86@kernel.org, peterx@redhat.com,
	 david@redhat.com, axboe@kernel.dk, mcgrof@kernel.org,
	masahiroy@kernel.org,  nathan@kernel.org, dennis@kernel.org,
	tj@kernel.org, muchun.song@linux.dev,  rppt@kernel.org,
	paulmck@kernel.org, pasha.tatashin@soleen.com,
	 yosryahmed@google.com, yuzhao@google.com, dhowells@redhat.com,
	 hughd@google.com, andreyknvl@gmail.com, keescook@chromium.org,
	 ndesaulniers@google.com, vvvvvv@google.com,
	gregkh@linuxfoundation.org,  ebiggers@google.com,
	ytcoode@gmail.com, vincent.guittot@linaro.org,
	 dietmar.eggemann@arm.com, rostedt@goodmis.org,
	bsegall@google.com,  bristot@redhat.com, vschneid@redhat.com,
	cl@linux.com, penberg@kernel.org,  iamjoonsoo.kim@lge.com,
	42.hyeyoo@gmail.com, glider@google.com,  elver@google.com,
	dvyukov@google.com, shakeelb@google.com,
	 songmuchun@bytedance.com, jbaron@akamai.com,
	rientjes@google.com,  minchan@google.com, kaleshsingh@google.com,
	surenb@google.com,  kernel-team@android.com,
	linux-doc@vger.kernel.org,  linux-kernel@vger.kernel.org,
	iommu@lists.linux.dev,  linux-arch@vger.kernel.org,
	linux-fsdevel@vger.kernel.org, linux-mm@kvack.org,
	 linux-modules@vger.kernel.org, kasan-dev@googlegroups.com,
	 cgroups@vger.kernel.org
Subject: [PATCH v3 29/35] mm: vmalloc: Enable memory allocation profiling
Date: Mon, 12 Feb 2024 13:39:15 -0800
Message-ID: <20240212213922.783301-30-surenb@google.com>
In-Reply-To: <20240212213922.783301-1-surenb@google.com>

From: Kent Overstreet <kent.overstreet@linux.dev>

This wraps all external vmalloc allocation functions with the
alloc_hooks() wrapper and switches internal allocations to the _noprof
variants where appropriate, for the new memory allocation profiling
feature.
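
As a rough illustration of the pattern (a simplified sketch, assuming the
alloc_hooks() macro introduced earlier in this series; the wrapper below
mirrors the vmalloc() conversion in this patch):

	/* The real allocator keeps its implementation, renamed *_noprof. */
	extern void *vmalloc_noprof(unsigned long size) __alloc_size(1);

	/*
	 * Callers continue to call vmalloc(); the macro routes the call
	 * through alloc_hooks(), which accounts the allocation to the
	 * per-callsite allocation tag before handing back the pointer
	 * returned by vmalloc_noprof().
	 */
	#define vmalloc(...)	alloc_hooks(vmalloc_noprof(__VA_ARGS__))

Internal callers that should not be re-accounted (for example
vzalloc_noprof() calling __vmalloc_node_noprof()) call the _noprof
variants directly, so the allocation remains charged to the original
external call site.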

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 drivers/staging/media/atomisp/pci/hmm/hmm.c |  2 +-
 include/linux/vmalloc.h                     | 60 ++++++++++----
 kernel/kallsyms_selftest.c                  |  2 +-
 mm/util.c                                   | 24 +++---
 mm/vmalloc.c                                | 88 ++++++++++-----------
 5 files changed, 103 insertions(+), 73 deletions(-)

diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c
index bb12644fd033..3e2899ad8517 100644
--- a/drivers/staging/media/atomisp/pci/hmm/hmm.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c
@@ -205,7 +205,7 @@ static ia_css_ptr __hmm_alloc(size_t bytes, enum hmm_bo_type type,
 	}
 
 	dev_dbg(atomisp_dev, "pages: 0x%08x (%zu bytes), type: %d, vmalloc %p\n",
-		bo->start, bytes, type, vmalloc);
+		bo->start, bytes, type, vmalloc_noprof);
 
 	return bo->start;
 
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index c720be70c8dd..106d78e75606 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -2,6 +2,8 @@
 #ifndef _LINUX_VMALLOC_H
 #define _LINUX_VMALLOC_H
 
+#include <linux/alloc_tag.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
 #include <linux/list.h>
@@ -137,26 +139,54 @@ extern unsigned long vmalloc_nr_pages(void);
 static inline unsigned long vmalloc_nr_pages(void) { return 0; }
 #endif
 
-extern void *vmalloc(unsigned long size) __alloc_size(1);
-extern void *vzalloc(unsigned long size) __alloc_size(1);
-extern void *vmalloc_user(unsigned long size) __alloc_size(1);
-extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1);
-extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1);
-extern void *vmalloc_32(unsigned long size) __alloc_size(1);
-extern void *vmalloc_32_user(unsigned long size) __alloc_size(1);
-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
-extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
+extern void *vmalloc_noprof(unsigned long size) __alloc_size(1);
+#define vmalloc(...)		alloc_hooks(vmalloc_noprof(__VA_ARGS__))
+
+extern void *vzalloc_noprof(unsigned long size) __alloc_size(1);
+#define vzalloc(...)		alloc_hooks(vzalloc_noprof(__VA_ARGS__))
+
+extern void *vmalloc_user_noprof(unsigned long size) __alloc_size(1);
+#define vmalloc_user(...)	alloc_hooks(vmalloc_user_noprof(__VA_ARGS__))
+
+extern void *vmalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
+#define vmalloc_node(...)	alloc_hooks(vmalloc_node_noprof(__VA_ARGS__))
+
+extern void *vzalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
+#define vzalloc_node(...)	alloc_hooks(vzalloc_node_noprof(__VA_ARGS__))
+
+extern void *vmalloc_32_noprof(unsigned long size) __alloc_size(1);
+#define vmalloc_32(...)		alloc_hooks(vmalloc_32_noprof(__VA_ARGS__))
+
+extern void *vmalloc_32_user_noprof(unsigned long size) __alloc_size(1);
+#define vmalloc_32_user(...)	alloc_hooks(vmalloc_32_user_noprof(__VA_ARGS__))
+
+extern void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
+#define __vmalloc(...)		alloc_hooks(__vmalloc_noprof(__VA_ARGS__))
+
+extern void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
 			unsigned long start, unsigned long end, gfp_t gfp_mask,
 			pgprot_t prot, unsigned long vm_flags, int node,
 			const void *caller) __alloc_size(1);
-void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
+#define __vmalloc_node_range(...)	alloc_hooks(__vmalloc_node_range_noprof(__VA_ARGS__))
+
+void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
 		int node, const void *caller) __alloc_size(1);
-void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
+#define __vmalloc_node(...)	alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__))
+
+void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
+#define vmalloc_huge(...)	alloc_hooks(vmalloc_huge_noprof(__VA_ARGS__))
+
+extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
+#define __vmalloc_array(...)	alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__))
+
+extern void *vmalloc_array_noprof(size_t n, size_t size) __alloc_size(1, 2);
+#define vmalloc_array(...)	alloc_hooks(vmalloc_array_noprof(__VA_ARGS__))
+
+extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
+#define __vcalloc(...)		alloc_hooks(__vcalloc_noprof(__VA_ARGS__))
 
-extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
-extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
-extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
-extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2);
+extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
+#define vcalloc(...)		alloc_hooks(vcalloc_noprof(__VA_ARGS__))
 
 extern void vfree(const void *addr);
 extern void vfree_atomic(const void *addr);
diff --git a/kernel/kallsyms_selftest.c b/kernel/kallsyms_selftest.c
index b4cac76ea5e9..3ea9be364e32 100644
--- a/kernel/kallsyms_selftest.c
+++ b/kernel/kallsyms_selftest.c
@@ -82,7 +82,7 @@ static struct test_item test_items[] = {
 	ITEM_FUNC(kallsyms_test_func_static),
 	ITEM_FUNC(kallsyms_test_func),
 	ITEM_FUNC(kallsyms_test_func_weak),
-	ITEM_FUNC(vmalloc),
+	ITEM_FUNC(vmalloc_noprof),
 	ITEM_FUNC(vfree),
 #ifdef CONFIG_KALLSYMS_ALL
 	ITEM_DATA(kallsyms_test_var_bss_static),
diff --git a/mm/util.c b/mm/util.c
index 291f7945190f..19c90036d3cc 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -639,7 +639,7 @@ void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node)
 	 * about the resulting pointer, and cannot play
 	 * protection games.
 	 */
-	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
 			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
 			node, __builtin_return_address(0));
 }
@@ -698,12 +698,12 @@ void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flag
 EXPORT_SYMBOL(kvrealloc_noprof);
 
 /**
- * __vmalloc_array - allocate memory for a virtually contiguous array.
+ * __vmalloc_array_noprof - allocate memory for a virtually contiguous array.
  * @n: number of elements.
  * @size: element size.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
+void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
 {
 	size_t bytes;
 
@@ -711,18 +711,18 @@ void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
 		return NULL;
 	return __vmalloc(bytes, flags);
 }
-EXPORT_SYMBOL(__vmalloc_array);
+EXPORT_SYMBOL(__vmalloc_array_noprof);
 
 /**
- * vmalloc_array - allocate memory for a virtually contiguous array.
+ * vmalloc_array_noprof - allocate memory for a virtually contiguous array.
  * @n: number of elements.
  * @size: element size.
  */
-void *vmalloc_array(size_t n, size_t size)
+void *vmalloc_array_noprof(size_t n, size_t size)
 {
 	return __vmalloc_array(n, size, GFP_KERNEL);
 }
-EXPORT_SYMBOL(vmalloc_array);
+EXPORT_SYMBOL(vmalloc_array_noprof);
 
 /**
  * __vcalloc - allocate and zero memory for a virtually contiguous array.
@@ -730,22 +730,22 @@ EXPORT_SYMBOL(vmalloc_array);
  * @size: element size.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-void *__vcalloc(size_t n, size_t size, gfp_t flags)
+void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
 {
 	return __vmalloc_array(n, size, flags | __GFP_ZERO);
 }
-EXPORT_SYMBOL(__vcalloc);
+EXPORT_SYMBOL(__vcalloc_noprof);
 
 /**
- * vcalloc - allocate and zero memory for a virtually contiguous array.
+ * vcalloc_noprof - allocate and zero memory for a virtually contiguous array.
  * @n: number of elements.
  * @size: element size.
  */
-void *vcalloc(size_t n, size_t size)
+void *vcalloc_noprof(size_t n, size_t size)
 {
 	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
 }
-EXPORT_SYMBOL(vcalloc);
+EXPORT_SYMBOL(vcalloc_noprof);
 
 struct anon_vma *folio_anon_vma(struct folio *folio)
 {
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d12a17fc0c17..5239f2c9ecae 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3025,12 +3025,12 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
 			 * but mempolicy wants to alloc memory by interleaving.
 			 */
 			if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
-				nr = alloc_pages_bulk_array_mempolicy(bulk_gfp,
+				nr = alloc_pages_bulk_array_mempolicy_noprof(bulk_gfp,
 							nr_pages_request,
 							pages + nr_allocated);
 
 			else
-				nr = alloc_pages_bulk_array_node(bulk_gfp, nid,
+				nr = alloc_pages_bulk_array_node_noprof(bulk_gfp, nid,
 							nr_pages_request,
 							pages + nr_allocated);
 
@@ -3060,9 +3060,9 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
 			break;
 
 		if (nid == NUMA_NO_NODE)
-			page = alloc_pages(alloc_gfp, order);
+			page = alloc_pages_noprof(alloc_gfp, order);
 		else
-			page = alloc_pages_node(nid, alloc_gfp, order);
+			page = alloc_pages_node_noprof(nid, alloc_gfp, order);
 		if (unlikely(!page)) {
 			if (!nofail)
 				break;
@@ -3119,10 +3119,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
-		area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
+		area->pages = __vmalloc_node_noprof(array_size, 1, nested_gfp, node,
 					area->caller);
 	} else {
-		area->pages = kmalloc_node(array_size, nested_gfp, node);
+		area->pages = kmalloc_node_noprof(array_size, nested_gfp, node);
 	}
 
 	if (!area->pages) {
@@ -3205,7 +3205,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 }
 
 /**
- * __vmalloc_node_range - allocate virtually contiguous memory
+ * __vmalloc_node_range_noprof - allocate virtually contiguous memory
  * @size:		  allocation size
  * @align:		  desired alignment
  * @start:		  vm area range start
@@ -3232,7 +3232,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
  *
  * Return: the address of the area or %NULL on failure
  */
-void *__vmalloc_node_range(unsigned long size, unsigned long align,
+void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
 			unsigned long start, unsigned long end, gfp_t gfp_mask,
 			pgprot_t prot, unsigned long vm_flags, int node,
 			const void *caller)
@@ -3361,7 +3361,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 }
 
 /**
- * __vmalloc_node - allocate virtually contiguous memory
+ * __vmalloc_node_noprof - allocate virtually contiguous memory
  * @size:	    allocation size
  * @align:	    desired alignment
  * @gfp_mask:	    flags for the page level allocator
@@ -3379,10 +3379,10 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *__vmalloc_node(unsigned long size, unsigned long align,
+void *__vmalloc_node_noprof(unsigned long size, unsigned long align,
 			    gfp_t gfp_mask, int node, const void *caller)
 {
-	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
+	return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END,
 				gfp_mask, PAGE_KERNEL, 0, node, caller);
 }
 /*
@@ -3391,15 +3391,15 @@ void *__vmalloc_node(unsigned long size, unsigned long align,
  * than that.
  */
 #ifdef CONFIG_TEST_VMALLOC_MODULE
-EXPORT_SYMBOL_GPL(__vmalloc_node);
+EXPORT_SYMBOL_GPL(__vmalloc_node_noprof);
 #endif
 
-void *__vmalloc(unsigned long size, gfp_t gfp_mask)
+void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
 {
-	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
+	return __vmalloc_node_noprof(size, 1, gfp_mask, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
-EXPORT_SYMBOL(__vmalloc);
+EXPORT_SYMBOL(__vmalloc_noprof);
 
 /**
  * vmalloc - allocate virtually contiguous memory
@@ -3413,12 +3413,12 @@ EXPORT_SYMBOL(__vmalloc);
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vmalloc(unsigned long size)
+void *vmalloc_noprof(unsigned long size)
 {
-	return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
+	return __vmalloc_node_noprof(size, 1, GFP_KERNEL, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
-EXPORT_SYMBOL(vmalloc);
+EXPORT_SYMBOL(vmalloc_noprof);
 
 /**
  * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
@@ -3432,16 +3432,16 @@ EXPORT_SYMBOL(vmalloc);
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
+void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask)
 {
-	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
 				    gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
 				    NUMA_NO_NODE, __builtin_return_address(0));
 }
-EXPORT_SYMBOL_GPL(vmalloc_huge);
+EXPORT_SYMBOL_GPL(vmalloc_huge_noprof);
 
 /**
- * vzalloc - allocate virtually contiguous memory with zero fill
+ * vzalloc_noprof - allocate virtually contiguous memory with zero fill
  * @size:    allocation size
  *
  * Allocate enough pages to cover @size from the page level
@@ -3453,12 +3453,12 @@ EXPORT_SYMBOL_GPL(vmalloc_huge);
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vzalloc(unsigned long size)
+void *vzalloc_noprof(unsigned long size)
 {
-	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
+	return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
-EXPORT_SYMBOL(vzalloc);
+EXPORT_SYMBOL(vzalloc_noprof);
 
 /**
  * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
@@ -3469,17 +3469,17 @@ EXPORT_SYMBOL(vzalloc);
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vmalloc_user(unsigned long size)
+void *vmalloc_user_noprof(unsigned long size)
 {
-	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
+	return __vmalloc_node_range_noprof(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
 				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
 				    VM_USERMAP, NUMA_NO_NODE,
 				    __builtin_return_address(0));
 }
-EXPORT_SYMBOL(vmalloc_user);
+EXPORT_SYMBOL(vmalloc_user_noprof);
 
 /**
- * vmalloc_node - allocate memory on a specific node
+ * vmalloc_node_noprof - allocate memory on a specific node
  * @size:	  allocation size
  * @node:	  numa node
  *
@@ -3491,15 +3491,15 @@ EXPORT_SYMBOL(vmalloc_user);
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vmalloc_node(unsigned long size, int node)
+void *vmalloc_node_noprof(unsigned long size, int node)
 {
-	return __vmalloc_node(size, 1, GFP_KERNEL, node,
+	return __vmalloc_node_noprof(size, 1, GFP_KERNEL, node,
 			__builtin_return_address(0));
 }
-EXPORT_SYMBOL(vmalloc_node);
+EXPORT_SYMBOL(vmalloc_node_noprof);
 
 /**
- * vzalloc_node - allocate memory on a specific node with zero fill
+ * vzalloc_node_noprof - allocate memory on a specific node with zero fill
  * @size:	allocation size
  * @node:	numa node
  *
@@ -3509,12 +3509,12 @@ EXPORT_SYMBOL(vmalloc_node);
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vzalloc_node(unsigned long size, int node)
+void *vzalloc_node_noprof(unsigned long size, int node)
 {
-	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
+	return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, node,
 				__builtin_return_address(0));
 }
-EXPORT_SYMBOL(vzalloc_node);
+EXPORT_SYMBOL(vzalloc_node_noprof);
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
@@ -3529,7 +3529,7 @@ EXPORT_SYMBOL(vzalloc_node);
 #endif
 
 /**
- * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
+ * vmalloc_32_noprof - allocate virtually contiguous memory (32bit addressable)
  * @size:	allocation size
  *
  * Allocate enough 32bit PA addressable pages to cover @size from the
@@ -3537,15 +3537,15 @@ EXPORT_SYMBOL(vzalloc_node);
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vmalloc_32(unsigned long size)
+void *vmalloc_32_noprof(unsigned long size)
 {
-	return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
+	return __vmalloc_node_noprof(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
 			__builtin_return_address(0));
 }
-EXPORT_SYMBOL(vmalloc_32);
+EXPORT_SYMBOL(vmalloc_32_noprof);
 
 /**
- * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
+ * vmalloc_32_user_noprof - allocate zeroed virtually contiguous 32bit memory
  * @size:	     allocation size
  *
  * The resulting memory area is 32bit addressable and zeroed so it can be
@@ -3553,14 +3553,14 @@ EXPORT_SYMBOL(vmalloc_32);
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vmalloc_32_user(unsigned long size)
+void *vmalloc_32_user_noprof(unsigned long size)
 {
-	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
+	return __vmalloc_node_range_noprof(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
 				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
 				    VM_USERMAP, NUMA_NO_NODE,
 				    __builtin_return_address(0));
 }
-EXPORT_SYMBOL(vmalloc_32_user);
+EXPORT_SYMBOL(vmalloc_32_user_noprof);
 
 /*
  * Atomically zero bytes in the iterator.
-- 
2.43.0.687.g38aa6559b0-goog

