Subject: [PATCH 1/4] vmalloc: add vm_flags argument to internal __vmalloc_node()
From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Date: 2018-01-23 10:55 UTC
To: Dave Hansen, linux-kernel, Christoph Hellwig, linux-mm,
	Andy Lutomirski, Andrew Morton

This allows vmalloc_user() and vmalloc_32_user() to set VM_USERMAP
directly at allocation time and avoids the extra find_vm_area() call
(an rbtree lookup) after every allocation.
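
For illustration, a hypothetical caller inside mm/vmalloc.c could now
request extra vm area flags at allocation time instead of patching
area->flags afterwards (sketch only, not part of this patch; the
function name is made up):

	/* Hypothetical: allocate a mapping without a guard page. */
	static void *example_alloc_noguard(unsigned long size)
	{
		return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL,
				      VM_NO_GUARD, NUMA_NO_NODE,
				      __builtin_return_address(0));
	}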

Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
---
 mm/vmalloc.c |   54 +++++++++++++++++++++---------------------------------
 1 file changed, 21 insertions(+), 33 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 673942094328..cece3fb33cef 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1662,7 +1662,9 @@ EXPORT_SYMBOL(vmap);
 
 static void *__vmalloc_node(unsigned long size, unsigned long align,
 			    gfp_t gfp_mask, pgprot_t prot,
+			    unsigned long vm_flags,
 			    int node, const void *caller);
+
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 				 pgprot_t prot, int node)
 {
@@ -1681,7 +1683,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
 		pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
-				PAGE_KERNEL, node, area->caller);
+				       PAGE_KERNEL, 0, node, area->caller);
 	} else {
 		pages = kmalloc_node(array_size, nested_gfp, node);
 	}
@@ -1752,7 +1754,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 		goto fail;
 
 	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
-				vm_flags, start, end, node, gfp_mask, caller);
+				  vm_flags, start, end, node, gfp_mask, caller);
 	if (!area)
 		goto fail;
 
@@ -1783,6 +1785,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
  *	@align:		desired alignment
  *	@gfp_mask:	flags for the page level allocator
  *	@prot:		protection mask for the allocated pages
+ *	@vm_flags:	additional vm area flags (e.g. %VM_NO_GUARD)
  *	@node:		node to use for allocation or NUMA_NO_NODE
  *	@caller:	caller's return address
  *
@@ -1799,15 +1802,16 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
  */
 static void *__vmalloc_node(unsigned long size, unsigned long align,
 			    gfp_t gfp_mask, pgprot_t prot,
+			    unsigned long vm_flags,
 			    int node, const void *caller)
 {
 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
-				gfp_mask, prot, 0, node, caller);
+				gfp_mask, prot, vm_flags, node, caller);
 }
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
-	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
+	return __vmalloc_node(size, 1, gfp_mask, prot, 0, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__vmalloc);
@@ -1815,15 +1819,15 @@ EXPORT_SYMBOL(__vmalloc);
 static inline void *__vmalloc_node_flags(unsigned long size,
 					int node, gfp_t flags)
 {
-	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
-					node, __builtin_return_address(0));
+	return __vmalloc_node(size, 1, flags, PAGE_KERNEL, 0, node,
+			      __builtin_return_address(0));
 }
 
 
 void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags,
 				  void *caller)
 {
-	return __vmalloc_node(size, 1, flags, PAGE_KERNEL, node, caller);
+	return __vmalloc_node(size, 1, flags, PAGE_KERNEL, 0, node, caller);
 }
 
 /**
@@ -1868,18 +1872,9 @@ EXPORT_SYMBOL(vzalloc);
  */
 void *vmalloc_user(unsigned long size)
 {
-	struct vm_struct *area;
-	void *ret;
-
-	ret = __vmalloc_node(size, SHMLBA,
-			     GFP_KERNEL | __GFP_ZERO,
-			     PAGE_KERNEL, NUMA_NO_NODE,
-			     __builtin_return_address(0));
-	if (ret) {
-		area = find_vm_area(ret);
-		area->flags |= VM_USERMAP;
-	}
-	return ret;
+	return __vmalloc_node(size, SHMLBA, GFP_KERNEL | __GFP_ZERO,
+			      PAGE_KERNEL, VM_USERMAP, NUMA_NO_NODE,
+			      __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_user);
 
@@ -1896,8 +1891,8 @@ EXPORT_SYMBOL(vmalloc_user);
  */
 void *vmalloc_node(unsigned long size, int node)
 {
-	return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL,
-					node, __builtin_return_address(0));
+	return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL, 0, node,
+			      __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_node);
 
@@ -1938,7 +1933,7 @@ EXPORT_SYMBOL(vzalloc_node);
 
 void *vmalloc_exec(unsigned long size)
 {
-	return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL_EXEC,
+	return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
 			      NUMA_NO_NODE, __builtin_return_address(0));
 }
 
@@ -1959,7 +1954,7 @@ void *vmalloc_exec(unsigned long size)
  */
 void *vmalloc_32(unsigned long size)
 {
-	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
+	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL, 0,
 			      NUMA_NO_NODE, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_32);
@@ -1973,16 +1968,9 @@ EXPORT_SYMBOL(vmalloc_32);
  */
 void *vmalloc_32_user(unsigned long size)
 {
-	struct vm_struct *area;
-	void *ret;
-
-	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
-			     NUMA_NO_NODE, __builtin_return_address(0));
-	if (ret) {
-		area = find_vm_area(ret);
-		area->flags |= VM_USERMAP;
-	}
-	return ret;
+	return __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO,
+			      PAGE_KERNEL, VM_USERMAP, NUMA_NO_NODE,
+			      __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_32_user);
 

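For context on why VM_USERMAP matters here: remap_vmalloc_range()
returns -EINVAL for any area that lacks the flag, so drivers that
mmap a vmalloc_user() buffer depend on it being set. A minimal sketch
of such a consumer (hypothetical driver code, not part of this patch):

	#include <linux/fs.h>
	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	/* Hypothetical driver: expose a vmalloc_user() buffer via mmap(). */
	static void *example_buf;	/* allocated with vmalloc_user() */

	static int example_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/*
		 * remap_vmalloc_range() checks VM_USERMAP on the backing
		 * vm_struct, which vmalloc_user() now sets at allocation
		 * time; the vma must be large enough to cover the buffer.
		 */
		return remap_vmalloc_range(vma, example_buf, 0);
	}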

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).