* [PATCH 1/2] um: support some of ARCH_HAS_SET_MEMORY
@ 2020-12-05 20:50 Johannes Berg
  2020-12-05 20:50 ` [PATCH 2/2] um: allocate a guard page to helper threads Johannes Berg
From: Johannes Berg @ 2020-12-05 20:50 UTC
  To: linux-um; +Cc: Johannes Berg

From: Johannes Berg <johannes.berg@intel.com>

For now, only support set_memory_ro()/rw(), which we need for
the stack protection in the next patch.
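
A rough usage sketch (hypothetical caller; the real user is the stack
guard page added in the next patch), using the generic declarations
pulled in via <asm-generic/set_memory.h>:

	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);

	set_memory_ro(addr, 1);		/* write-protect the first page */
	/* ... use the still-writable second page ... */
	set_memory_rw(addr, 1);		/* restore write access */
	free_pages(addr, 1);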

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
---
 arch/um/Kconfig                  |  1 +
 arch/um/include/asm/pgtable.h    |  3 ++
 arch/um/include/asm/set_memory.h |  1 +
 arch/um/kernel/tlb.c             | 54 ++++++++++++++++++++++++++++++++
 4 files changed, 59 insertions(+)
 create mode 100644 arch/um/include/asm/set_memory.h

diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 1c57599b82fa..70ee19cc6ec6 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -15,6 +15,7 @@ config UML
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DEBUG_BUGVERBOSE
 	select NO_DMA
+	select ARCH_HAS_SET_MEMORY
 	select GENERIC_IRQ_SHOW
 	select GENERIC_CPU_DEVICES
 	select GENERIC_CLOCKEVENTS
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index def376194dce..39376bb63abf 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -55,12 +55,15 @@ extern unsigned long end_iomem;
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
 #define __PAGE_KERNEL_EXEC                                              \
 	 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define __PAGE_KERNEL_RO						\
+	 (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
 #define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
 #define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
 #define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
 #define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_RO		__pgprot(__PAGE_KERNEL_RO)
 
 /*
  * The i386 can't do page protection for execute, and considers that the same
diff --git a/arch/um/include/asm/set_memory.h b/arch/um/include/asm/set_memory.h
new file mode 100644
index 000000000000..24266c63720d
--- /dev/null
+++ b/arch/um/include/asm/set_memory.h
@@ -0,0 +1 @@
+#include <asm-generic/set_memory.h>
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 61776790cd67..437d1f1cc5ec 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -608,3 +608,57 @@ void force_flush_all(void)
 		vma = vma->vm_next;
 	}
 }
+
+struct page_change_data {
+	unsigned int set_mask, clear_mask;
+};
+
+static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
+{
+	struct page_change_data *cdata = data;
+	pte_t pte = READ_ONCE(*ptep);
+
+	pte_clear_bits(pte, cdata->clear_mask);
+	pte_set_bits(pte, cdata->set_mask);
+
+	set_pte(ptep, pte);
+	return 0;
+}
+
+static int change_memory(unsigned long start, unsigned long pages,
+			 unsigned int set_mask, unsigned int clear_mask)
+{
+	unsigned long size = pages * PAGE_SIZE;
+	struct page_change_data data;
+	int ret;
+
+	data.set_mask = set_mask;
+	data.clear_mask = clear_mask;
+
+	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+				  &data);
+
+	flush_tlb_kernel_range(start, start + size);
+
+	return ret;
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+	return change_memory(addr, numpages, 0, _PAGE_RW);
+}
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+	return change_memory(addr, numpages, _PAGE_RW, 0);
+}
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+	return -EOPNOTSUPP;
+}
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+	return -EOPNOTSUPP;
+}
-- 
2.26.2



* [PATCH 2/2] um: allocate a guard page to helper threads
  2020-12-05 20:50 [PATCH 1/2] um: support some of ARCH_HAS_SET_MEMORY Johannes Berg
@ 2020-12-05 20:50 ` Johannes Berg
  2020-12-05 22:09   ` Anton Ivanov
From: Johannes Berg @ 2020-12-05 20:50 UTC
  To: linux-um; +Cc: Johannes Berg

From: Johannes Berg <johannes.berg@intel.com>

We've been running into stack overflows in helper threads
corrupting memory (e.g. because somebody put printf() or
os_info() there), so to avoid those causing hard-to-debug
issues later on, allocate a guard page for helper thread
stacks and mark it read-only.

Unfortunately, the crash dump at that point is useless as
the stack tracer will try to backtrace the *kernel* thread,
not the helper thread, but at least we don't keep running
only to hit some hard-to-debug issue caused by the corruption
later.
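
The resulting layout per allocation is roughly (the stack grows down
towards the read-only guard page):

	addr                    addr + PAGE_SIZE        addr + 2*PAGE_SIZE
	  |  guard page (RO)      |  usable stack (RW)    |
	  +-----------------------+-----------------------+
	                          ^ alloc_stack() return value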

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
---
 arch/um/drivers/ubd_kern.c         |  2 +-
 arch/um/include/shared/kern_util.h |  2 +-
 arch/um/kernel/process.c           | 11 +++++++----
 arch/um/os-Linux/helper.c          |  4 ++--
 4 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index d4c39e595c72..390edda0794f 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1192,7 +1192,7 @@ static int __init ubd_driver_init(void){
 		/* Letting ubd=sync be like using ubd#s= instead of ubd#= is
 		 * enough. So use anyway the io thread. */
 	}
-	stack = alloc_stack(0, 0);
+	stack = alloc_stack(0);
 	io_pid = start_io_thread(stack + PAGE_SIZE - sizeof(void *),
 				 &thread_fd);
 	if(io_pid < 0){
diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h
index 2888ec812f6e..d8c279e3312f 100644
--- a/arch/um/include/shared/kern_util.h
+++ b/arch/um/include/shared/kern_util.h
@@ -19,7 +19,7 @@ extern int kmalloc_ok;
 #define UML_ROUND_UP(addr) \
 	((((unsigned long) addr) + PAGE_SIZE - 1) & PAGE_MASK)
 
-extern unsigned long alloc_stack(int order, int atomic);
+extern unsigned long alloc_stack(int atomic);
 extern void free_stack(unsigned long stack, int order);
 
 struct pt_regs;
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index f0f50eae2293..37490c27f711 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -32,6 +32,7 @@
 #include <os.h>
 #include <skas.h>
 #include <linux/time-internal.h>
+#include <asm/set_memory.h>
 
 /*
  * This is a per-cpu array.  A processor only modifies its entry and it only
@@ -62,16 +63,18 @@ void free_stack(unsigned long stack, int order)
 	free_pages(stack, order);
 }
 
-unsigned long alloc_stack(int order, int atomic)
+unsigned long alloc_stack(int atomic)
 {
-	unsigned long page;
+	unsigned long addr;
 	gfp_t flags = GFP_KERNEL;
 
 	if (atomic)
 		flags = GFP_ATOMIC;
-	page = __get_free_pages(flags, order);
+	addr = __get_free_pages(flags, 1);
 
-	return page;
+	set_memory_ro(addr, 1);
+
+	return addr + PAGE_SIZE;
 }
 
 static inline void set_current(struct task_struct *task)
diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
index 9fa6e4187d4f..feb48d796e00 100644
--- a/arch/um/os-Linux/helper.c
+++ b/arch/um/os-Linux/helper.c
@@ -45,7 +45,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
 	unsigned long stack, sp;
 	int pid, fds[2], ret, n;
 
-	stack = alloc_stack(0, __cant_sleep());
+	stack = alloc_stack(__cant_sleep());
 	if (stack == 0)
 		return -ENOMEM;
 
@@ -116,7 +116,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
 	unsigned long stack, sp;
 	int pid, status, err;
 
-	stack = alloc_stack(0, __cant_sleep());
+	stack = alloc_stack(__cant_sleep());
 	if (stack == 0)
 		return -ENOMEM;
 
-- 
2.26.2



* Re: [PATCH 2/2] um: allocate a guard page to helper threads
  2020-12-05 20:50 ` [PATCH 2/2] um: allocate a guard page to helper threads Johannes Berg
@ 2020-12-05 22:09   ` Anton Ivanov
  2020-12-06  8:11     ` Johannes Berg
From: Anton Ivanov @ 2020-12-05 22:09 UTC
  To: Johannes Berg, linux-um; +Cc: Johannes Berg

On 05/12/2020 20:50, Johannes Berg wrote:
> From: Johannes Berg <johannes.berg@intel.com>
> 
> We've been running into stack overflows in helper threads
> corrupting memory (e.g. because somebody put printf() or
> os_info() there), so to avoid those causing hard-to-debug
> issues later on, allocate a guard page for helper thread
> stacks and mark it read-only.
> 
> Unfortunately, the crash dump at that point is useless as
> the stack tracer will try to backtrace the *kernel* thread,
> not the helper thread, but at least we don't survive to a
> random issue caused by corruption.
> 
> Signed-off-by: Johannes Berg <johannes.berg@intel.com>
> ---
>   arch/um/drivers/ubd_kern.c         |  2 +-
>   arch/um/include/shared/kern_util.h |  2 +-
>   arch/um/kernel/process.c           | 11 +++++++----
>   arch/um/os-Linux/helper.c          |  4 ++--
>   4 files changed, 11 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
> index d4c39e595c72..390edda0794f 100644
> --- a/arch/um/drivers/ubd_kern.c
> +++ b/arch/um/drivers/ubd_kern.c
> @@ -1192,7 +1192,7 @@ static int __init ubd_driver_init(void){
>   		/* Letting ubd=sync be like using ubd#s= instead of ubd#= is
>   		 * enough. So use anyway the io thread. */
>   	}
> -	stack = alloc_stack(0, 0);
> +	stack = alloc_stack(0);
>   	io_pid = start_io_thread(stack + PAGE_SIZE - sizeof(void *),
>   				 &thread_fd);
>   	if(io_pid < 0){
> diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h
> index 2888ec812f6e..d8c279e3312f 100644
> --- a/arch/um/include/shared/kern_util.h
> +++ b/arch/um/include/shared/kern_util.h
> @@ -19,7 +19,7 @@ extern int kmalloc_ok;
>   #define UML_ROUND_UP(addr) \
>   	((((unsigned long) addr) + PAGE_SIZE - 1) & PAGE_MASK)
>   
> -extern unsigned long alloc_stack(int order, int atomic);
> +extern unsigned long alloc_stack(int atomic);
>   extern void free_stack(unsigned long stack, int order);
>   
>   struct pt_regs;
> diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
> index f0f50eae2293..37490c27f711 100644
> --- a/arch/um/kernel/process.c
> +++ b/arch/um/kernel/process.c
> @@ -32,6 +32,7 @@
>   #include <os.h>
>   #include <skas.h>
>   #include <linux/time-internal.h>
> +#include <asm/set_memory.h>
>   
>   /*
>    * This is a per-cpu array.  A processor only modifies its entry and it only
> @@ -62,16 +63,18 @@ void free_stack(unsigned long stack, int order)
>   	free_pages(stack, order);
>   }
>   
> -unsigned long alloc_stack(int order, int atomic)
> +unsigned long alloc_stack(int atomic)
>   {
> -	unsigned long page;
> +	unsigned long addr;
>   	gfp_t flags = GFP_KERNEL;
>   
>   	if (atomic)
>   		flags = GFP_ATOMIC;
> -	page = __get_free_pages(flags, order);
> +	addr = __get_free_pages(flags, 1);

Why are you dropping order? I believe we do not fit in a stack of order
less than 2 on newer 64-bit, and we need even more with valgrind.

>   
> -	return page;
> +	set_memory_ro(addr, 1);
> +
> +	return addr + PAGE_SIZE;
>   }
>   
>   static inline void set_current(struct task_struct *task)
> diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
> index 9fa6e4187d4f..feb48d796e00 100644
> --- a/arch/um/os-Linux/helper.c
> +++ b/arch/um/os-Linux/helper.c
> @@ -45,7 +45,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
>   	unsigned long stack, sp;
>   	int pid, fds[2], ret, n;
>   
> -	stack = alloc_stack(0, __cant_sleep());
> +	stack = alloc_stack(__cant_sleep());
>   	if (stack == 0)
>   		return -ENOMEM;
>   
> @@ -116,7 +116,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
>   	unsigned long stack, sp;
>   	int pid, status, err;
>   
> -	stack = alloc_stack(0, __cant_sleep());
> +	stack = alloc_stack(__cant_sleep());
>   	if (stack == 0)
>   		return -ENOMEM;
>   
> 


-- 
Anton R. Ivanov
Cambridgegreys Limited. Registered in England. Company Number 10273661
https://www.cambridgegreys.com/


* Re: [PATCH 2/2] um: allocate a guard page to helper threads
  2020-12-05 22:09   ` Anton Ivanov
@ 2020-12-06  8:11     ` Johannes Berg
  2020-12-06  8:23       ` Anton Ivanov
From: Johannes Berg @ 2020-12-06  8:11 UTC
  To: Anton Ivanov, linux-um

On Sat, 2020-12-05 at 22:09 +0000, Anton Ivanov wrote:

> > -unsigned long alloc_stack(int order, int atomic)
> > +unsigned long alloc_stack(int atomic)
> >   {
> > -	unsigned long page;
> > +	unsigned long addr;
> >   	gfp_t flags = GFP_KERNEL;
> >   
> >   	if (atomic)
> >   		flags = GFP_ATOMIC;
> > -	page = __get_free_pages(flags, order);
> > +	addr = __get_free_pages(flags, 1);
> 
> Why are you dropping order?

It was always called with 0 as the order argument, so I didn't think it
was relevant. I guess we could keep it, allocate order+1 and then
reserve a page, but then if you e.g. wanted order 2 (16k) you'd actually
get 28k of usable stack while using up 32k. So I thought it was less
surprising to just remove it.
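
(Roughly, keeping it would have looked something like this, purely as a
hypothetical sketch, not what this patch does:

	addr = __get_free_pages(flags, order + 1);
	set_memory_ro(addr, 1);
	return addr + PAGE_SIZE;

i.e. for order 2 you'd allocate 8 pages / 32k, lose one 4k page to the
guard and end up with 28k usable.)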

> I believe we do not fit in a stack of order 
> less than 2 on newer 64 bit and we need even more with valgrind.

Not sure what you mean by this, tbh. Running the whole of UML in
valgrind? Why would that require more stack? Kernel threads are
unaffected, and the helper threads don't do much that would really make
a significant difference between 32 and 64 bit?

johannes



* Re: [PATCH 2/2] um: allocate a guard page to helper threads
  2020-12-06  8:11     ` Johannes Berg
@ 2020-12-06  8:23       ` Anton Ivanov
  2020-12-06  8:53         ` Johannes Berg
From: Anton Ivanov @ 2020-12-06  8:23 UTC
  To: Johannes Berg, linux-um

On 06/12/2020 08:11, Johannes Berg wrote:
> On Sat, 2020-12-05 at 22:09 +0000, Anton Ivanov wrote:
>
>>> -unsigned long alloc_stack(int order, int atomic)
>>> +unsigned long alloc_stack(int atomic)
>>>    {
>>> -	unsigned long page;
>>> +	unsigned long addr;
>>>    	gfp_t flags = GFP_KERNEL;
>>>    
>>>    	if (atomic)
>>>    		flags = GFP_ATOMIC;
>>> -	page = __get_free_pages(flags, order);
>>> +	addr = __get_free_pages(flags, 1);
>> Why are you dropping order?
> It was always called with 0 as the order argument, so I didn't think it
> was relevant. I guess we can keep it and allocate order+1, and then
> reserve a page, but if then e.g. you wanted order 2 (16k) you'd actually
> get 28k of usable stack and use up 32k. So I thought it was less
> surprising if we'd remove it.
>
>> I believe we do not fit in a stack of order
>> less than 2 on newer 64 bit and we need even more with valgrind.
> Not sure what you mean by this, tbh. Running the whole of UML in
> valgrind? Why would that require more stack?

According to the description of the stack order option in the config,
it does.

> Kernel threads are
> unaffected, and the helper threads don't do much that would really make
> a significant difference between 32 and 64 bit?

OK, I need to have another look at where we use the order parameter
and what that config option really means.

> johannes
>
>

-- 
Anton R. Ivanov
Cambridgegreys Limited. Registered in England. Company Number 10273661
https://www.cambridgegreys.com/



* Re: [PATCH 2/2] um: allocate a guard page to helper threads
  2020-12-06  8:23       ` Anton Ivanov
@ 2020-12-06  8:53         ` Johannes Berg
  2020-12-06  9:31           ` Anton Ivanov
From: Johannes Berg @ 2020-12-06  8:53 UTC
  To: Anton Ivanov, linux-um

On Sun, 2020-12-06 at 08:23 +0000, Anton Ivanov wrote:
> On 06/12/2020 08:11, Johannes Berg wrote:
> > On Sat, 2020-12-05 at 22:09 +0000, Anton Ivanov wrote:
> > 
> > > > -unsigned long alloc_stack(int order, int atomic)
> > > > +unsigned long alloc_stack(int atomic)
> > > >    {
> > > > -	unsigned long page;
> > > > +	unsigned long addr;
> > > >    	gfp_t flags = GFP_KERNEL;
> > > >    
> > > >    	if (atomic)
> > > >    		flags = GFP_ATOMIC;
> > > > -	page = __get_free_pages(flags, order);
> > > > +	addr = __get_free_pages(flags, 1);
> > > Why are you dropping order?
> > It was always called with 0 as the order argument, so I didn't think it
> > was relevant. I guess we can keep it and allocate order+1, and then
> > reserve a page, but if then e.g. you wanted order 2 (16k) you'd actually
> > get 28k of usable stack and use up 32k. So I thought it was less
> > surprising if we'd remove it.
> > 
> > > I believe we do not fit in a stack of order
> > > less than 2 on newer 64 bit and we need even more with valgrind.
> > Not sure what you mean by this, tbh. Running the whole of UML in
> > valgrind? Why would that require more stack?
> 
> According to the description of the stack order option in the config it 
> does.

Ah ok, you're referring to CONFIG_KERNEL_STACK_ORDER. That indeed
affects only *kernel* threads, not the helper threads that are allocated
here.

Not sure why valgrind would matter, but perhaps it does some magic with
stacks under the hood; I don't really know in detail what it does.

> > Kernel threads are
> > unaffected, and the helper threads don't do much that would really make
> > a significant difference between 32 and 64 bit?
> 
> OK, I need to have a look again on where do we use the order parameter 
> and what that config option really means.

Well, it affects THREAD_SIZE_ORDER, which is used in kernel/fork.c to
allocate the kernel stacks for new threads, so that makes sense.
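
(From memory, on UML that's roughly

	#define THREAD_SIZE_ORDER	CONFIG_KERNEL_STACK_ORDER
	#define THREAD_SIZE	((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)

in arch/um/include/asm/thread_info.h.)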

But the code I changed here has no relation to it, it's just for the
userspace helper threads such as winch_thread :)

johannes



* Re: [PATCH 2/2] um: allocate a guard page to helper threads
  2020-12-06  8:53         ` Johannes Berg
@ 2020-12-06  9:31           ` Anton Ivanov
From: Anton Ivanov @ 2020-12-06  9:31 UTC
  To: Johannes Berg, linux-um

On 06/12/2020 08:53, Johannes Berg wrote:
> On Sun, 2020-12-06 at 08:23 +0000, Anton Ivanov wrote:
>> On 06/12/2020 08:11, Johannes Berg wrote:
>>> On Sat, 2020-12-05 at 22:09 +0000, Anton Ivanov wrote:
>>>
>>>>> -unsigned long alloc_stack(int order, int atomic)
>>>>> +unsigned long alloc_stack(int atomic)
>>>>>     {
>>>>> -	unsigned long page;
>>>>> +	unsigned long addr;
>>>>>     	gfp_t flags = GFP_KERNEL;
>>>>>     
>>>>>     	if (atomic)
>>>>>     		flags = GFP_ATOMIC;
>>>>> -	page = __get_free_pages(flags, order);
>>>>> +	addr = __get_free_pages(flags, 1);
>>>> Why are you dropping order?
>>> It was always called with 0 as the order argument, so I didn't think it
>>> was relevant. I guess we can keep it and allocate order+1, and then
>>> reserve a page, but if then e.g. you wanted order 2 (16k) you'd actually
>>> get 28k of usable stack and use up 32k. So I thought it was less
>>> surprising if we'd remove it.
>>>
>>>> I believe we do not fit in a stack of order
>>>> less than 2 on newer 64 bit and we need even more with valgrind.
>>> Not sure what you mean by this, tbh. Running the whole of UML in
>>> valgrind? Why would that require more stack?
>>
>> According to the description of the stack order option in the config it
>> does.
> 
> Ah ok, you're referring to CONFIG_KERNEL_STACK_ORDER. That indeed
> affects only *kernel* threads, not the helper threads that are allocated
> here.
> 
> Not sure why valgrind would matter, but perhaps it does some magic with
> stacks under the hood, don't really know well what it really does.
> 
>>> Kernel threads are
>>> unaffected, and the helper threads don't do much that would really make
>>> a significant difference between 32 and 64 bit?
>>
>> OK, I need to have a look again on where do we use the order parameter
>> and what that config option really means.
> 
> Well, it affects THREAD_SIZE_ORDER, which is used in kernel/fork.c to
> allocate the kernel stacks for new threads, so that makes sense.
> 
> But the code I changed here has no relation to it, it's just for the
> userspace helper threads such as winch_thread :)

Ah, cool. I did not look at it in detail as it was past 21:00 over here
when I saw the email; I just noted the "stack order" part :)

> 
> johannes
> 
> 


-- 
Anton R. Ivanov
Cambridgegreys Limited. Registered in England. Company Number 10273661
https://www.cambridgegreys.com/

