linux-kernel.vger.kernel.org archive mirror
* [PATCH] paravirt/locks: avoid modifying static key before jump_label_init()
@ 2017-10-23 13:49 Juergen Gross
  2017-10-25  4:26 ` Dou Liyang
  2017-10-27  8:43 ` Ingo Molnar
  0 siblings, 2 replies; 9+ messages in thread
From: Juergen Gross @ 2017-10-23 13:49 UTC (permalink / raw)
  To: linux-kernel, x86; +Cc: hpa, tglx, mingo, arnd, peterz, Juergen Gross

Don't try to set the static virt_spin_lock_key to a value before
jump_label_init() has been called, as this will result in a WARN().

Solve the problem by introducing a new lock_init() hook called after
jump_label_init() instead of doing the call inside of
smp_prepare_boot_cpu().
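
For reference, the check that fires is the static key guard against use
before initialization, roughly (paraphrasing the jump label code, exact
wording may vary):

    /* include/linux/jump_label.h, paraphrased sketch */
    #define STATIC_KEY_CHECK_USE() \
        WARN(!static_key_initialized, \
             "%s used before call to jump_label_init", __func__)

static_key_enable()/static_key_disable() perform this check first, and
smp_prepare_boot_cpu() runs before jump_label_init(), so flipping
virt_spin_lock_key from there triggers the WARN().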

Signed-off-by: Juergen Gross <jgross@suse.com>
---
Based on kernel/git/tip/tip.git locking/core
---
 arch/x86/include/asm/qspinlock.h | 5 +----
 arch/x86/kernel/smpboot.c        | 1 -
 include/asm-generic/qspinlock.h  | 6 ++++++
 include/linux/spinlock.h         | 4 ++++
 include/linux/spinlock_up.h      | 4 ++++
 init/main.c                      | 2 ++
 kernel/locking/spinlock.c        | 7 +++++++
 7 files changed, 24 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 308dfd0714c7..a53ef9ed0dc1 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -49,6 +49,7 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
 #ifdef CONFIG_PARAVIRT
 DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
 
+#define native_pv_lock_init native_pv_lock_init
 void native_pv_lock_init(void) __init;
 
 #define virt_spin_lock virt_spin_lock
@@ -70,10 +71,6 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
 
 	return true;
 }
-#else
-static inline void native_pv_lock_init(void)
-{
-}
 #endif /* CONFIG_PARAVIRT */
 
 #include <asm-generic/qspinlock.h>
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 361f91674ce5..55a3121dd479 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1385,7 +1385,6 @@ void __init native_smp_prepare_boot_cpu(void)
 	/* already set me in cpu_online_mask in boot_cpu_init() */
 	cpumask_set_cpu(me, cpu_callout_mask);
 	cpu_set_state_online(me);
-	native_pv_lock_init();
 }
 
 void __init native_smp_cpus_done(unsigned int max_cpus)
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 66260777d644..42784a353401 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -111,6 +111,12 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 }
 #endif
 
+#ifndef native_pv_lock_init
+static __always_inline void native_pv_lock_init(void)
+{
+}
+#endif
+
 /*
  * Remapping spinlock architecture specific functions to the corresponding
  * queued spinlock functions.
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 69e079c5ff98..6654ff285e5c 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -420,4 +420,8 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
 
+#ifdef CONFIG_SMP
+void lock_init(void) __init;
+#endif
+
 #endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 612fb530af41..bc4787900ad7 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -80,4 +80,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 #define arch_read_can_lock(lock)	(((void)(lock), 1))
 #define arch_write_can_lock(lock)	(((void)(lock), 1))
 
+static inline void lock_init(void)
+{
+}
+
 #endif /* __LINUX_SPINLOCK_UP_H */
diff --git a/init/main.c b/init/main.c
index 0ee9c6866ada..e5c9f9bcd311 100644
--- a/init/main.c
+++ b/init/main.c
@@ -88,6 +88,7 @@
 #include <linux/io.h>
 #include <linux/cache.h>
 #include <linux/rodata_test.h>
+#include <linux/spinlock.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -567,6 +568,7 @@ asmlinkage __visible void __init start_kernel(void)
 	sort_main_extable();
 	trap_init();
 	mm_init();
+	lock_init();
 
 	ftrace_init();
 
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 4b082b5cac9e..f086e444c2ac 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -397,3 +397,10 @@ notrace int in_lock_functions(unsigned long addr)
 	&& addr < (unsigned long)__lock_text_end;
 }
 EXPORT_SYMBOL(in_lock_functions);
+
+void __init lock_init(void)
+{
+#ifdef CONFIG_QUEUED_SPINLOCKS
+	native_pv_lock_init();
+#endif
+}
-- 
2.12.3

* Re: [PATCH] paravirt/locks: avoid modifying static key before jump_label_init()
  2017-10-23 13:49 [PATCH] paravirt/locks: avoid modifying static key before jump_label_init() Juergen Gross
@ 2017-10-25  4:26 ` Dou Liyang
  2017-10-25  6:58   ` Juergen Gross
  2017-10-27  8:43 ` Ingo Molnar
  1 sibling, 1 reply; 9+ messages in thread
From: Dou Liyang @ 2017-10-25  4:26 UTC (permalink / raw)
  To: Juergen Gross, linux-kernel, x86; +Cc: hpa, tglx, mingo, arnd, peterz

Hi Juergen,

At 10/23/2017 09:49 PM, Juergen Gross wrote:
> Don't try to set the static virt_spin_lock_key to a value before
> jump_label_init() has been called, as this will result in a WARN().
>
> Solve the problem by introducing a new lock_init() hook called after
> jump_label_init() instead of doing the call inside of
> smp_prepare_boot_cpu().
>
> Signed-off-by: Juergen Gross <jgross@suse.com>
> ---
> Based on kernel/git/tip/tip.git locking/core

I also found that WARN() in the tip tree.

IMO, adding a hook in start_kernel() is not elegant. It will
affect other arches and increase the complexity of the system.

I like your original method.
So, I try to fix it by moving the native_pv_lock_init() from
  native_smp_prepare_boot_cpu() to native_smp_prepare_cpus().

I hope it's useful to you.

Thanks,
	dou.

==================<
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index aed1460..6b1335a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1323,6 +1323,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
         pr_info("CPU0: ");
         print_cpu_info(&cpu_data(0));

+       native_pv_lock_init();
+
         uv_system_init();

         set_mtrr_aps_delayed_init();
@@ -1350,7 +1352,6 @@ void __init native_smp_prepare_boot_cpu(void)
         /* already set me in cpu_online_mask in boot_cpu_init() */
         cpumask_set_cpu(me, cpu_callout_mask);
         cpu_set_state_online(me);
-       native_pv_lock_init();
  }

  void __init native_smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 5147140..570b2bc 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -236,6 +236,8 @@ static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
                 xen_raw_printk(m);
                 panic(m);
         }
+       native_pv_lock_init();
+
         xen_init_lock_cpu(0);

         smp_store_boot_cpu_info();

* Re: [PATCH] paravirt/locks: avoid modifying static key before jump_label_init()
  2017-10-25  4:26 ` Dou Liyang
@ 2017-10-25  6:58   ` Juergen Gross
  2017-10-25  7:35     ` Dou Liyang
  0 siblings, 1 reply; 9+ messages in thread
From: Juergen Gross @ 2017-10-25  6:58 UTC (permalink / raw)
  To: Dou Liyang, linux-kernel, x86; +Cc: hpa, tglx, mingo, arnd, peterz

On 25/10/17 06:26, Dou Liyang wrote:
> Hi Juergen,
> 
> At 10/23/2017 09:49 PM, Juergen Gross wrote:
>> Don't try to set the static virt_spin_lock_key to a value before
>> jump_label_init() has been called, as this will result in a WARN().
>>
>> Solve the problem by introducing a new lock_init() hook called after
>> jump_label_init() instead of doing the call inside of
>> smp_prepare_boot_cpu().
>>
>> Signed-off-by: Juergen Gross <jgross@suse.com>
>> ---
>> Based on kernel/git/tip/tip.git locking/core
> 
> I also found that WARN() in the tip tree.
> 
> IMO, adding a hook in start_kernel() is not elegant. It will
> affect other arches and increase the complexity of the system.
> 
> I like your original method.
> So, I tried to fix it by moving native_pv_lock_init() from
>  native_smp_prepare_boot_cpu() to native_smp_prepare_cpus().

Hmm, this might work, but the Xen case has to be modified (same for
my more general solution), as xen_init_spinlocks() is still modifying
the static key too early. And we can't move xen_init_spinlocks() to
smp_prepare_cpus() as this would be too late for the alternatives
patching.
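
For context, the path I mean is roughly this (current code in
arch/x86/xen/spinlock.c, trimmed):

    void __init xen_init_spinlocks(void)
    {
        if (!xen_pvspin) {
            printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
            /* runs before jump_label_init() -> WARN() */
            static_branch_disable(&virt_spin_lock_key);
            return;
        }
        printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
        /* ... rest of the pv spinlock setup ... */
    }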

So let me extend your patch a little bit to cover Xen, too.

> I hope it's useful to you.

It really is, thanks.


Juergen

* Re: [PATCH] paravirt/locks: avoid modifying static key before jump_label_init()
  2017-10-25  6:58   ` Juergen Gross
@ 2017-10-25  7:35     ` Dou Liyang
  2017-10-25  7:51       ` Juergen Gross
  0 siblings, 1 reply; 9+ messages in thread
From: Dou Liyang @ 2017-10-25  7:35 UTC (permalink / raw)
  To: Juergen Gross, linux-kernel, x86; +Cc: hpa, tglx, mingo, arnd, peterz

Hi Juergen,

[...]
>> I like your original method.
>> So, I tried to fix it by moving native_pv_lock_init() from
>>  native_smp_prepare_boot_cpu() to native_smp_prepare_cpus().
>
> Hmm, this might work, but the Xen case has to be modified (same for
> my more general solution), as xen_init_spinlocks() is still modifying
> the static key too early. And we can't move xen_init_spinlocks() to
> smp_prepare_cpus() as this would be too late for the alternatives
> patching.
>

Yes, right.

> So let me extend your patch a little bit to cover Xen, too.
>

Yes!

How about moving the check of xen_pvspin into native_pv_lock_init()
like below?

Thanks,
	dou.

------------------------->8
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 041096b..b5f3ecb 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -119,7 +119,7 @@ DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);

  void __init native_pv_lock_init(void)
  {
-       if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+       if (!static_cpu_has(X86_FEATURE_HYPERVISOR) || !xen_pvspin)
                 static_branch_disable(&virt_spin_lock_key);
  }

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index aed1460..6b1335a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1323,6 +1323,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
         pr_info("CPU0: ");
         print_cpu_info(&cpu_data(0));

+       native_pv_lock_init();
+
         uv_system_init();

         set_mtrr_aps_delayed_init();
@@ -1350,7 +1352,6 @@ void __init native_smp_prepare_boot_cpu(void)
         /* already set me in cpu_online_mask in boot_cpu_init() */
         cpumask_set_cpu(me, cpu_callout_mask);
         cpu_set_state_online(me);
-       native_pv_lock_init();
  }

  void __init native_smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 5147140..570b2bc 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -236,6 +236,8 @@ static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
                 xen_raw_printk(m);
                 panic(m);
         }
+       native_pv_lock_init();
+
         xen_init_lock_cpu(0);

         smp_store_boot_cpu_info();
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index e8ab80a..8e0ec79 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -130,7 +130,6 @@ void __init xen_init_spinlocks(void)

         if (!xen_pvspin) {
                 printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
-               static_branch_disable(&virt_spin_lock_key);
                 return;
         }
         printk(KERN_DEBUG "xen: PV spinlocks enabled\n");


>> I hope it's useful to you.
>
> It really is, thanks.

* Re: [PATCH] paravirt/locks: avoid modifying static key before jump_label_init()
  2017-10-25  7:35     ` Dou Liyang
@ 2017-10-25  7:51       ` Juergen Gross
  2017-10-25  8:06         ` Dou Liyang
  0 siblings, 1 reply; 9+ messages in thread
From: Juergen Gross @ 2017-10-25  7:51 UTC (permalink / raw)
  To: Dou Liyang, linux-kernel, x86; +Cc: hpa, tglx, mingo, arnd, peterz

On 25/10/17 09:35, Dou Liyang wrote:
> Hi Juergen,
> 
> [...]
>>> I like your original method.
>>> So, I tried to fix it by moving native_pv_lock_init() from
>>>  native_smp_prepare_boot_cpu() to native_smp_prepare_cpus().
>>
>> Hmm, this might work, but the Xen case has to be modified (same for
>> my more general solution), as xen_init_spinlocks() is still modifying
>> the static key too early. And we can't move xen_init_spinlocks() to
>> smp_prepare_cpus() as this would be too late for the alternatives
>> patching.
>>
> 
> Yes, right.
> 
>> So let me extend your patch a little bit to cover Xen, too.
>>
> 
> Yes!
> 
> How about moving the check of xen_pvspin into native_pv_lock_init()
> like below?

This would leak xen_pvspin to non-Xen code. I'd rather do the
static_branch_disable() in xen_init_lock_cpu() if cpu == 0.
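
Something along these lines (untested sketch, assuming xen_init_lock_cpu()
keeps its current int cpu parameter):

    void xen_init_lock_cpu(int cpu)
    {
        if (!xen_pvspin) {
            if (cpu == 0)
                static_branch_disable(&virt_spin_lock_key);
            return;
        }

        /* ... existing per-cpu lock kick irq setup unchanged ... */
    }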


Juergen


* Re: [PATCH] paravirt/locks: avoid modifying static key before jump_label_init()
  2017-10-25  7:51       ` Juergen Gross
@ 2017-10-25  8:06         ` Dou Liyang
  0 siblings, 0 replies; 9+ messages in thread
From: Dou Liyang @ 2017-10-25  8:06 UTC (permalink / raw)
  To: Juergen Gross, linux-kernel, x86; +Cc: hpa, tglx, mingo, arnd, peterz

Hi Juergen,

>>
>> How about moving the check of xen_pvspin into native_pv_lock_init()
>> like below?
>
> This would leak xen_pvspin to non-Xen code. I'd rather do the

Oops, yes, I made a mistake. Please ignore it.

> static_branch_disable() in xen_init_lock_cpu() if cpu == 0.
>

Yes. I will test it. :-)

Thanks,
	dou.

* Re: [PATCH] paravirt/locks: avoid modifying static key before jump_label_init()
  2017-10-23 13:49 [PATCH] paravirt/locks: avoid modifying static key before jump_label_init() Juergen Gross
  2017-10-25  4:26 ` Dou Liyang
@ 2017-10-27  8:43 ` Ingo Molnar
  2017-10-27  9:21   ` Juergen Gross
  1 sibling, 1 reply; 9+ messages in thread
From: Ingo Molnar @ 2017-10-27  8:43 UTC (permalink / raw)
  To: Juergen Gross; +Cc: linux-kernel, x86, hpa, tglx, mingo, arnd, peterz


* Juergen Gross <jgross@suse.com> wrote:

> Don't try to set the static virt_spin_lock_key to a value before
> jump_label_init() has been called, as this will result in a WARN().
> 
> Solve the problem by introducing a new lock_init() hook called after
> jump_label_init() instead of doing the call inside of
> smp_prepare_boot_cpu().
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>
> ---
> Based on kernel/git/tip/tip.git locking/core

Just a quick ping: what's the conclusion of the discussion, do we want this patch 
as-is?

Also, it's unclear from the changelog under what circumstances this bug was
observed, what the symptoms are and how severe the bug is - can the warning
trigger on vanilla v4.14?

Thanks,

	Ingo

* Re: [PATCH] paravirt/locks: avoid modifying static key before jump_label_init()
  2017-10-27  8:43 ` Ingo Molnar
@ 2017-10-27  9:21   ` Juergen Gross
  2017-10-27  9:38     ` Dou Liyang
  0 siblings, 1 reply; 9+ messages in thread
From: Juergen Gross @ 2017-10-27  9:21 UTC (permalink / raw)
  To: Ingo Molnar; +Cc: linux-kernel, x86, hpa, tglx, mingo, arnd, peterz, Dou Liyang

On 27/10/17 10:43, Ingo Molnar wrote:
> 
> * Juergen Gross <jgross@suse.com> wrote:
> 
>> Don't try to set the static virt_spin_lock_key to a value before
>> jump_label_init() has been called, as this will result in a WARN().
>>
>> Solve the problem by introducing a new lock_init() hook called after
>> jump_label_init() instead of doing the call inside of
>> smp_prepare_boot_cpu().
>>
>> Signed-off-by: Juergen Gross <jgross@suse.com>
>> ---
>> Based on kernel/git/tip/tip.git locking/core
> 
> Just a quick ping: what's the conclusion of the discussion, do we want this patch 
> as-is?

Dou Liyang (CC-ed) suggested another alternative he wanted to test. This
would be much less intrusive. I can send a patch based on his idea in
case he doesn't react in time: he basically suggested moving the call of
native_pv_lock_init() to native_smp_prepare_cpus() - this will need
another small adaptation in Xen, but this is really simple.

> Also, it's unclear from the changelog under what circumstances this bug was
> observed, what the symptoms are and how severe the bug is - can the warning
> trigger on vanilla v4.14?

No, the patch introducing the bug is in the tip tree only.


Juergen

* Re: [PATCH] paravirt/locks: avoid modifying static key before jump_label_init()
  2017-10-27  9:21   ` Juergen Gross
@ 2017-10-27  9:38     ` Dou Liyang
  0 siblings, 0 replies; 9+ messages in thread
From: Dou Liyang @ 2017-10-27  9:38 UTC (permalink / raw)
  To: Juergen Gross, Ingo Molnar
  Cc: linux-kernel, x86, hpa, tglx, mingo, arnd, peterz

Hi Juergen,

At 10/27/2017 05:21 PM, Juergen Gross wrote:
> On 27/10/17 10:43, Ingo Molnar wrote:
>>
>> * Juergen Gross <jgross@suse.com> wrote:
>>
>>> Don't try to set the static virt_spin_lock_key to a value before
>>> jump_label_init() has been called, as this will result in a WARN().
>>>
>>> Solve the problem by introducing a new lock_init() hook called after
>>> jump_label_init() instead of doing the call inside of
>>> smp_prepare_boot_cpu().
>>>
>>> Signed-off-by: Juergen Gross <jgross@suse.com>
>>> ---
>>> Based on kernel/git/tip/tip.git locking/core
>>
>> Just a quick ping: what's the conclusion of the discussion, do we want this patch
>> as-is?
>
> Dou Liyang (CC-ed) suggested another alternative he wanted to test. This
> would be much less intrusive. I can send a patch based on his idea in
> case he doesn't react in time: he basically suggested moving the call of

I am sorry, I misunderstood your comments. I thought you were going to
send a new version, so I said I would test it. ;-)

Now I understand it clearly. I will do it and send a patch for this bug.

Thanks,
	dou
