From: Peter Zijlstra

When we detect a hypervisor (!paravirt, see qspinlock paravirt support
patches), revert to a simple test-and-set lock to avoid the horrors of
queue preemption.

Cc: Ingo Molnar
Cc: David Vrabel
Cc: Oleg Nesterov
Cc: Scott J Norton
Cc: Paolo Bonzini
Cc: Douglas Hatch
Cc: Konrad Rzeszutek Wilk
Cc: Boris Ostrovsky
Cc: "Paul E. McKenney"
Cc: Linus Torvalds
Cc: Thomas Gleixner
Cc: "H. Peter Anvin"
Cc: Rik van Riel
Cc: Raghavendra K T
Signed-off-by: Waiman Long
Signed-off-by: Peter Zijlstra (Intel)
Link: http://lkml.kernel.org/r/1421784755-21945-8-git-send-email-Waiman.Long@hp.com
---
 arch/x86/include/asm/qspinlock.h | 14 ++++++++++++++
 include/asm-generic/qspinlock.h  |  7 +++++++
 kernel/locking/qspinlock.c       |  3 +++
 3 files changed, 24 insertions(+)

--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_QSPINLOCK_H
 #define _ASM_X86_QSPINLOCK_H
 
+#include <asm/cpufeature.h>
 #include <asm-generic/qspinlock_types.h>
 
 #define queue_spin_unlock queue_spin_unlock
@@ -15,6 +16,19 @@ static inline void queue_spin_unlock(str
         smp_store_release((u8 *)lock, 0);
 }
 
+#define virt_queue_spin_lock virt_queue_spin_lock
+
+static inline bool virt_queue_spin_lock(struct qspinlock *lock)
+{
+        if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+                return false;
+
+        while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
+                cpu_relax();
+
+        return true;
+}
+
 #include <asm-generic/qspinlock.h>
 
 #endif /* _ASM_X86_QSPINLOCK_H */
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -111,6 +111,13 @@ static inline void queue_spin_unlock_wai
                 cpu_relax();
 }
 
+#ifndef virt_queue_spin_lock
+static __always_inline bool virt_queue_spin_lock(struct qspinlock *lock)
+{
+        return false;
+}
+#endif
+
 /*
  * Initializier
  */
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -259,6 +259,9 @@ void queue_spin_lock_slowpath(struct qsp
         BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
 
+        if (virt_queue_spin_lock(lock))
+                return;
+
         /*
          * wait for in-progress pending->locked hand-overs
          *
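
Not part of the patch, illustration only: a minimal user-space C sketch of
the same test-and-set fallback idea, using C11 atomics in place of the
kernel's atomic_cmpxchg(), cpu_relax() and smp_store_release(). All names
below (tas_lock, tas_lock_acquire, tas_lock_release) are made up for this
example and do not exist in the kernel tree.

#include <stdatomic.h>
#include <stdio.h>

struct tas_lock {
        atomic_int val;             /* 0 = unlocked, 1 = locked */
};

/* Spin until the 0 -> 1 transition succeeds; there is no queue, so a
 * preempted vCPU can only delay the lock owner, never a chain of waiters. */
static void tas_lock_acquire(struct tas_lock *lock)
{
        int expected;

        for (;;) {
                expected = 0;
                if (atomic_compare_exchange_weak(&lock->val, &expected, 1))
                        return;
                /* a real implementation would pause here, like cpu_relax() */
        }
}

/* Release store, mirroring the smp_store_release() based unlock. */
static void tas_lock_release(struct tas_lock *lock)
{
        atomic_store_explicit(&lock->val, 0, memory_order_release);
}

int main(void)
{
        struct tas_lock lock = { .val = 0 };

        tas_lock_acquire(&lock);
        printf("lock held, releasing\n");
        tas_lock_release(&lock);
        return 0;
}

The trade-off this sketch shows is the one the commit message relies on: an
unqueued test-and-set lock is unfair, but when a vCPU is preempted it can
only delay the next acquirer, whereas with a queued lock a preempted waiter
in the middle of the queue stalls every CPU queued behind it.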