From 655d3a5d48a494a5674cf57454bf3e1f36b6eb83 Mon Sep 17 00:00:00 2001
From: Waiman Long <longman@redhat.com>
Date: Tue, 7 Jul 2020 22:29:20 -0400
Subject: [PATCH 9/9] powerpc/pseries: Fixup

Signed-off-by: Waiman Long <longman@redhat.com>
---
 arch/powerpc/include/asm/qspinlock.h   | 22 ----------------------
 arch/powerpc/platforms/pseries/Kconfig |  1 +
 arch/powerpc/platforms/pseries/setup.c |  6 +-----
 3 files changed, 2 insertions(+), 27 deletions(-)

diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
index b752d34517b3..1fa724d27a2d 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -10,7 +10,6 @@
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
 extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
-extern void __pv_queued_spin_unlock(struct qspinlock *lock);
 
 static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 {
@@ -20,15 +19,6 @@ static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u3
 		__pv_queued_spin_lock_slowpath(lock, val);
 }
 
-#define queued_spin_unlock queued_spin_unlock
-static inline void queued_spin_unlock(struct qspinlock *lock)
-{
-	if (!is_shared_processor())
-		smp_store_release(&lock->locked, 0);
-	else
-		__pv_queued_spin_unlock(lock);
-}
-
 #else
 extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
 #endif
@@ -72,18 +62,6 @@ static __always_inline void pv_wait(u8 *ptr, u8 val)
 	 */
 }
 
-static __always_inline void pv_kick(int cpu)
-{
-	prod_cpu(cpu);
-}
-
-extern void __pv_init_lock_hash(void);
-
-static inline void pv_spinlocks_init(void)
-{
-	__pv_init_lock_hash();
-}
-
 #endif
 
 #include <asm-generic/qspinlock.h>
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 756e727b383f..1e3bbe27d664 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -33,6 +33,7 @@ config PPC_SPLPAR
 	depends on PPC_PSERIES
 	bool "Support for shared-processor logical partitions"
 	select PARAVIRT_SPINLOCKS if PPC_QUEUED_SPINLOCKS
+	select PARAVIRT_QSPINLOCKS_LITE if PPC_QUEUED_SPINLOCKS
 	help
 	  Enabling this option will make the kernel run more efficiently
 	  on logically-partitioned pSeries systems which use shared
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 747a203d9453..2db8469e475f 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -771,12 +771,8 @@ static void __init pSeries_setup_arch(void)
 	if (firmware_has_feature(FW_FEATURE_LPAR)) {
 		vpa_init(boot_cpuid);
 
-		if (lppaca_shared_proc(get_lppaca())) {
+		if (lppaca_shared_proc(get_lppaca()))
 			static_branch_enable(&shared_processor);
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-			pv_spinlocks_init();
-#endif
-		}
 
 		ppc_md.power_save = pseries_lpar_idle;
 		ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
-- 
2.18.1
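
For orientation, a minimal C sketch (an editor's illustration, not part of the patch) of the lock paths that remain after this fixup. The slowpath dispatch is inferred from the patch's context lines and the is_shared_processor() test used elsewhere in the header; the unlock body reflects the generic asm-generic/qspinlock.h fallback that takes over once the powerpc queued_spin_unlock override is deleted.

/*
 * Illustrative sketch only -- not the patch itself.
 *
 * Lock side: still paravirt-aware.  On a shared-processor LPAR the
 * paravirt slowpath (which can pv_wait() and yield the vCPU to the
 * hypervisor) is used; otherwise the native qspinlock slowpath runs.
 */
static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	if (!is_shared_processor())
		native_queued_spin_lock_slowpath(lock, val);
	else
		__pv_queued_spin_lock_slowpath(lock, val);
}

/*
 * Unlock side: with the powerpc override removed, the generic
 * asm-generic/qspinlock.h definition applies -- a plain store-release
 * of the locked byte, with no pv_kick() of a sleeping waiter.
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release(&lock->locked, 0);
}

That is the point of the "lite" paravirt variant selected by PARAVIRT_QSPINLOCKS_LITE above: waiters yield via pv_wait(), but there is no unlock-side kick and no PV hash table, which is why __pv_queued_spin_unlock(), pv_kick(), __pv_init_lock_hash() and the pv_spinlocks_init() call in pSeries_setup_arch() can all be dropped.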