From mboxrd@z Thu Jan 1 00:00:00 1970
From: Palmer Dabbelt <palmer@rivosinc.com>
Date: Wed, 16 Mar 2022 16:26:00 -0700
Subject: [OpenRISC] [PATCH 5/5] RISC-V: Move to queued RW locks
In-Reply-To: <20220316232600.20419-1-palmer@rivosinc.com>
References: <20220316232600.20419-1-palmer@rivosinc.com>
Message-ID: <20220316232600.20419-6-palmer@rivosinc.com>
List-Id:
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
To: openrisc@lists.librecores.org

From: Palmer Dabbelt <palmer@rivosinc.com>

With the move to fair spinlocks, we might as well move to fair rwlocks.

Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
---
 arch/riscv/Kconfig                      |  1 +
 arch/riscv/include/asm/Kbuild           |  2 +
 arch/riscv/include/asm/spinlock.h       | 82 +------------------------
 arch/riscv/include/asm/spinlock_types.h |  7 +--
 4 files changed, 5 insertions(+), 87 deletions(-)

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 5adcbd9b5e88..feb7030cfb6d 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -38,6 +38,7 @@ config RISCV
 	select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
 	select ARCH_SUPPORTS_HUGETLBFS if MMU
 	select ARCH_USE_MEMTEST
+	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 42b1961af1a6..e8714070cbb9 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -4,5 +4,7 @@ generic-y += flat.h
 generic-y += kvm_para.h
 generic-y += ticket-lock.h
 generic-y += ticket-lock-types.h
+generic-y += qrwlock.h
+generic-y += qrwlock_types.h
 generic-y += user.h
 generic-y += vmlinux.lds.h
diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
index 38089cbdea92..97dfb150d18c 100644
--- a/arch/riscv/include/asm/spinlock.h
+++ b/arch/riscv/include/asm/spinlock.h
@@ -11,86 +11,6 @@
 #include
 #include
 #include
-
-static inline void arch_read_lock(arch_rwlock_t *lock)
-{
-	int tmp;
-
-	__asm__ __volatile__(
-		"1:	lr.w	%1, %0\n"
-		"	bltz	%1, 1b\n"
-		"	addi	%1, %1, 1\n"
-		"	sc.w	%1, %1, %0\n"
-		"	bnez	%1, 1b\n"
-		RISCV_ACQUIRE_BARRIER
-		: "+A" (lock->lock), "=&r" (tmp)
-		:: "memory");
-}
-
-static inline void arch_write_lock(arch_rwlock_t *lock)
-{
-	int tmp;
-
-	__asm__ __volatile__(
-		"1:	lr.w	%1, %0\n"
-		"	bnez	%1, 1b\n"
-		"	li	%1, -1\n"
-		"	sc.w	%1, %1, %0\n"
-		"	bnez	%1, 1b\n"
-		RISCV_ACQUIRE_BARRIER
-		: "+A" (lock->lock), "=&r" (tmp)
-		:: "memory");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *lock)
-{
-	int busy;
-
-	__asm__ __volatile__(
-		"1:	lr.w	%1, %0\n"
-		"	bltz	%1, 1f\n"
-		"	addi	%1, %1, 1\n"
-		"	sc.w	%1, %1, %0\n"
-		"	bnez	%1, 1b\n"
-		RISCV_ACQUIRE_BARRIER
-		"1:\n"
-		: "+A" (lock->lock), "=&r" (busy)
-		:: "memory");
-
-	return !busy;
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *lock)
-{
-	int busy;
-
-	__asm__ __volatile__(
-		"1:	lr.w	%1, %0\n"
-		"	bnez	%1, 1f\n"
-		"	li	%1, -1\n"
-		"	sc.w	%1, %1, %0\n"
-		"	bnez	%1, 1b\n"
-		RISCV_ACQUIRE_BARRIER
-		"1:\n"
-		: "+A" (lock->lock), "=&r" (busy)
-		:: "memory");
-
-	return !busy;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *lock)
-{
-	__asm__ __volatile__(
-		RISCV_RELEASE_BARRIER
-		"	amoadd.w	x0, %1, %0\n"
-		: "+A" (lock->lock)
-		: "r" (-1)
-		: "memory");
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *lock)
-{
-	smp_store_release(&lock->lock, 0);
-}
+#include <asm/qrwlock.h>
 
 #endif /* _ASM_RISCV_SPINLOCK_H */
diff --git a/arch/riscv/include/asm/spinlock_types.h b/arch/riscv/include/asm/spinlock_types.h
index 431ee08e26c4..3779f13706fa 100644
--- a/arch/riscv/include/asm/spinlock_types.h
+++ b/arch/riscv/include/asm/spinlock_types.h
@@ -11,11 +11,6 @@
 #endif
 
 #include
-
-typedef struct {
-	volatile unsigned int lock;
-} arch_rwlock_t;
-
-#define __ARCH_RW_LOCK_UNLOCKED	{ 0 }
+#include <asm/qrwlock_types.h>
 
 #endif /* _ASM_RISCV_SPINLOCK_TYPES_H */
-- 
2.34.1
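For readers unfamiliar with the generic-y mechanism used above, the sketch
below shows roughly how the pieces are expected to fit together at build time:
Kbuild emits one-line wrapper headers under include/generated/asm/ that
forward to the generic queued-rwlock implementation, so the arch headers only
need the includes added by this patch. The generated paths and wrapper
contents shown here are an assumption about standard Kbuild behaviour, not
something taken from this series.

/*
 * Assumed expansion of the "generic-y += qrwlock.h" / "qrwlock_types.h"
 * lines added to arch/riscv/include/asm/Kbuild: Kbuild generates wrappers
 * so that <asm/qrwlock.h> resolves to the asm-generic implementation.
 */

/* include/generated/asm/qrwlock.h (generated wrapper, assumed) */
#include <asm-generic/qrwlock.h>

/* include/generated/asm/qrwlock_types.h (generated wrapper, assumed) */
#include <asm-generic/qrwlock_types.h>

Nothing changes for users of the rwlock API itself: kernel code using
DEFINE_RWLOCK() with read_lock()/write_lock() keeps working as before. On
RISC-V, however, contended acquisitions now go through the queued slow paths
in kernel/locking/qrwlock.c, which serialize waiters on the (now fair) arch
spinlock instead of letting a stream of readers spin on lr/sc and potentially
starve a writer, which is what makes the resulting rwlocks fair in the same
sense as the ticket spinlocks introduced earlier in the series.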