From: Michal Simek <michal.simek@xilinx.com>
To: linux-kernel@vger.kernel.org, monstr@monstr.eu,
	michal.simek@xilinx.com, git@xilinx.com, arnd@arndb.de
Cc: Stefan Asserhall <stefan.asserhall@xilinx.com>,
	Ingo Molnar <mingo@redhat.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Will Deacon <will@kernel.org>
Subject: [PATCH 6/7] microblaze: Implement architecture spinlock
Date: Wed, 12 Feb 2020 16:42:28 +0100
Message-ID: <ed53474e9ca6736353afd10ebe7ea98e4c6c459e.1581522136.git.michal.simek@xilinx.com>
In-Reply-To: <cover.1581522136.git.michal.simek@xilinx.com>

From: Stefan Asserhall <stefan.asserhall@xilinx.com>

Use exclusive loads/stores (lwx/swx) to implement spinlocks and rwlocks
that can be used on SMP systems.
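
The lwx/swx pairs below implement the usual exclusive-load/exclusive-store
retry loop. As an illustrative sketch only (not part of this patch), the
arch_spin_lock() fast path corresponds to the following C, where the
hypothetical load_exclusive()/store_exclusive() helpers stand in for the
lwx instruction and for swx plus the MSR[C] carry check:

	/*
	 * Sketch only: load_exclusive()/store_exclusive() are hypothetical
	 * helpers modelling lwx and swx + the MSR[C] test; they are not
	 * real kernel APIs.
	 */
	static void spin_lock_sketch(volatile unsigned int *lock)
	{
		unsigned int val;

		do {
			/* spin until the lock reads back as 0 (unlocked) */
			do {
				val = load_exclusive(lock);
			} while (val != 0);
			/* store 1 (locked); fails if the reservation was lost */
		} while (!store_exclusive(lock, 1));
	}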

Signed-off-by: Stefan Asserhall <stefan.asserhall@xilinx.com>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>
---

 arch/microblaze/include/asm/spinlock.h       | 240 +++++++++++++++++++
 arch/microblaze/include/asm/spinlock_types.h |  25 ++
 2 files changed, 265 insertions(+)
 create mode 100644 arch/microblaze/include/asm/spinlock.h
 create mode 100644 arch/microblaze/include/asm/spinlock_types.h

diff --git a/arch/microblaze/include/asm/spinlock.h b/arch/microblaze/include/asm/spinlock.h
new file mode 100644
index 000000000000..0199ea9f7f0f
--- /dev/null
+++ b/arch/microblaze/include/asm/spinlock.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2013-2020 Xilinx, Inc.
+ */
+
+#ifndef _ASM_MICROBLAZE_SPINLOCK_H
+#define _ASM_MICROBLAZE_SPINLOCK_H
+
+/*
+ * Unlocked value: 0
+ * Locked value: 1
+ */
+#define arch_spin_is_locked(x)	(READ_ONCE((x)->lock) != 0)
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+		/* load conditional address in %1 to %0 */
+		"1:	lwx	 %0, %1, r0;\n"
+		/* not zero? try again */
+		"	bnei	%0, 1b;\n"
+		/* set new lock value to 1 */
+		"	addi	%0, r0, 1;\n"
+		/* attempt store */
+		"	swx	%0, %1, r0;\n"
+		/* checking msr carry flag */
+		"	addic	%0, r0, 0;\n"
+		/* store failed (MSR[C] set)? try again */
+		"	bnei	%0, 1b;\n"
+		/* Outputs: temp variable for load result */
+		: "=&r" (tmp)
+		/* Inputs: lock address */
+		: "r" (&lock->lock)
+		: "cc", "memory"
+	);
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+	unsigned long prev, tmp;
+
+	__asm__ __volatile__ (
+		/* load conditional address in %2 to %0 */
+		"1:	lwx	 %0, %2, r0;\n"
+		/* not zero? lock is held, bail out */
+		"	bneid	%0, 2f;\n"
+		/* set new lock value to 1 (branch delay slot) */
+		"	addi	%1, r0, 1;\n"
+		/* attempt store */
+		"	swx	%1, %2, r0;\n"
+		/* checking msr carry flag */
+		"	addic	%1, r0, 0;\n"
+		/* store failed (MSR[C] set)? try again */
+		"	bnei	%1, 1b;\n"
+		"2:"
+		/* Outputs: previous lock value and temp variable */
+		: "=&r" (prev), "=&r" (tmp)
+		/* Inputs: lock address */
+		: "r" (&lock->lock)
+		: "cc", "memory"
+	);
+
+	return (prev == 0);
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+		/* load conditional address in %1 to %0 */
+		"1:	lwx	%0, %1, r0;\n"
+		/* clear */
+		"	swx	r0, %1, r0;\n"
+		/* checking msr carry flag */
+		"	addic	%0, r0, 0;\n"
+		/* store failed (MSR[C] set)? try again */
+		"	bnei	%0, 1b;\n"
+		/* Outputs: temp variable for load result */
+		: "=&r" (tmp)
+		/* Inputs: lock address */
+		: "r" (&lock->lock)
+		: "cc", "memory"
+	);
+}
+
+/* RWLOCKS */
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+		/* load conditional address in %1 to %0 */
+		"1:	lwx	 %0, %1, r0;\n"
+		/* not zero? try again */
+		"	bneid	%0, 1b;\n"
+		/* set tmp to -1 */
+		"	addi	%0, r0, -1;\n"
+		/* attempt store */
+		"	swx	%0, %1, r0;\n"
+		/* checking msr carry flag */
+		"	addic	%0, r0, 0;\n"
+		/* store failed (MSR[C] set)? try again */
+		"	bnei	%0, 1b;\n"
+		/* Outputs: temp variable for load result */
+		: "=&r" (tmp)
+		/* Inputs: lock address */
+		: "r" (&rw->lock)
+		: "cc", "memory"
+	);
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+	unsigned long prev, tmp;
+
+	__asm__ __volatile__ (
+		/* load conditional address in %2 to %0 */
+		"1:	lwx	%0, %2, r0;\n"
+		/* not zero? abort */
+		"	bneid	%0, 2f;\n"
+		/* set tmp to -1 */
+		"	addi	%1, r0, -1;\n"
+		/* attempt store */
+		"	swx	%1, %2, r0;\n"
+		/* checking msr carry flag */
+		"	addic	%1, r0, 0;\n"
+		/* store failed (MSR[C] set)? try again */
+		"	bnei	%1, 1b;\n"
+		"2:"
+		/* Outputs: previous lock value and temp variable */
+		: "=&r" (prev), "=&r" (tmp)
+		/* Inputs: lock address */
+		: "r" (&rw->lock)
+		: "cc", "memory"
+	);
+	/* prev value should be zero and MSR should be clear */
+	return (prev == 0);
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+		/* load conditional address in %1 to %0 */
+		"1:	lwx	%0, %1, r0;\n"
+		/* clear */
+		"	swx	r0, %1, r0;\n"
+		/* checking msr carry flag */
+		"	addic	%0, r0, 0;\n"
+		/* store failed (MSR[C] set)? try again */
+		"	bnei	%0, 1b;\n"
+		/* Outputs: temp variable for load result */
+		: "=&r" (tmp)
+		/* Inputs: lock address */
+		: "r" (&rw->lock)
+		: "cc", "memory"
+	);
+}
+
+/* Read locks */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+		/* load conditional address in %1 to %0 */
+		"1:	lwx	%0, %1, r0;\n"
+		/* < 0 (WRITE LOCK active) try again */
+		"	bltid	%0, 1b;\n"
+		/* increment the reader count by 1 (branch delay slot) */
+		"	addi	%0, %0, 1;\n"
+		/* attempt store */
+		"	swx	%0, %1, r0;\n"
+		/* checking msr carry flag */
+		"	addic	%0, r0, 0;\n"
+		/* store failed (MSR[C] set)? try again */
+		"	bnei	%0, 1b;\n"
+		/* Outputs: temp variable for load result */
+		: "=&r" (tmp)
+		/* Inputs: lock address */
+		: "r" (&rw->lock)
+		: "cc", "memory"
+	);
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+		/* load conditional address in %1 to tmp */
+		"1:	lwx	%0, %1, r0;\n"
+		/* tmp = tmp - 1 */
+		"	addi	%0, %0, -1;\n"
+		/* attempt store */
+		"	swx	%0, %1, r0;\n"
+		/* checking msr carry flag */
+		"	addic	%0, r0, 0;\n"
+		/* store failed (MSR[C] set)? try again */
+		"	bnei	%0, 1b;\n"
+		/* Outputs: temp variable for load result */
+		: "=&r" (tmp)
+		/* Inputs: lock address */
+		: "r" (&rw->lock)
+		: "cc", "memory"
+	);
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+	unsigned long prev, tmp;
+
+	__asm__ __volatile__ (
+		/* load conditional address in %2 to %0 */
+		"1:	lwx	%0, %2, r0;\n"
+		/* < 0 (write lock active)? bail out */
+		"	bltid	%0, 2f;\n"
+		/* increment lock by 1 */
+		"	addi	%1, %0, 1;\n"
+		/* attempt store */
+		"	swx	%1, %2, r0;\n"
+		/* checking msr carry flag */
+		"	addic	%1, r0, 0;\n"
+		/* store failed (MSR[C] set)? try again */
+		"	bnei	%1, 1b;\n"
+		"2:"
+		/* Outputs: previous lock value and temp variable */
+		: "=&r" (prev), "=&r" (tmp)
+		/* Inputs: lock address */
+		: "r" (&rw->lock)
+		: "cc", "memory"
+	);
+	return ((long)prev >= 0);
+}
+
+#endif /* _ASM_MICROBLAZE_SPINLOCK_H */
diff --git a/arch/microblaze/include/asm/spinlock_types.h b/arch/microblaze/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..ffd3588f6546
--- /dev/null
+++ b/arch/microblaze/include/asm/spinlock_types.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2013-2020 Xilinx, Inc.
+ */
+
+#ifndef __ASM_MICROBLAZE_SPINLOCK_TYPES_H
+#define __ASM_MICROBLAZE_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+	volatile unsigned int lock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
+
+typedef struct {
+	volatile signed int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
+
+#endif
-- 
2.25.0

