From: Michal Simek <michal.simek@xilinx.com>
To: linux-kernel@vger.kernel.org, monstr@monstr.eu,
	michal.simek@xilinx.com, git@xilinx.com, arnd@arndb.de
Cc: Stefan Asserhall <stefan.asserhall@xilinx.com>,
	Boqun Feng <boqun.feng@gmail.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Will Deacon <will@kernel.org>
Subject: [PATCH 7/7] microblaze: Do atomic operations by using exclusive ops
Date: Wed, 12 Feb 2020 16:42:29 +0100	[thread overview]
Message-ID: <ba3047649af07dadecf1a52e7d815db8f068eb24.1581522136.git.michal.simek@xilinx.com> (raw)
In-Reply-To: <cover.1581522136.git.michal.simek@xilinx.com>

From: Stefan Asserhall <stefan.asserhall@xilinx.com>

Implement SMP-aware atomic operations based on the MicroBlaze exclusive
load/store instructions (lwx/swx) instead of the asm-generic
implementation. 64-bit atomics keep using asm-generic/atomic64.h.
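
All operations use the same exclusive-access retry loop: lwx loads the
counter and takes the reservation, swx succeeds only while the
reservation is still held and sets MSR[C] on failure, addic copies
MSR[C] into a register, and bnei restarts the sequence when the store
failed. As an illustrative C-level sketch of that loop (the
load_exclusive()/store_exclusive() helpers are placeholders for the
lwx/swx pair and are not part of this patch):

	static inline int atomic_add_return_relaxed(int i, atomic_t *v)
	{
		int result;

		do {
			/* lwx: load the counter and take the reservation */
			result = load_exclusive(&v->counter);
			result += i;
			/* swx + addic + bnei: retry if the reservation was lost */
		} while (!store_exclusive(&v->counter, result));

		return result;
	}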

Signed-off-by: Stefan Asserhall <stefan.asserhall@xilinx.com>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>
---

 arch/microblaze/include/asm/atomic.h | 265 +++++++++++++++++++++++++--
 1 file changed, 253 insertions(+), 12 deletions(-)

diff --git a/arch/microblaze/include/asm/atomic.h b/arch/microblaze/include/asm/atomic.h
index 41e9aff23a62..522d704fad63 100644
--- a/arch/microblaze/include/asm/atomic.h
+++ b/arch/microblaze/include/asm/atomic.h
@@ -1,28 +1,269 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2013-2020 Xilinx, Inc.
+ */
+
 #ifndef _ASM_MICROBLAZE_ATOMIC_H
 #define _ASM_MICROBLAZE_ATOMIC_H
 
+#include <linux/types.h>
 #include <asm/cmpxchg.h>
-#include <asm-generic/atomic.h>
-#include <asm-generic/atomic64.h>
+
+#define ATOMIC_INIT(i)	{ (i) }
+
+#define atomic_read(v)	READ_ONCE((v)->counter)
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+	int result, tmp;
+
+	__asm__ __volatile__ (
+		/* load conditional address in %2 to %0 */
+		"1:	lwx	%0, %2, r0;\n"
+		/* attempt store */
+		"	swx	%3, %2, r0;\n"
+		/* checking msr carry flag */
+		"	addic	%1, r0, 0;\n"
+		/* store failed (MSR[C] set)? try again */
+		"	bnei	%1, 1b;\n"
+		/* Outputs: result value */
+		: "=&r" (result), "=&r" (tmp)
+		/* Inputs: counter address */
+		: "r" (&v->counter), "r" (i)
+		: "cc", "memory"
+	);
+}
+#define atomic_set	atomic_set
+
+/* Atomically perform op with v->counter and i, return result */
+#define ATOMIC_OP_RETURN(op, asm)					\
+static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
+{									\
+	int result, tmp;						\
+									\
+	__asm__ __volatile__ (						\
+		/* load conditional address in %2 to %0 */		\
+		"1:	lwx	%0, %2, r0;\n"				\
+		/* perform operation and save it to result */		\
+		#asm		" %0, %3, %0;\n"			\
+		/* attempt store */					\
+		"	swx	%0, %2, r0;\n"				\
+		/* checking msr carry flag */				\
+		"	addic	%1, r0, 0;\n"				\
+		/* store failed (MSR[C] set)? try again */		\
+		"	bnei	%1, 1b;\n"				\
+		/* Outputs: result value */				\
+		: "=&r" (result), "=&r" (tmp)				\
+		/* Inputs: counter address */				\
+		: "r"   (&v->counter), "r" (i)				\
+		: "cc", "memory"					\
+	);								\
+									\
+	return result;							\
+}									\
+									\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	atomic_##op##_return_relaxed(i, v);				\
+}
+
+/* Atomically perform op with v->counter and i, return orig v->counter */
+#define ATOMIC_FETCH_OP_RELAXED(op, asm)				\
+static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
+{									\
+	int old, tmp;							\
+									\
+	__asm__ __volatile__ (						\
+		/* load conditional address in %2 to %0 */		\
+		"1:	lwx	%0, %2, r0;\n"				\
+		/* perform operation and save it to tmp */		\
+		#asm		" %1, %3, %0;\n"			\
+		/* attempt store */					\
+		"	swx	%1, %2, r0;\n"				\
+		/* checking msr carry flag */				\
+		"	addic	%1, r0, 0;\n"				\
+		/* store failed (MSR[C] set)? try again */		\
+		"	bnei	%1, 1b;\n"				\
+		/* Outputs: old value */				\
+		: "=&r" (old), "=&r" (tmp)				\
+		/* Inputs: counter address */				\
+		: "r"   (&v->counter), "r" (i)				\
+		: "cc", "memory"					\
+	);								\
+									\
+	return old;							\
+}
+
+#define ATOMIC_OPS(op, asm) \
+	ATOMIC_FETCH_OP_RELAXED(op, asm) \
+	ATOMIC_OP_RETURN(op, asm)
+
+ATOMIC_OPS(and, and)
+#define atomic_and			atomic_and
+#define atomic_and_return_relaxed	atomic_and_return_relaxed
+#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
+
+ATOMIC_OPS(add, add)
+#define atomic_add			atomic_add
+#define atomic_add_return_relaxed	atomic_add_return_relaxed
+#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
+
+ATOMIC_OPS(xor, xor)
+#define atomic_xor			atomic_xor
+#define atomic_xor_return_relaxed	atomic_xor_return_relaxed
+#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
+
+ATOMIC_OPS(or, or)
+#define atomic_or			atomic_or
+#define atomic_or_return_relaxed	atomic_or_return_relaxed
+#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
+
+ATOMIC_OPS(sub, rsub)
+#define atomic_sub			atomic_sub
+#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
+#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
+
+static inline int atomic_inc_return_relaxed(atomic_t *v)
+{
+	int result, tmp;
+
+	__asm__ __volatile__ (
+		/* load conditional address in %2 to %0 */
+		"1:	lwx	%0, %2, r0;\n"
+		/* increment counter by 1 */
+		"	addi	%0, %0, 1;\n"
+		/* attempt store */
+		"	swx	%0, %2, r0;\n"
+		/* checking msr carry flag */
+		"	addic	%1, r0, 0;\n"
+		/* store failed (MSR[C] set)? try again */
+		"	bnei	%1, 1b;\n"
+		/* Outputs: result value */
+		: "=&r" (result), "=&r" (tmp)
+		/* Inputs: counter address */
+		: "r"   (&v->counter)
+		: "cc", "memory"
+	);
+
+	return result;
+}
+#define atomic_inc_return_relaxed	atomic_inc_return_relaxed
+
+#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)
+
+static inline int atomic_dec_return(atomic_t *v)
+{
+	int result, tmp;
+
+	__asm__ __volatile__ (
+		/* load conditional address in %2 to %0 */
+		"1:	lwx	%0, %2, r0;\n"
+		/* decrement counter by 1 */
+		"	addi	%0, %0, -1;\n"
+		/* attempt store */
+		"	swx	%0, %2, r0;\n"
+		/* checking msr carry flag */
+		"	addic	%1, r0, 0;\n"
+		/* store failed (MSR[C] set)? try again */
+		"	bnei	%1, 1b;\n"
+		/* Outputs: result value */
+		: "=&r" (result), "=&r" (tmp)
+		/* Inputs: counter address */
+		: "r"   (&v->counter)
+		: "cc", "memory"
+	);
+
+	return result;
+}
+#define atomic_dec_return	atomic_dec_return
+
+static inline void atomic_dec(atomic_t *v)
+{
+	atomic_dec_return(v);
+}
+#define atomic_dec	atomic_dec
+
+#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
+#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)
+
+#define atomic_cmpxchg(v, o, n)	(cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
+
+/**
+ * __atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not @u.
+ * Returns the old value of @v.
+ */
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int result, tmp;
+
+	__asm__ __volatile__ (
+		/* load conditional address in %2 to %0 */
+		"1: lwx	 %0, %2, r0;\n"
+		/* compare loaded value with u */
+		"   cmp   %1, %0, %3;\n"
+		/* equal to u, don't increment */
+		"   beqid %1, 2f;\n"
+		/* increment counter by i */
+		"   add   %1, %0, %4;\n"
+		/* attempt store of new value */
+		"   swx   %1, %2, r0;\n"
+		/* checking msr carry flag */
+		"   addic %1, r0, 0;\n"
+		/* store failed (MSR[C] set)? try again */
+		"   bnei  %1, 1b;\n"
+		"2:"
+		/* Outputs: result value */
+		: "=&r" (result), "=&r" (tmp)
+		/* Inputs: counter address, old, new */
+		: "r"   (&v->counter), "r" (u), "r" (a)
+		: "cc", "memory"
+	);
+
+	return result;
+}
 
 /*
  * Atomically test *v and decrement if it is greater than 0.
- * The function returns the old value of *v minus 1.
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
  */
 static inline int atomic_dec_if_positive(atomic_t *v)
 {
-	unsigned long flags;
-	int res;
+	int result, tmp;
 
-	local_irq_save(flags);
-	res = v->counter - 1;
-	if (res >= 0)
-		v->counter = res;
-	local_irq_restore(flags);
+	__asm__ __volatile__ (
+		/* load conditional address in %2 to %0 */
+		"1:	lwx	%0, %2, r0;\n"
+		/* decrement counter by 1 */
+		"	addi	%0, %0, -1;\n"
+		/* if < 0, abort (*v was <= 0) */
+		"	blti	%0, 2f;\n"
+		/* attempt store of new value*/
+		"	swx	%0, %2, r0;\n"
+		/* checking msr carry flag */
+		"	addic	%1, r0, 0;\n"
+		/* store failed (MSR[C] set)? try again */
+		"	bnei	%1, 1b;\n"
+		"2: "
+		/* Outputs: result value */
+		: "=&r" (result), "=&r" (tmp)
+		/* Inputs: counter address */
+		: "r"   (&v->counter)
+		: "cc", "memory"
+	);
 
-	return res;
+	return result;
 }
-#define atomic_dec_if_positive atomic_dec_if_positive
+#define atomic_dec_if_positive	atomic_dec_if_positive
+
+#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)
+
+#include <asm-generic/atomic64.h>
 
 #endif /* _ASM_MICROBLAZE_ATOMIC_H */
-- 
2.25.0


Thread overview: 34+ messages
2020-02-12 15:42 [PATCH 0/7] microblaze: Define SMP safe operations Michal Simek
2020-02-12 15:42 ` [PATCH 1/7] microblaze: timer: Don't use cpu timer setting Michal Simek
2020-02-12 15:42 ` [PATCH 2/7] microblaze: Make cpuinfo structure SMP aware Michal Simek
2020-02-12 20:42   ` Arnd Bergmann
2020-02-12 15:42 ` [PATCH 3/7] microblaze: Define SMP safe bit operations Michal Simek
2020-02-12 15:53   ` Peter Zijlstra
2020-02-13  8:42     ` Michal Simek
2020-02-13  9:01       ` Stefan Asserhall
2020-02-13  9:11         ` Peter Zijlstra
2020-02-13  9:24           ` Stefan Asserhall
2020-02-12 15:42 ` [PATCH 4/7] microblaze: Add SMP implementation of xchg and cmpxchg Michal Simek
2020-02-12 15:42 ` [PATCH 5/7] microblaze: Remove disabling IRQ while pte_update() run Michal Simek
2020-02-12 15:42 ` [PATCH 6/7] microblaze: Implement architecture spinlock Michal Simek
2020-02-12 15:47   ` Peter Zijlstra
2020-02-13  7:51     ` Michal Simek
2020-02-13  8:00       ` Peter Zijlstra
2020-02-12 15:42 ` Michal Simek [this message]
2020-02-12 15:55   ` [PATCH 7/7] microblaze: Do atomic operations by using exclusive ops Peter Zijlstra
2020-02-13  8:06     ` Michal Simek
2020-02-13  8:58       ` Peter Zijlstra
2020-02-13  9:16         ` Peter Zijlstra
2020-02-13 10:04           ` Will Deacon
2020-02-13 10:14             ` Stefan Asserhall
2020-02-13 10:20               ` Will Deacon
2020-02-13 10:15             ` Peter Zijlstra
2020-02-13 11:34         ` Boqun Feng
2020-02-13 11:38           ` Boqun Feng
2020-02-13 13:51             ` Andrea Parri
2020-02-13 14:01               ` Andrea Parri
2020-02-12 16:08 ` [PATCH 0/7] microblaze: Define SMP safe operations Peter Zijlstra
2020-02-12 16:38   ` Peter Zijlstra
2020-02-13  7:49   ` Michal Simek
2020-02-13  8:11     ` Peter Zijlstra
2020-02-13  8:12       ` Michal Simek
