From: Guo Ren <ren_guo@c-sky.com>
To: akpm@linux-foundation.org, arnd@arndb.de,
	daniel.lezcano@linaro.org, davem@davemloft.net,
	gregkh@linuxfoundation.org, jason@lakedaemon.net,
	marc.zyngier@arm.com, mark.rutland@arm.com,
	mchehab+samsung@kernel.org, peterz@infradead.org,
	robh@kernel.org, robh+dt@kernel.org, tglx@linutronix.de
Cc: green.hu@gmail.com, linux-kernel@vger.kernel.org,
	linux-arch@vger.kernel.org, devicetree@vger.kernel.org,
	c-sky_gcc_upstream@c-sky.com, Guo Ren <ren_guo@c-sky.com>
Subject: [PATCH V7 11/20] csky: Atomic operations
Date: Fri,  5 Oct 2018 13:41:53 +0800
Message-ID: <343d5d408f3346c5de6db7e0cc98719bcd65d337.1538715563.git.ren_guo@c-sky.com>
In-Reply-To: <cover.1538715563.git.ren_guo@c-sky.com>

This patch adds the atomic, cmpxchg and spinlock implementations for csky.

Changelog:
 - SMP supported
 - ticket lock supported (a generic sketch of the idea is shown below)
 - queued rwlock supported
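
The ticket lock in asm/spinlock.h follows the usual take-a-ticket scheme:
a CPU atomically takes the next ticket and spins until the owner field
reaches its ticket number. As a rough illustration only (plain C with GCC
atomic builtins and illustrative names, not the actual csky ldex/stex
sequence used by the patch):

	struct ticket_lock {
		unsigned short owner;	/* ticket currently being served */
		unsigned short next;	/* next ticket to be handed out */
	};

	static void ticket_lock(struct ticket_lock *lock)
	{
		/* atomically take a ticket, like the ldex/add/stex loop */
		unsigned short me =
			__atomic_fetch_add(&lock->next, 1, __ATOMIC_ACQUIRE);

		/* spin until our ticket is being served */
		while (__atomic_load_n(&lock->owner, __ATOMIC_ACQUIRE) != me)
			;
	}

	static void ticket_unlock(struct ticket_lock *lock)
	{
		/* hand the lock to the next waiter */
		__atomic_store_n(&lock->owner, lock->owner + 1,
				 __ATOMIC_RELEASE);
	}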

Signed-off-by: Guo Ren <ren_guo@c-sky.com>
---
 arch/csky/include/asm/atomic.h         | 212 +++++++++++++++++++++++++
 arch/csky/include/asm/cmpxchg.h        |  73 +++++++++
 arch/csky/include/asm/spinlock.h       | 274 +++++++++++++++++++++++++++++++++
 arch/csky/include/asm/spinlock_types.h |  37 +++++
 arch/csky/kernel/atomic.S              |  87 +++++++++++
 5 files changed, 683 insertions(+)
 create mode 100644 arch/csky/include/asm/atomic.h
 create mode 100644 arch/csky/include/asm/cmpxchg.h
 create mode 100644 arch/csky/include/asm/spinlock.h
 create mode 100644 arch/csky/include/asm/spinlock_types.h
 create mode 100644 arch/csky/kernel/atomic.S

diff --git a/arch/csky/include/asm/atomic.h b/arch/csky/include/asm/atomic.h
new file mode 100644
index 0000000..e369d73
--- /dev/null
+++ b/arch/csky/include/asm/atomic.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_ATOMIC_H
+#define __ASM_CSKY_ATOMIC_H
+
+#include <linux/version.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
+#ifdef CONFIG_CPU_HAS_LDSTEX
+
+#define __atomic_add_unless __atomic_add_unless
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+	unsigned long tmp, ret;
+
+	smp_mb();
+
+	asm volatile (
+	"1:	ldex.w		%0, (%3) \n"
+	"	mov		%1, %0   \n"
+	"	cmpne		%0, %4   \n"
+	"	bf		2f	 \n"
+	"	add		%0, %2   \n"
+	"	stex.w		%0, (%3) \n"
+	"	bez		%0, 1b   \n"
+	"2:				 \n"
+		: "=&r" (tmp), "=&r" (ret)
+		: "r" (a), "r"(&v->counter), "r"(u)
+		: "memory");
+
+	if (ret != u)
+		smp_mb();
+
+	return ret;
+}
+
+#define ATOMIC_OP(op, c_op)						\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	unsigned long tmp;						\
+									\
+	asm volatile (							\
+	"1:	ldex.w		%0, (%2) \n"				\
+	"	" #op "		%0, %1   \n"				\
+	"	stex.w		%0, (%2) \n"				\
+	"	bez		%0, 1b   \n"				\
+		: "=&r" (tmp)						\
+		: "r" (i), "r"(&v->counter)				\
+		: "memory");						\
+}
+
+#define ATOMIC_OP_RETURN(op, c_op)					\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	unsigned long tmp, ret;						\
+									\
+	smp_mb();							\
+	asm volatile (							\
+	"1:	ldex.w		%0, (%3) \n"				\
+	"	" #op "		%0, %2   \n"				\
+	"	mov		%1, %0   \n"				\
+	"	stex.w		%0, (%3) \n"				\
+	"	bez		%0, 1b   \n"				\
+		: "=&r" (tmp), "=&r" (ret)				\
+		: "r" (i), "r"(&v->counter)				\
+		: "memory");						\
+	smp_mb();							\
+									\
+	return ret;							\
+}
+
+#define ATOMIC_FETCH_OP(op, c_op)					\
+static inline int atomic_fetch_##op(int i, atomic_t *v)			\
+{									\
+	unsigned long tmp, ret;						\
+									\
+	smp_mb();							\
+	asm volatile (							\
+	"1:	ldex.w		%0, (%3) \n"				\
+	"	mov		%1, %0   \n"				\
+	"	" #op "		%0, %2   \n"				\
+	"	stex.w		%0, (%3) \n"				\
+	"	bez		%0, 1b   \n"				\
+		: "=&r" (tmp), "=&r" (ret)				\
+		: "r" (i), "r"(&v->counter)				\
+		: "memory");						\
+	smp_mb();							\
+									\
+	return ret;							\
+}
+
+#else /* CONFIG_CPU_HAS_LDSTEX */
+
+#include <linux/irqflags.h>
+
+#define __atomic_add_unless __atomic_add_unless
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+	unsigned long tmp, ret, flags;
+
+	raw_local_irq_save(flags);
+
+	asm volatile (
+	"	ldw		%0, (%3) \n"
+	"	mov		%1, %0   \n"
+	"	cmpne		%0, %4   \n"
+	"	bf		2f	 \n"
+	"	add		%0, %2   \n"
+	"	stw		%0, (%3) \n"
+	"2:				 \n"
+		: "=&r" (tmp), "=&r" (ret)
+		: "r" (a), "r"(&v->counter), "r"(u)
+		: "memory");
+
+	raw_local_irq_restore(flags);
+
+	return ret;
+}
+
+#define ATOMIC_OP(op, c_op)						\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	unsigned long tmp, flags;					\
+									\
+	raw_local_irq_save(flags);					\
+									\
+	asm volatile (							\
+	"	ldw		%0, (%2) \n"				\
+	"	" #op "		%0, %1   \n"				\
+	"	stw		%0, (%2) \n"				\
+		: "=&r" (tmp)						\
+		: "r" (i), "r"(&v->counter)				\
+		: "memory");						\
+									\
+	raw_local_irq_restore(flags);					\
+}
+
+#define ATOMIC_OP_RETURN(op, c_op)					\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	unsigned long tmp, ret, flags;					\
+									\
+	raw_local_irq_save(flags);					\
+									\
+	asm volatile (							\
+	"	ldw		%0, (%3) \n"				\
+	"	" #op "		%0, %2   \n"				\
+	"	stw		%0, (%3) \n"				\
+	"	mov		%1, %0   \n"				\
+		: "=&r" (tmp), "=&r" (ret)				\
+		: "r" (i), "r"(&v->counter)				\
+		: "memory");						\
+									\
+	raw_local_irq_restore(flags);					\
+									\
+	return ret;							\
+}
+
+#define ATOMIC_FETCH_OP(op, c_op)					\
+static inline int atomic_fetch_##op(int i, atomic_t *v)			\
+{									\
+	unsigned long tmp, ret, flags;					\
+									\
+	raw_local_irq_save(flags);					\
+									\
+	asm volatile (							\
+	"	ldw		%0, (%3) \n"				\
+	"	mov		%1, %0   \n"				\
+	"	" #op "		%0, %2   \n"				\
+	"	stw		%0, (%3) \n"				\
+		: "=&r" (tmp), "=&r" (ret)				\
+		: "r" (i), "r"(&v->counter)				\
+		: "memory");						\
+									\
+	raw_local_irq_restore(flags);					\
+									\
+	return ret;							\
+}
+
+#endif /* CONFIG_CPU_HAS_LDSTEX */
+
+#define atomic_add_return atomic_add_return
+ATOMIC_OP_RETURN(add, +)
+#define atomic_sub_return atomic_sub_return
+ATOMIC_OP_RETURN(sub, -)
+
+#define atomic_fetch_add atomic_fetch_add
+ATOMIC_FETCH_OP(add, +)
+#define atomic_fetch_sub atomic_fetch_sub
+ATOMIC_FETCH_OP(sub, -)
+#define atomic_fetch_and atomic_fetch_and
+ATOMIC_FETCH_OP(and, &)
+#define atomic_fetch_or atomic_fetch_or
+ATOMIC_FETCH_OP(or, |)
+#define atomic_fetch_xor atomic_fetch_xor
+ATOMIC_FETCH_OP(xor, ^)
+
+#define atomic_and atomic_and
+ATOMIC_OP(and, &)
+#define atomic_or atomic_or
+ATOMIC_OP(or, |)
+#define atomic_xor atomic_xor
+ATOMIC_OP(xor, ^)
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#include <asm-generic/atomic.h>
+
+#endif /* __ASM_CSKY_ATOMIC_H */
diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h
new file mode 100644
index 0000000..8922453
--- /dev/null
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_CMPXCHG_H
+#define __ASM_CSKY_CMPXCHG_H
+
+#ifdef CONFIG_CPU_HAS_LDSTEX
+#include <asm/barrier.h>
+
+extern void __bad_xchg(void);
+
+#define __xchg(new, ptr, size)					\
+({								\
+	__typeof__(ptr) __ptr = (ptr);				\
+	__typeof__(new) __new = (new);				\
+	__typeof__(*(ptr)) __ret;				\
+	unsigned long tmp;					\
+	switch (size) {						\
+	case 4:							\
+		smp_mb();					\
+		asm volatile (					\
+		"1:	ldex.w		%0, (%3) \n"		\
+		"	mov		%1, %2   \n"		\
+		"	stex.w		%1, (%3) \n"		\
+		"	bez		%1, 1b   \n"		\
+			: "=&r" (__ret), "=&r" (tmp)		\
+			: "r" (__new), "r"(__ptr)		\
+			:);					\
+		smp_mb();					\
+		break;						\
+	default:						\
+		__bad_xchg();					\
+	}							\
+	__ret;							\
+})
+
+#define xchg(ptr, x)	(__xchg((x), (ptr), sizeof(*(ptr))))
+
+#define __cmpxchg(ptr, old, new, size)				\
+({								\
+	__typeof__(ptr) __ptr = (ptr);				\
+	__typeof__(new) __new = (new);				\
+	__typeof__(new) __tmp;					\
+	__typeof__(old) __old = (old);				\
+	__typeof__(*(ptr)) __ret;				\
+	switch (size) {						\
+	case 4:							\
+		smp_mb();					\
+		asm volatile (					\
+		"1:	ldex.w		%0, (%3) \n"		\
+		"	cmpne		%0, %4   \n"		\
+		"	bt		2f       \n"		\
+		"	mov		%1, %2   \n"		\
+		"	stex.w		%1, (%3) \n"		\
+		"	bez		%1, 1b   \n"		\
+		"2:				 \n"		\
+			: "=&r" (__ret), "=&r" (__tmp)		\
+			: "r" (__new), "r"(__ptr), "r"(__old)	\
+			:);					\
+		smp_mb();					\
+		break;						\
+	default:						\
+		__bad_xchg();					\
+	}							\
+	__ret;							\
+})
+
+#define cmpxchg(ptr, o, n) \
+	(__cmpxchg((ptr), (o), (n), sizeof(*(ptr))))
+#else
+#include <asm-generic/cmpxchg.h>
+#endif
+
+#endif /* __ASM_CSKY_CMPXCHG_H */
diff --git a/arch/csky/include/asm/spinlock.h b/arch/csky/include/asm/spinlock.h
new file mode 100644
index 0000000..0474603
--- /dev/null
+++ b/arch/csky/include/asm/spinlock.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SPINLOCK_H
+#define __ASM_CSKY_SPINLOCK_H
+
+#include <linux/spinlock_types.h>
+#include <asm/barrier.h>
+
+#ifdef CONFIG_QUEUED_RWLOCKS
+
+/*
+ * Ticket-based spin-locking.
+ */
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	arch_spinlock_t lockval;
+	u32 ticket_next = 1 << TICKET_NEXT;
+	u32 *p = &lock->lock;
+	u32 tmp;
+
+	asm volatile (
+		"1:	ldex.w		%0, (%2) \n"
+		"	mov		%1, %0	 \n"
+		"	add		%0, %3	 \n"
+		"	stex.w		%0, (%2) \n"
+		"	bez		%0, 1b   \n"
+		: "=&r" (tmp), "=&r" (lockval)
+		: "r"(p), "r"(ticket_next)
+		: "cc");
+
+	while (lockval.tickets.next != lockval.tickets.owner)
+		lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
+
+	smp_mb();
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+	u32 tmp, contended, res;
+	u32 ticket_next = 1 << TICKET_NEXT;
+	u32 *p = &lock->lock;
+
+	do {
+		asm volatile (
+		"	ldex.w		%0, (%3)   \n"
+		"	movi		%2, 1	   \n"
+		"	rotli		%1, %0, 16 \n"
+		"	cmpne		%1, %0     \n"
+		"	bt		1f         \n"
+		"	movi		%2, 0	   \n"
+		"	add		%0, %0, %4 \n"
+		"	stex.w		%0, (%3)   \n"
+		"1:				   \n"
+		: "=&r" (res), "=&r" (tmp), "=&r" (contended)
+		: "r"(p), "r"(ticket_next)
+		: "cc");
+	} while (!res);
+
+	if (!contended)
+		smp_mb();
+
+	return !contended;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	smp_mb();
+	lock->tickets.owner++;
+}
+
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.tickets.owner == lock.tickets.next;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	return !arch_spin_value_unlocked(READ_ONCE(*lock));
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+	struct __raw_tickets tickets = READ_ONCE(lock->tickets);
+
+	return (tickets.next - tickets.owner) > 1;
+}
+#define arch_spin_is_contended	arch_spin_is_contended
+
+#include <asm/qrwlock.h>
+
+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock()	smp_mb()
+
+#else /* CONFIG_QUEUED_RWLOCKS */
+
+/*
+ * Test-and-set spin-locking.
+ */
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	u32 *p = &lock->lock;
+	u32 tmp;
+
+	asm volatile (
+		"1:	ldex.w		%0, (%1) \n"
+		"	bnez		%0, 1b   \n"
+		"	movi		%0, 1    \n"
+		"	stex.w		%0, (%1) \n"
+		"	bez		%0, 1b   \n"
+		: "=&r" (tmp)
+		: "r"(p)
+		: "cc");
+	smp_mb();
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	u32 *p = &lock->lock;
+	u32 tmp;
+
+	smp_mb();
+	asm volatile (
+		"	movi		%0, 0    \n"
+		"	stw		%0, (%1) \n"
+		: "=&r" (tmp)
+		: "r"(p)
+		: "cc");
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+	u32 *p = &lock->lock;
+	u32 tmp;
+
+	asm volatile (
+		"1:	ldex.w		%0, (%1) \n"
+		"	bnez		%0, 2f   \n"
+		"	movi		%0, 1    \n"
+		"	stex.w		%0, (%1) \n"
+		"	bez		%0, 1b   \n"
+		"	movi		%0, 0    \n"
+		"2:				 \n"
+		: "=&r" (tmp)
+		: "r"(p)
+		: "cc");
+
+	if (!tmp)
+		smp_mb();
+
+	return !tmp;
+}
+
+#define arch_spin_is_locked(x)	(READ_ONCE((x)->lock) != 0)
+
+/*
+ * read lock/unlock/trylock
+ *
+ * lock == 0: unlocked, lock > 0: number of readers, lock < 0: write-locked.
+ */
+static inline void arch_read_lock(arch_rwlock_t *lock)
+{
+	u32 *p = &lock->lock;
+	u32 tmp;
+
+	asm volatile (
+		"1:	ldex.w		%0, (%1) \n"
+		"	blz		%0, 1b   \n"
+		"	addi		%0, 1    \n"
+		"	stex.w		%0, (%1) \n"
+		"	bez		%0, 1b   \n"
+		: "=&r" (tmp)
+		: "r"(p)
+		: "cc");
+	smp_mb();
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *lock)
+{
+	u32 *p = &lock->lock;
+	u32 tmp;
+
+	smp_mb();
+	asm volatile (
+		"1:	ldex.w		%0, (%1) \n"
+		"	subi		%0, 1    \n"
+		"	stex.w		%0, (%1) \n"
+		"	bez		%0, 1b   \n"
+		: "=&r" (tmp)
+		: "r"(p)
+		: "cc");
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *lock)
+{
+	u32 *p = &lock->lock;
+	u32 tmp;
+
+	asm volatile (
+		"1:	ldex.w		%0, (%1) \n"
+		"	blz		%0, 2f   \n"
+		"	addi		%0, 1    \n"
+		"	stex.w		%0, (%1) \n"
+		"	bez		%0, 1b   \n"
+		"	movi		%0, 0    \n"
+		"2:				 \n"
+		: "=&r" (tmp)
+		: "r"(p)
+		: "cc");
+
+	if (!tmp)
+		smp_mb();
+
+	return !tmp;
+}
+
+/*
+ * write lock/unlock/trylock
+ */
+static inline void arch_write_lock(arch_rwlock_t *lock)
+{
+	u32 *p = &lock->lock;
+	u32 tmp;
+
+	asm volatile (
+		"1:	ldex.w		%0, (%1) \n"
+		"	bnez		%0, 1b   \n"
+		"	subi		%0, 1    \n"
+		"	stex.w		%0, (%1) \n"
+		"	bez		%0, 1b   \n"
+		: "=&r" (tmp)
+		: "r"(p)
+		: "cc");
+	smp_mb();
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *lock)
+{
+	u32 *p = &lock->lock;
+	u32 tmp;
+
+	smp_mb();
+	asm volatile (
+		"1:	ldex.w		%0, (%1) \n"
+		"	movi		%0, 0    \n"
+		"	stex.w		%0, (%1) \n"
+		"	bez		%0, 1b   \n"
+		: "=&r" (tmp)
+		: "r"(p)
+		: "cc");
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *lock)
+{
+	u32 *p = &lock->lock;
+	u32 tmp;
+
+	asm volatile (
+		"1:	ldex.w		%0, (%1) \n"
+		"	bnez		%0, 2f   \n"
+		"	subi		%0, 1    \n"
+		"	stex.w		%0, (%1) \n"
+		"	bez		%0, 1b   \n"
+		"	movi		%0, 0    \n"
+		"2:				 \n"
+		: "=&r" (tmp)
+		: "r"(p)
+		: "cc");
+
+	if (!tmp)
+		smp_mb();
+
+	return !tmp;
+}
+
+#endif /* CONFIG_QUEUED_RWLOCKS */
+#endif /* __ASM_CSKY_SPINLOCK_H */
diff --git a/arch/csky/include/asm/spinlock_types.h b/arch/csky/include/asm/spinlock_types.h
new file mode 100644
index 0000000..88b8243
--- /dev/null
+++ b/arch/csky/include/asm/spinlock_types.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SPINLOCK_TYPES_H
+#define __ASM_CSKY_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+#define TICKET_NEXT	16
+
+typedef struct {
+	union {
+		u32 lock;
+		struct __raw_tickets {
+			/* little endian */
+			u16 owner;
+			u16 next;
+		} tickets;
+	};
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }
+
+#ifdef CONFIG_QUEUED_RWLOCKS
+#include <asm-generic/qrwlock_types.h>
+
+#else /* !CONFIG_QUEUED_RWLOCKS */
+
+typedef struct {
+	u32 lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
+
+#endif /* CONFIG_QUEUED_RWLOCKS */
+#endif /* __ASM_CSKY_SPINLOCK_TYPES_H */
diff --git a/arch/csky/kernel/atomic.S b/arch/csky/kernel/atomic.S
new file mode 100644
index 0000000..d2357c8
--- /dev/null
+++ b/arch/csky/kernel/atomic.S
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/linkage.h>
+#include <abi/entry.h>
+
+.text
+
+/*
+ * int csky_cmpxchg(int oldval, int newval, int *ptr)
+ *
+ * If *ptr != oldval, return 1;
+ * else set *ptr = newval and return 0.
+ */
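+/*
+ * Rough C equivalent of the contract above (illustration only; the real
+ * implementation is the trap entry below):
+ *
+ *	int csky_cmpxchg(int oldval, int newval, int *ptr)
+ *	{
+ *		if (*ptr != oldval)
+ *			return 1;
+ *		*ptr = newval;
+ *		return 0;
+ *	}
+ */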
+#ifdef CONFIG_CPU_HAS_LDSTEX
+ENTRY(csky_cmpxchg)
+	USPTOKSP
+	mfcr	a3, epc
+	INCTRAP	a3
+
+	subi    sp, 8
+	stw     a3, (sp, 0)
+	mfcr    a3, epsr
+	stw     a3, (sp, 4)
+
+	psrset	ee
+1:
+	ldex	a3, (a2)
+	cmpne	a0, a3
+	bt16	2f
+	mov	a3, a1
+	stex	a3, (a2)
+	bez	a3, 1b
+2:
+	sync.is
+	mvc	a0
+	ldw	a3, (sp, 0)
+	mtcr	a3, epc
+	ldw     a3, (sp, 4)
+	mtcr	a3, epsr
+	addi	sp, 8
+	KSPTOUSP
+	rte
+END(csky_cmpxchg)
+#else
+ENTRY(csky_cmpxchg)
+	USPTOKSP
+	mfcr	a3, epc
+	INCTRAP	a3
+
+	subi    sp, 8
+	stw     a3, (sp, 0)
+	mfcr    a3, epsr
+	stw     a3, (sp, 4)
+
+	psrset	ee
+1:
+	ldw	a3, (a2)
+	cmpne	a0, a3
+	bt16	3f
+2:
+	stw	a1, (a2)
+3:
+	mvc	a0
+	ldw	a3, (sp, 0)
+	mtcr	a3, epc
+	ldw     a3, (sp, 4)
+	mtcr	a3, epsr
+	addi	sp, 8
+	KSPTOUSP
+	rte
+END(csky_cmpxchg)
+
+/*
+ * Called from the tlbmodified exception handler: if the fault hit the
+ * store at 2b, rewind the saved PC to 1b so the ldw/cmpne/stw sequence
+ * is restarted once the fault has been handled.
+ */
+ENTRY(csky_cmpxchg_fixup)
+	mfcr	a0, epc
+	lrw	a1, 2b
+	cmpne	a1, a0
+	bt	1f
+	subi	a1, (2b - 1b)
+	stw	a1, (sp, LSAVE_PC)
+1:
+	rts
+END(csky_cmpxchg_fixup)
+#endif
-- 
2.7.4

