From: Stafford Horne <shorne@gmail.com>
To: Jonas Bonn <jonas@southpole.se>,
	Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
Cc: linux@roeck-us.net, openrisc@lists.librecores.org,
	linux-kernel@vger.kernel.org,
	Peter Zijlstra <peterz@infradead.org>,
	Stafford Horne <shorne@gmail.com>, Ingo Molnar <mingo@redhat.com>
Subject: [PATCH v3 10/25] openrisc: add spinlock implementation
Date: Wed, 22 Feb 2017 04:11:39 +0900
Message-ID: <9ec913b47790e9412d5b71a5fc52794ce4ebafb9.1487702890.git.shorne@gmail.com>
In-Reply-To: <cover.1487702890.git.shorne@gmail.com>

From: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>

Heavily based on the ARM implementation, this adds
ticket spinlock support for OpenRISC.

Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
[shorne@gmail.com: fix tabs vs space checkpatch warning]
Signed-off-by: Stafford Horne <shorne@gmail.com>
---
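[Not part of the patch, a note for reviewers: below is a minimal C sketch
of the ticket protocol that the l.lwa/l.swa loops in this patch implement,
using GCC __atomic builtins in place of the OpenRISC assembly.  The struct
and function names are made up for illustration; the two 16-bit fields
correspond to TICKET_SHIFT == 16.]

struct ticket_lock_sketch {
	unsigned short next;	/* next ticket to hand out */
	unsigned short owner;	/* ticket currently being served */
};

static void ticket_lock_sketch_lock(struct ticket_lock_sketch *lock)
{
	/* Take a ticket; the asm does this by atomically adding
	 * 1 << TICKET_SHIFT to the combined 32-bit word. */
	unsigned short me = __atomic_fetch_add(&lock->next, 1, __ATOMIC_RELAXED);

	/* Spin until our ticket is being served, like the C loop that
	 * follows the asm in arch_spin_lock(). */
	while (__atomic_load_n(&lock->owner, __ATOMIC_RELAXED) != me)
		;	/* cpu_relax() would go here */

	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* smp_mb() */
}

static void ticket_lock_sketch_unlock(struct ticket_lock_sketch *lock)
{
	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* smp_mb() */
	/* Only the holder writes owner, so a plain increment is enough,
	 * matching arch_spin_unlock(). */
	lock->owner++;
}

Serving tickets in FIFO order is what makes the lock fair; the asm gets the
same atomic increment from the l.swa retry loop, since l.swa only succeeds
when no other store has hit the word after the matching l.lwa.
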
 arch/openrisc/include/asm/spinlock.h       | 232 ++++++++++++++++++++++++++++-
 arch/openrisc/include/asm/spinlock_types.h |  28 ++++
 2 files changed, 259 insertions(+), 1 deletion(-)
 create mode 100644 arch/openrisc/include/asm/spinlock_types.h
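
[Also not part of the patch: the rwlock word used below encodes its state
as 0 for unlocked, bit 31 set for a writer, and a positive reader count
otherwise.  A hedged C sketch of the write side, again with GCC builtins
and invented names; rw points at the 32-bit word from arch_rwlock_t.]

static int write_trylock_sketch(unsigned int *rw)
{
	unsigned int expected = 0;

	/* Claim the lock by setting bit 31, but only if the word is 0,
	 * i.e. no writer and no readers; this mirrors the l.sfeqi/l.swa
	 * sequence in arch_write_trylock(). */
	return __atomic_compare_exchange_n(rw, &expected, 0x80000000U, 0,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static void write_unlock_sketch(unsigned int *rw)
{
	/* The writer holds the lock exclusively, so storing zero is
	 * enough, as the RWLOCKS comment in the patch notes. */
	__atomic_store_n(rw, 0, __ATOMIC_RELEASE);
}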

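[And the read side of the same sketch: readers bump the count but back
off while the writer bit is set, which is what the l.sfltsi test in
arch_read_lock()/arch_read_trylock() checks for.  Names are illustrative.]

static int read_trylock_sketch(unsigned int *rw)
{
	unsigned int old, newval;

	do {
		old = __atomic_load_n(rw, __ATOMIC_RELAXED);
		if (old & 0x80000000U)
			return 0;	/* writer present, give up */
		newval = old + 1;	/* one more reader */
	} while (!__atomic_compare_exchange_n(rw, &old, newval, 0,
					      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
	return 1;
}

static void read_unlock_sketch(unsigned int *rw)
{
	/* Drop our reader reference; other readers may still hold the
	 * lock, so this must be an atomic decrement, not a plain store. */
	__atomic_fetch_sub(rw, 1, __ATOMIC_RELEASE);
}
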
diff --git a/arch/openrisc/include/asm/spinlock.h b/arch/openrisc/include/asm/spinlock.h
index fd00a3a..adf62a6 100644
--- a/arch/openrisc/include/asm/spinlock.h
+++ b/arch/openrisc/include/asm/spinlock.h
@@ -9,6 +9,9 @@
  * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
  * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
  * et al.
+ * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+ *
+ * Ticket spinlocks, based on the ARM implementation.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -19,6 +22,233 @@
 #ifndef __ASM_OPENRISC_SPINLOCK_H
 #define __ASM_OPENRISC_SPINLOCK_H
 
-#error "or32 doesn't do SMP yet"
+#include <asm/spinlock_types.h>
+
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	u32 newval;
+	arch_spinlock_t lockval;
+
+	__asm__ __volatile__(
+		"1:	l.lwa	%0, 0(%2)	\n"
+		"	l.add	%1, %0, %3	\n"
+		"	l.swa	0(%2), %1	\n"
+		"	l.bnf	1b		\n"
+		"	 l.nop			\n"
+		: "=&r" (lockval), "=&r" (newval)
+		: "r" (&lock->slock), "r" (1 << TICKET_SHIFT)
+		: "cc", "memory");
+
+	while (lockval.tickets.next != lockval.tickets.owner)
+		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
+
+	smp_mb();
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+	unsigned long contended, tmp;
+	u32 slock;
+
+	/* contended = (lock->tickets.owner != lock->tickets.next) */
+	__asm__ __volatile__(
+		"1:	l.lwa	%0, 0(%3)	\n"
+		"	l.srli	%1, %0, 16	\n"
+		"	l.andi	%2, %0, 0xffff	\n"
+		"	l.sfeq	%1, %2		\n"
+		"	l.bnf	1f		\n"
+		"	 l.ori	%1, r0, 1	\n"
+		"	l.add	%0, %0, %4	\n"
+		"	l.swa	0(%3), %0	\n"
+		"	l.bnf	1b		\n"
+		"	 l.ori	%1, r0, 0	\n"
+		"1:				\n"
+		: "=&r" (slock), "=&r" (contended), "=&r" (tmp)
+		: "r" (&lock->slock), "r" (1 << TICKET_SHIFT)
+		: "cc", "memory");
+
+	if (!contended) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	smp_mb();
+	lock->tickets.owner++;
+}
+
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.tickets.owner == lock.tickets.next;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
+
+	return (tickets.next - tickets.owner) > 1;
+}
+#define arch_spin_is_contended	arch_spin_is_contended
+
+/*
+ * RWLOCKS
+ *
+ *
+ * Write locks are easy - we just set bit 31.  When unlocking, we can
+ * just write zero since the lock is exclusively held.
+ */
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+		"1:	l.lwa	%0, 0(%1)	\n"
+		"	l.sfeqi	%0, 0		\n"
+		"	l.bnf	1b		\n"
+		"	 l.nop			\n"
+		"	l.swa	0(%1), %2	\n"
+		"	l.bnf	1b		\n"
+		"	 l.nop			\n"
+		"1:				\n"
+		: "=&r" (tmp)
+		: "r" (&rw->lock), "r" (0x80000000)
+		: "cc", "memory");
+
+	smp_mb();
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+	unsigned long contended;
+
+	__asm__ __volatile__(
+		"1:	l.lwa	%0, 0(%1)	\n"
+		"	l.sfeqi	%0, 0		\n"
+		"	l.bnf	1f		\n"
+		"	 l.nop			\n"
+		"	l.swa	 0(%1), %2	\n"
+		"	l.bnf	1b		\n"
+		"	 l.nop			\n"
+		"1:				\n"
+		: "=&r" (contended)
+		: "r" (&rw->lock), "r" (0x80000000)
+		: "cc", "memory");
+
+	if (!contended) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+	smp_mb();
+	rw->lock = 0;
+}
+
+/* write_can_lock - would write_trylock() succeed? */
+#define arch_write_can_lock(x)		(ACCESS_ONCE((x)->lock) == 0)
+
+/*
+ * Read locks are a bit more hairy:
+ *  - Exclusively load the lock value.
+ *  - Increment it.
+ *  - Store new lock value if positive, and we still own this location.
+ *    If the value is negative, we've already failed.
+ *  - If we failed to store the value, we want a negative result.
+ *  - If we failed, try again.
+ * Unlocking is similarly hairy.  We may have multiple read locks
+ * currently active.  However, we know we won't have any write
+ * locks.
+ */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+		"1:	l.lwa		%0, 0(%1)	\n"
+		"	l.sfltsi	%0, -1		\n"
+		"	l.bf		1b		\n"
+		"	 l.addi		%0, %0, 1	\n"
+		"	l.swa		0(%1), %0	\n"
+		"	l.bnf		1b		\n"
+		"	 l.nop				\n"
+		: "=&r" (tmp)
+		: "r" (&rw->lock)
+		: "cc", "memory");
+
+	smp_mb();
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+		"1:	l.lwa	%0, 0(%1)	\n"
+		"	l.addi	%0, %0, -1	\n"
+		"	l.swa	0(%1), %0	\n"
+		"	l.bnf	1b		\n"
+		"	 l.nop			\n"
+		: "=&r" (tmp)
+		: "r" (&rw->lock)
+		: "cc", "memory");
+
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+	unsigned long contended;
+
+	__asm__ __volatile__(
+		"1:	l.lwa		%0, 0(%1)	\n"
+		"	l.sfltsi	%0, -1		\n"
+		"	l.bf		1f		\n"
+		"	 l.addi		%0, %0, 1	\n"
+		"	l.swa		0(%1), %0	\n"
+		"	l.bnf		1b		\n"
+		"	 l.nop				\n"
+		"1:					\n"
+		: "=&r" (contended)
+		: "r" (&rw->lock)
+		: "cc", "memory");
+
+	/* If the lock is negative, then it is already held for write. */
+	if (contended < 0x80000000) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+/* read_can_lock - would read_trylock() succeed? */
+#define arch_read_can_lock(x)		(ACCESS_ONCE((x)->lock) < 0x80000000)
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif
diff --git a/arch/openrisc/include/asm/spinlock_types.h b/arch/openrisc/include/asm/spinlock_types.h
new file mode 100644
index 0000000..bc0de48
--- /dev/null
+++ b/arch/openrisc/include/asm/spinlock_types.h
@@ -0,0 +1,28 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+#define TICKET_SHIFT	16
+
+typedef struct {
+	union {
+		u32 slock;
+		struct __raw_tickets {
+			u16 next;
+			u16 owner;
+		} tickets;
+	};
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }
+
+typedef struct {
+	u32 lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
+
+#endif
-- 
2.9.3
