From: Luca Barbieri <luca@luca-barbieri.com>
To: mingo@elte.hu
Cc: hpa@zytor.com, a.p.zijlstra@chello.nl, akpm@linux-foundation.org,
	linux-kernel@vger.kernel.org,
	Luca Barbieri <luca@luca-barbieri.com>
Subject: [PATCH 07/10] lib: move generic atomic64 to atomic64-impl.h
Date: Wed, 17 Feb 2010 12:42:39 +0100	[thread overview]
Message-ID: <1266406962-17463-8-git-send-email-luca@luca-barbieri.com> (raw)
In-Reply-To: <1266406962-17463-1-git-send-email-luca@luca-barbieri.com>

This patch moves the generic implementation of the atomic64 functions
from lib/atomic64.c to the new header include/asm-generic/atomic64-impl.h,
renaming the functions to generic_atomic64_* and making the size of the
spinlock array overridable via ATOMIC64_NR_LOCKS. The inverted return
value of atomic64_add_unless() is also corrected to the documented
convention (non-zero when the add is performed).

This header will be reused by x86-32 for 386/486 support, since those
CPUs lack the cmpxchg8b instruction that the native implementation
relies on.

Signed-off-by: Luca Barbieri <luca@luca-barbieri.com>
---
 include/asm-generic/atomic64-impl.h |  167 ++++++++++++++++++++++++++++++++
 include/asm-generic/atomic64.h      |   31 ++++--
 lib/atomic64.c                      |  183 +++--------------------------------
 3 files changed, 203 insertions(+), 178 deletions(-)
 create mode 100644 include/asm-generic/atomic64-impl.h
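
A minimal sketch of the intended reuse, assuming a hypothetical consumer
file (the actual x86-32 wiring is done later in this series, and any name
below that this patch does not itself add is an illustrative assumption):

	/* arch/<arch>/lib/atomic64.c: hypothetical user of the new header */
	#include <linux/spinlock.h>
	#include <asm/atomic.h>

	/*
	 * Optional override; it must remain a power of two, since
	 * generic_atomic64_lock_addr() masks with (ATOMIC64_NR_LOCKS - 1).
	 */
	#define ATOMIC64_NR_LOCKS 16

	#include <asm-generic/atomic64-impl.h>

	/*
	 * Exactly one translation unit in the image must provide the lock
	 * array and register the init call, mirroring lib/atomic64.c below.
	 */
	union generic_atomic64_lock generic_atomic64_lock[ATOMIC64_NR_LOCKS]
		__cacheline_aligned_in_smp;
	pure_initcall(init_generic_atomic64_lock);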

diff --git a/include/asm-generic/atomic64-impl.h b/include/asm-generic/atomic64-impl.h
new file mode 100644
index 0000000..a0a76f4
--- /dev/null
+++ b/include/asm-generic/atomic64-impl.h
@@ -0,0 +1,167 @@
+#ifndef _ASM_GENERIC_ATOMIC64_IMPL_H
+#define _ASM_GENERIC_ATOMIC64_IMPL_H
+
+#include <linux/spinlock.h>
+
+/*
+ * We use a hashed array of spinlocks to provide exclusive access
+ * to each atomic64_t variable.  Since this is expected to be used on
+ * systems with small numbers of CPUs (<= 4 or so), we use a
+ * relatively small array of 16 spinlocks to avoid wasting too much
+ * memory on the spinlock array.
+ */
+#ifndef ATOMIC64_NR_LOCKS
+#define ATOMIC64_NR_LOCKS	16
+#endif
+
+/*
+ * Ensure each lock is in a separate cacheline.
+ */
+union generic_atomic64_lock {
+	spinlock_t lock;
+	char pad[L1_CACHE_BYTES];
+};
+
+extern union generic_atomic64_lock generic_atomic64_lock[ATOMIC64_NR_LOCKS] __cacheline_aligned_in_smp;
+
+static inline int init_generic_atomic64_lock(void)
+{
+	int i;
+
+	for (i = 0; i < ATOMIC64_NR_LOCKS; ++i)
+		spin_lock_init(&generic_atomic64_lock[i].lock);
+	return 0;
+}
+
+static inline spinlock_t *generic_atomic64_lock_addr(const atomic64_t *v)
+{
+	unsigned long addr = (unsigned long) v;
+
+	addr >>= L1_CACHE_SHIFT;
+	addr ^= (addr >> 8) ^ (addr >> 16);
+	return &generic_atomic64_lock[addr & (ATOMIC64_NR_LOCKS - 1)].lock;
+}
+
+long long generic_atomic64_read(const atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = generic_atomic64_lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+void generic_atomic64_set(atomic64_t *v, long long i)
+{
+	unsigned long flags;
+	spinlock_t *lock = generic_atomic64_lock_addr(v);
+
+	spin_lock_irqsave(lock, flags);
+	v->counter = i;
+	spin_unlock_irqrestore(lock, flags);
+}
+
+void generic_atomic64_add(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = generic_atomic64_lock_addr(v);
+
+	spin_lock_irqsave(lock, flags);
+	v->counter += a;
+	spin_unlock_irqrestore(lock, flags);
+}
+
+long long generic_atomic64_add_return(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = generic_atomic64_lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter += a;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+void generic_atomic64_sub(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = generic_atomic64_lock_addr(v);
+
+	spin_lock_irqsave(lock, flags);
+	v->counter -= a;
+	spin_unlock_irqrestore(lock, flags);
+}
+
+long long generic_atomic64_sub_return(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = generic_atomic64_lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter -= a;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+long long generic_atomic64_dec_if_positive(atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = generic_atomic64_lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter - 1;
+	if (val >= 0)
+		v->counter = val;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+long long generic_atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+{
+	unsigned long flags;
+	spinlock_t *lock = generic_atomic64_lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter;
+	if (val == o)
+		v->counter = n;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+long long generic_atomic64_xchg(atomic64_t *v, long long new)
+{
+	unsigned long flags;
+	spinlock_t *lock = generic_atomic64_lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter;
+	v->counter = new;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+int generic_atomic64_add_unless(atomic64_t *v, long long a, long long u)
+{
+	unsigned long flags;
+	spinlock_t *lock = generic_atomic64_lock_addr(v);
+	int ret = 0;
+
+	spin_lock_irqsave(lock, flags);
+	if (v->counter != u) {
+		v->counter += a;
+		ret = 1;	/* non-zero when the add was performed */
+	}
+	spin_unlock_irqrestore(lock, flags);
+	return ret;
+}
+
+#endif /* _ASM_GENERIC_ATOMIC64_IMPL_H */
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index b18ce4f..d6775fd 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -18,16 +18,27 @@ typedef struct {
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
-extern long long atomic64_read(const atomic64_t *v);
-extern void	 atomic64_set(atomic64_t *v, long long i);
-extern void	 atomic64_add(long long a, atomic64_t *v);
-extern long long atomic64_add_return(long long a, atomic64_t *v);
-extern void	 atomic64_sub(long long a, atomic64_t *v);
-extern long long atomic64_sub_return(long long a, atomic64_t *v);
-extern long long atomic64_dec_if_positive(atomic64_t *v);
-extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
-extern long long atomic64_xchg(atomic64_t *v, long long new);
-extern int	 atomic64_add_unless(atomic64_t *v, long long a, long long u);
+extern long long generic_atomic64_read(const atomic64_t *v);
+extern void	 generic_atomic64_set(atomic64_t *v, long long i);
+extern void	 generic_atomic64_add(long long a, atomic64_t *v);
+extern long long generic_atomic64_add_return(long long a, atomic64_t *v);
+extern void	 generic_atomic64_sub(long long a, atomic64_t *v);
+extern long long generic_atomic64_sub_return(long long a, atomic64_t *v);
+extern long long generic_atomic64_dec_if_positive(atomic64_t *v);
+extern long long generic_atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
+extern long long generic_atomic64_xchg(atomic64_t *v, long long new);
+extern int	 generic_atomic64_add_unless(atomic64_t *v, long long a, long long u);
+
+#define atomic64_read generic_atomic64_read
+#define atomic64_set generic_atomic64_set
+#define atomic64_add generic_atomic64_add
+#define atomic64_add_return generic_atomic64_add_return
+#define atomic64_sub generic_atomic64_sub
+#define atomic64_sub_return generic_atomic64_sub_return
+#define atomic64_dec_if_positive generic_atomic64_dec_if_positive
+#define atomic64_cmpxchg generic_atomic64_cmpxchg
+#define atomic64_xchg generic_atomic64_xchg
+#define atomic64_add_unless generic_atomic64_add_unless
 
 #define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v)			atomic64_add(1LL, (v))
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 8bee16e..2565f63 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -16,171 +16,18 @@
 #include <linux/module.h>
 #include <asm/atomic.h>
 
-/*
- * We use a hashed array of spinlocks to provide exclusive access
- * to each atomic64_t variable.  Since this is expected to used on
- * systems with small numbers of CPUs (<= 4 or so), we use a
- * relatively small array of 16 spinlocks to avoid wasting too much
- * memory on the spinlock array.
- */
-#define NR_LOCKS	16
-
-/*
- * Ensure each lock is in a separate cacheline.
- */
-static union {
-	spinlock_t lock;
-	char pad[L1_CACHE_BYTES];
-} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
-
-static inline spinlock_t *lock_addr(const atomic64_t *v)
-{
-	unsigned long addr = (unsigned long) v;
-
-	addr >>= L1_CACHE_SHIFT;
-	addr ^= (addr >> 8) ^ (addr >> 16);
-	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
-}
-
-long long atomic64_read(const atomic64_t *v)
-{
-	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
-	long long val;
-
-	spin_lock_irqsave(lock, flags);
-	val = v->counter;
-	spin_unlock_irqrestore(lock, flags);
-	return val;
-}
-EXPORT_SYMBOL(atomic64_read);
-
-void atomic64_set(atomic64_t *v, long long i)
-{
-	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
-
-	spin_lock_irqsave(lock, flags);
-	v->counter = i;
-	spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(atomic64_set);
-
-void atomic64_add(long long a, atomic64_t *v)
-{
-	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
-
-	spin_lock_irqsave(lock, flags);
-	v->counter += a;
-	spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(atomic64_add);
-
-long long atomic64_add_return(long long a, atomic64_t *v)
-{
-	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
-	long long val;
-
-	spin_lock_irqsave(lock, flags);
-	val = v->counter += a;
-	spin_unlock_irqrestore(lock, flags);
-	return val;
-}
-EXPORT_SYMBOL(atomic64_add_return);
-
-void atomic64_sub(long long a, atomic64_t *v)
-{
-	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
-
-	spin_lock_irqsave(lock, flags);
-	v->counter -= a;
-	spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(atomic64_sub);
-
-long long atomic64_sub_return(long long a, atomic64_t *v)
-{
-	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
-	long long val;
-
-	spin_lock_irqsave(lock, flags);
-	val = v->counter -= a;
-	spin_unlock_irqrestore(lock, flags);
-	return val;
-}
-EXPORT_SYMBOL(atomic64_sub_return);
-
-long long atomic64_dec_if_positive(atomic64_t *v)
-{
-	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
-	long long val;
-
-	spin_lock_irqsave(lock, flags);
-	val = v->counter - 1;
-	if (val >= 0)
-		v->counter = val;
-	spin_unlock_irqrestore(lock, flags);
-	return val;
-}
-EXPORT_SYMBOL(atomic64_dec_if_positive);
-
-long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
-{
-	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
-	long long val;
-
-	spin_lock_irqsave(lock, flags);
-	val = v->counter;
-	if (val == o)
-		v->counter = n;
-	spin_unlock_irqrestore(lock, flags);
-	return val;
-}
-EXPORT_SYMBOL(atomic64_cmpxchg);
-
-long long atomic64_xchg(atomic64_t *v, long long new)
-{
-	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
-	long long val;
-
-	spin_lock_irqsave(lock, flags);
-	val = v->counter;
-	v->counter = new;
-	spin_unlock_irqrestore(lock, flags);
-	return val;
-}
-EXPORT_SYMBOL(atomic64_xchg);
-
-int atomic64_add_unless(atomic64_t *v, long long a, long long u)
-{
-	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
-	int ret = 1;
-
-	spin_lock_irqsave(lock, flags);
-	if (v->counter != u) {
-		v->counter += a;
-		ret = 0;
-	}
-	spin_unlock_irqrestore(lock, flags);
-	return ret;
-}
-EXPORT_SYMBOL(atomic64_add_unless);
-
-static int init_atomic64_lock(void)
-{
-	int i;
-
-	for (i = 0; i < NR_LOCKS; ++i)
-		spin_lock_init(&atomic64_lock[i].lock);
-	return 0;
-}
-
-pure_initcall(init_atomic64_lock);
+#include <asm-generic/atomic64-impl.h>
+
+union generic_atomic64_lock generic_atomic64_lock[ATOMIC64_NR_LOCKS] __cacheline_aligned_in_smp;
+pure_initcall(init_generic_atomic64_lock);
+
+EXPORT_SYMBOL(generic_atomic64_read);
+EXPORT_SYMBOL(generic_atomic64_set);
+EXPORT_SYMBOL(generic_atomic64_add);
+EXPORT_SYMBOL(generic_atomic64_add_return);
+EXPORT_SYMBOL(generic_atomic64_sub);
+EXPORT_SYMBOL(generic_atomic64_sub_return);
+EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
+EXPORT_SYMBOL(generic_atomic64_cmpxchg);
+EXPORT_SYMBOL(generic_atomic64_xchg);
+EXPORT_SYMBOL(generic_atomic64_add_unless);
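
For reference, a standalone illustration of how generic_atomic64_lock_addr()
spreads atomic64_t addresses over the lock array; this is user-space example
code, not part of the patch, and the cache-line shift, lock count, and sample
address are assumed values:

	#include <stdio.h>

	/*
	 * Model of generic_atomic64_lock_addr()'s hash, assuming
	 * L1_CACHE_SHIFT == 5 (32-byte lines) and 16 locks.  Variables on
	 * the same cache line share a lock; variables on different lines
	 * are scattered across the array.
	 */
	#define L1_CACHE_SHIFT		5
	#define ATOMIC64_NR_LOCKS	16

	static unsigned int lock_index(unsigned long addr)
	{
		addr >>= L1_CACHE_SHIFT;		/* drop the offset within the line */
		addr ^= (addr >> 8) ^ (addr >> 16);	/* fold in higher bits */
		return addr & (ATOMIC64_NR_LOCKS - 1);
	}

	int main(void)
	{
		unsigned long base = 0xc0123400UL;	/* arbitrary example address */
		int i;

		for (i = 0; i < 4; i++)
			printf("addr %#lx -> lock %u\n",
			       base + i * 64, lock_index(base + i * 64));
		return 0;
	}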
-- 
1.6.6.1.476.g01ddb


Thread overview: 54+ messages
2010-02-17 11:42 [PATCH 0/10] x86-32: improve atomic64_t functions Luca Barbieri
2010-02-17 11:42 ` [PATCH 01/10] x86: add support for multiple choice alternatives Luca Barbieri
2010-02-17 12:47   ` Avi Kivity
2010-02-18 19:46     ` H. Peter Anvin
2010-02-17 11:42 ` [PATCH 02/10] x86: add support for relative CALL and JMP in alternatives Luca Barbieri
2010-02-18 19:40   ` H. Peter Anvin
2010-02-18 23:38     ` Luca Barbieri
2010-02-18 23:54       ` H. Peter Anvin
2010-02-19 14:27   ` Masami Hiramatsu
2010-02-17 11:42 ` [PATCH 03/10] x86: add support for lock prefix " Luca Barbieri
2010-02-17 11:42 ` [PATCH 04/10] x86-32: allow UP/SMP lock replacement in cmpxchg64 Luca Barbieri
2010-02-17 11:42 ` [PATCH 05/10] lib: add self-test for atomic64_t Luca Barbieri
2010-02-17 11:42 ` [PATCH 06/10] x86-32: rewrite 32-bit atomic64 functions in assembly Luca Barbieri
2010-02-17 11:42 ` [PATCH 07/10] lib: move generic atomic64 to atomic64-impl.h Luca Barbieri [this message]
2010-02-17 11:42 ` [PATCH 08/10] x86-32: support atomic64_t on 386/486 UP/SMP Luca Barbieri
2010-02-18 10:25   ` Peter Zijlstra
2010-02-18 10:58     ` Luca Barbieri
2010-02-18 15:20     ` H. Peter Anvin
2010-02-17 11:42 ` [PATCH 09/10] x86-32: use SSE for atomic64_read/set if available Luca Barbieri
2010-02-17 22:39   ` H. Peter Anvin
2010-02-18  0:41     ` Luca Barbieri
2010-02-18  0:47       ` H. Peter Anvin
2010-02-18  9:56         ` Avi Kivity
2010-02-18 10:07           ` Luca Barbieri
2010-02-18  8:23   ` Andi Kleen
2010-02-18  9:53     ` Luca Barbieri
2010-02-18  9:56       ` Luca Barbieri
2010-02-18 10:11       ` Andi Kleen
2010-02-18 10:27         ` Luca Barbieri
2010-02-18 15:24           ` H. Peter Anvin
2010-02-18 18:14             ` Luca Barbieri
2010-02-18 18:28               ` H. Peter Anvin
2010-02-18 18:42                 ` Luca Barbieri
2010-02-18 19:07                   ` H. Peter Anvin
2010-02-18 20:26               ` Andi Kleen
2010-02-18 16:52           ` H. Peter Anvin
2010-02-18 18:49             ` Luca Barbieri
2010-02-18 19:06               ` H. Peter Anvin
2010-02-18 19:43                 ` Luca Barbieri
2010-02-18 19:45                 ` Yuhong Bao
2010-02-18 10:24       ` Peter Zijlstra
2010-02-18 10:25   ` Peter Zijlstra
2010-02-18 10:50     ` Luca Barbieri
2010-02-18 11:00       ` Peter Zijlstra
2010-02-18 12:29         ` Luca Barbieri
2010-02-18 12:32           ` Peter Zijlstra
2010-02-18 13:45             ` Luca Barbieri
2010-02-17 11:42 ` [PATCH 10/10] x86-32: panic on !CX8 && XMM Luca Barbieri
2010-02-17 22:38   ` H. Peter Anvin
2010-02-17 23:00     ` Yuhong Bao
2010-02-17 23:41       ` H. Peter Anvin
2010-02-18  1:13         ` Yuhong Bao
2010-02-25 20:24           ` Yuhong Bao
2010-02-18  0:46     ` Luca Barbieri
