From: Nicolas Pitre <nico@cam.org>
To: Ingo Molnar <mingo@elte.hu>
Cc: Linus Torvalds <torvalds@osdl.org>,
lkml <linux-kernel@vger.kernel.org>,
Andrew Morton <akpm@osdl.org>,
Arjan van de Ven <arjanv@infradead.org>,
Jes Sorensen <jes@trained-monkey.org>,
Zwane Mwaikambo <zwane@arm.linux.org.uk>,
Oleg Nesterov <oleg@tv-sign.ru>,
David Howells <dhowells@redhat.com>,
Alan Cox <alan@lxorguk.ukuu.org.uk>,
Benjamin LaHaise <bcrl@kvack.org>,
Steven Rostedt <rostedt@goodmis.org>,
Christoph Hellwig <hch@infradead.org>, Andi Kleen <ak@suse.de>,
Russell King <rmk+lkml@arm.linux.org.uk>
Subject: [patch 5/5] mutex subsystem: allow for the fast path to be inlined
Date: Thu, 22 Dec 2005 01:53:38 -0500 (EST) [thread overview]
Message-ID: <Pine.LNX.4.64.0512220142530.26663@localhost.localdomain> (raw)
In-Reply-To: <20051221231218.GA6747@elte.hu>
This lets the architecture decide whether it wants the mutex fast path
inlined or not. On ARM it is (now) worthwhile to do so.
Also get rid of ARCH_IMPLEMENTS_MUTEX_FASTPATH since at this point it is
rather useless.
Signed-off-by: Nicolas Pitre <nico@cam.org>
---
Index: linux-2.6/include/linux/mutex.h
===================================================================
--- linux-2.6.orig/include/linux/mutex.h
+++ linux-2.6/include/linux/mutex.h
@@ -92,10 +92,41 @@ struct mutex_waiter {
extern void FASTCALL(__mutex_init(struct mutex *lock, const char *name));
+#ifdef CONFIG_DEBUG_MUTEXES
+#undef MUTEX_INLINE_FASTPATH
+#endif
+
+#ifdef MUTEX_INLINE_FASTPATH
+
+extern void FASTCALL(__mutex_lock_noinline(atomic_t *lock_count));
+extern int FASTCALL(__mutex_lock_interruptible_noinline(atomic_t *lock_count));
+extern void FASTCALL(__mutex_unlock_noinline(atomic_t *lock_count));
+
+static inline void mutex_lock(struct mutex *lock)
+{
+ arch_mutex_fast_lock(&lock->count, __mutex_lock_noinline);
+}
+
+static inline int mutex_lock_interruptible(struct mutex *lock)
+{
+ return arch_mutex_fast_lock_retval(&lock->count,
+ __mutex_lock_interruptible_noinline);
+}
+
+static inline void mutex_unlock(struct mutex *lock)
+{
+ arch_mutex_fast_unlock(&lock->count, __mutex_unlock_noinline);
+}
+
+#else
+
extern void FASTCALL(mutex_lock(struct mutex *lock));
extern int FASTCALL(mutex_lock_interruptible(struct mutex *lock));
-extern int FASTCALL(mutex_trylock(struct mutex *lock));
extern void FASTCALL(mutex_unlock(struct mutex *lock));
+
+#endif
+
+extern int FASTCALL(mutex_trylock(struct mutex *lock));
extern int FASTCALL(mutex_is_locked(struct mutex *lock));
#endif
Index: linux-2.6/kernel/mutex.c
===================================================================
--- linux-2.6.orig/kernel/mutex.c
+++ linux-2.6/kernel/mutex.c
@@ -313,7 +313,12 @@ static inline void __mutex_unlock_nonato
* We want the atomic op come first, to make sure the
* branch is predicted as default-untaken:
*/
+
+#ifndef MUTEX_INLINE_FASTPATH
static __sched void FASTCALL(__mutex_lock_noinline(atomic_t *lock_count));
+static __sched int FASTCALL(__mutex_lock_interruptible_noinline(atomic_t *lock_count));
+static __sched void FASTCALL(__mutex_unlock_noinline(atomic_t *lock_count));
+#endif
/*
* The locking fastpath is the 1->0 transition from
@@ -324,7 +329,7 @@ static inline void __mutex_lock_atomic(s
arch_mutex_fast_lock(&lock->count, __mutex_lock_noinline);
}
-static fastcall __sched void __mutex_lock_noinline(atomic_t *lock_count)
+fastcall __sched void __mutex_lock_noinline(atomic_t *lock_count)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -336,15 +341,13 @@ static inline void __mutex_lock(struct m
__mutex_lock_atomic(lock);
}
-static __sched int FASTCALL(__mutex_lock_interruptible_noinline(atomic_t *lock_count));
-
static inline int __mutex_lock_interruptible_atomic(struct mutex *lock)
{
return arch_mutex_fast_lock_retval(&lock->count,
__mutex_lock_interruptible_noinline);
}
-static fastcall __sched int __mutex_lock_interruptible_noinline(atomic_t *lock_count)
+fastcall __sched int __mutex_lock_interruptible_noinline(atomic_t *lock_count)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -356,8 +359,6 @@ static inline int __mutex_lock_interrupt
return __mutex_lock_interruptible_atomic(lock);
}
-static void __sched FASTCALL(__mutex_unlock_noinline(atomic_t *lock_count));
-
/*
* The unlocking fastpath is the 0->1 transition from
* 'locked' into 'unlocked' state:
@@ -367,7 +368,7 @@ static inline void __mutex_unlock_atomic
arch_mutex_fast_unlock(&lock->count, __mutex_unlock_noinline);
}
-static fastcall void __sched __mutex_unlock_noinline(atomic_t *lock_count)
+fastcall void __sched __mutex_unlock_noinline(atomic_t *lock_count)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -412,25 +413,14 @@ static inline int __mutex_lock_interrupt
#endif
/*
- * Some architectures provide hand-coded mutex_lock() functions,
- * the will call the mutex_*_slowpath() generic functions:
+ * Some architectures benefit from extra performance when
+ * the fast path is inlined.
*/
-#ifdef ARCH_IMPLEMENTS_MUTEX_FASTPATH
-
-void __sched fastcall mutex_lock_slowpath(struct mutex *lock)
-{
- __mutex_lock(lock);
-}
+#ifdef MUTEX_INLINE_FASTPATH
-void __sched fastcall mutex_unlock_slowpath(struct mutex *lock)
-{
- __mutex_unlock(lock);
-}
-
-int __sched fastcall mutex_lock_interruptible_slowpath(struct mutex *lock)
-{
- return __mutex_lock_interruptible(lock);
-}
+EXPORT_SYMBOL_GPL(__mutex_lock_noinline);
+EXPORT_SYMBOL_GPL(__mutex_lock_interruptible_noinline);
+EXPORT_SYMBOL_GPL(__mutex_unlock_noinline);
#else
@@ -450,12 +440,12 @@ int __sched fastcall mutex_lock_interrup
return __mutex_lock_interruptible(lock __CALLER_IP__);
}
-#endif
-
EXPORT_SYMBOL_GPL(mutex_lock);
EXPORT_SYMBOL_GPL(mutex_unlock);
EXPORT_SYMBOL_GPL(mutex_lock_interruptible);
+#endif
+
/*
* Initialise the lock:
*/
Index: linux-2.6/include/asm-arm/mutex.h
===================================================================
--- linux-2.6.orig/include/asm-arm/mutex.h
+++ linux-2.6/include/asm-arm/mutex.h
@@ -1 +1,3 @@
#include <asm-generic/mutex.h>
+
+#define MUTEX_INLINE_FASTPATH
prev parent reply other threads:[~2005-12-22 6:53 UTC|newest]
Thread overview: 27+ messages / expand[flat|nested] mbox.gz Atom feed top
2005-12-21 15:54 [patch 0/8] mutex subsystem, ANNOUNCE Ingo Molnar
2005-12-21 16:04 ` Arjan van de Ven
2005-12-21 18:07 ` Jes Sorensen
2005-12-22 2:36 ` Nick Piggin
2005-12-22 2:57 ` Nick Piggin
2005-12-22 7:19 ` Ingo Molnar
2005-12-22 7:56 ` Nick Piggin
2005-12-22 8:00 ` Arjan van de Ven
2005-12-22 8:10 ` Nick Piggin
2005-12-22 8:21 ` Arjan van de Ven
2005-12-22 8:32 ` Nick Piggin
2005-12-22 8:24 ` Ingo Molnar
2005-12-22 8:37 ` Nick Piggin
2005-12-21 22:43 ` Nicolas Pitre
2005-12-21 22:43 ` [patch 1/3] mutex subsystem: fix additions to the ARM atomic.h Nicolas Pitre
2005-12-21 22:44 ` [patch 2/3] mutex subsystem: add new atomic primitives Nicolas Pitre
2005-12-21 22:44 ` [patch 3/3] mutex subsystem: move the core to the new atomic helpers Nicolas Pitre
2005-12-21 23:12 ` Ingo Molnar
2005-12-22 1:16 ` Matt Mackall
2005-12-22 6:50 ` Nicolas Pitre
2005-12-22 6:51 ` [patch 2/5] mutex subsystem: add architecture specific mutex primitives Nicolas Pitre
2005-12-22 7:44 ` Nick Piggin
2005-12-22 8:03 ` Nick Piggin
2005-12-22 6:52 ` [patch 1/5] mutex subsystem: fix asm-arm/atomic.h Nicolas Pitre
2005-12-22 6:53 ` [patch 3/5] mutex subsystem: move the core to the new atomic helpers Nicolas Pitre
2005-12-22 6:53 ` [patch 4/5] mutex subsystem: allow architecture defined fast path for mutex_lock_interruptible Nicolas Pitre
2005-12-22 6:53 ` Nicolas Pitre [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=Pine.LNX.4.64.0512220142530.26663@localhost.localdomain \
--to=nico@cam.org \
--cc=ak@suse.de \
--cc=akpm@osdl.org \
--cc=alan@lxorguk.ukuu.org.uk \
--cc=arjanv@infradead.org \
--cc=bcrl@kvack.org \
--cc=dhowells@redhat.com \
--cc=hch@infradead.org \
--cc=jes@trained-monkey.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@elte.hu \
--cc=oleg@tv-sign.ru \
--cc=rmk+lkml@arm.linux.org.uk \
--cc=rostedt@goodmis.org \
--cc=torvalds@osdl.org \
--cc=zwane@arm.linux.org.uk \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).