linux-kernel.vger.kernel.org archive mirror
* [patch 1/3] spinlock fix #1
@ 2005-01-20 11:43 Ingo Molnar
  2005-01-20 11:59 ` [patch 2/3] spinlock fix #2: generalize [spin|rw]lock yielding Ingo Molnar
  0 siblings, 1 reply; 7+ messages in thread
From: Ingo Molnar @ 2005-01-20 11:43 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel


I've split up spinlocking-fixes.patch into 3 parts and reworked them.
This is the first one, against BK-curr:

It fixes the BUILD_LOCK_OPS() bug by introducing the following 3 new
locking primitives:

  spin_trylock_test(lock)
  read_trylock_test(lock)
  write_trylock_test(lock)

This is what BUILD_LOCK_OPS() needs: a nonintrusive test to check
whether the real (intrusive) trylock op would succeed or not. Semantics
and naming are completely symmetric to the trylock counterparts. No
changes to exit.c.
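
For illustration, this is roughly what the fixed BUILD_LOCK_OPS(spin,
spinlock) expands to for the plain _spin_lock() case (a hand-expanded
sketch, not part of the patch; the trylock fast path at the top of the
loop is not visible in the hunks below and is paraphrased here):

	void __lockfunc _spin_lock(spinlock_t *lock)
	{
		preempt_disable();
		for (;;) {
			if (likely(_raw_spin_trylock(lock)))
				break;
			/* contended: allow preemption while we poll */
			preempt_enable();
			if (!lock->break_lock)
				lock->break_lock = 1;
			/* nonintrusive test: watch the lock, never take it */
			while (!spin_trylock_test(lock) && lock->break_lock)
				cpu_relax();
			preempt_disable();
		}
	}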

Build/boot-tested on x86. Architectures that want to support PREEMPT
need to add these definitions.

	Ingo

Signed-off-by: Ingo Molnar <mingo@elte.hu>

--- linux/kernel/spinlock.c.orig
+++ linux/kernel/spinlock.c
@@ -173,8 +173,8 @@ EXPORT_SYMBOL(_write_lock);
  * (We do this in a function because inlining it would be excessive.)
  */
 
-#define BUILD_LOCK_OPS(op, locktype, is_locked_fn)			\
-void __lockfunc _##op##_lock(locktype *lock)				\
+#define BUILD_LOCK_OPS(op, locktype)					\
+void __lockfunc _##op##_lock(locktype##_t *lock)			\
 {									\
 	preempt_disable();						\
 	for (;;) {							\
@@ -183,7 +183,7 @@ void __lockfunc _##op##_lock(locktype *l
 		preempt_enable();					\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		while (is_locked_fn(lock) && (lock)->break_lock)	\
+		while (!op##_trylock_test(lock) && (lock)->break_lock)	\
 			cpu_relax();					\
 		preempt_disable();					\
 	}								\
@@ -191,7 +191,7 @@ void __lockfunc _##op##_lock(locktype *l
 									\
 EXPORT_SYMBOL(_##op##_lock);						\
 									\
-unsigned long __lockfunc _##op##_lock_irqsave(locktype *lock)		\
+unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
 {									\
 	unsigned long flags;						\
 									\
@@ -205,7 +205,7 @@ unsigned long __lockfunc _##op##_lock_ir
 		preempt_enable();					\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		while (is_locked_fn(lock) && (lock)->break_lock)	\
+		while (!op##_trylock_test(lock) && (lock)->break_lock)	\
 			cpu_relax();					\
 		preempt_disable();					\
 	}								\
@@ -214,14 +214,14 @@ unsigned long __lockfunc _##op##_lock_ir
 									\
 EXPORT_SYMBOL(_##op##_lock_irqsave);					\
 									\
-void __lockfunc _##op##_lock_irq(locktype *lock)			\
+void __lockfunc _##op##_lock_irq(locktype##_t *lock)			\
 {									\
 	_##op##_lock_irqsave(lock);					\
 }									\
 									\
 EXPORT_SYMBOL(_##op##_lock_irq);					\
 									\
-void __lockfunc _##op##_lock_bh(locktype *lock)				\
+void __lockfunc _##op##_lock_bh(locktype##_t *lock)			\
 {									\
 	unsigned long flags;						\
 									\
@@ -246,9 +246,9 @@ EXPORT_SYMBOL(_##op##_lock_bh)
  *         _[spin|read|write]_lock_irqsave()
  *         _[spin|read|write]_lock_bh()
  */
-BUILD_LOCK_OPS(spin, spinlock_t, spin_is_locked);
-BUILD_LOCK_OPS(read, rwlock_t, rwlock_is_locked);
-BUILD_LOCK_OPS(write, rwlock_t, spin_is_locked);
+BUILD_LOCK_OPS(spin, spinlock);
+BUILD_LOCK_OPS(read, rwlock);
+BUILD_LOCK_OPS(write, rwlock);
 
 #endif /* CONFIG_PREEMPT */
 
--- linux/include/linux/spinlock.h.orig
+++ linux/include/linux/spinlock.h
@@ -584,4 +584,10 @@ static inline int bit_spin_is_locked(int
 #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
 #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
 
+/**
+ * spin_trylock_test - would spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define spin_trylock_test(lock)		(!spin_is_locked(lock))
+
 #endif /* __LINUX_SPINLOCK_H */
--- linux/include/asm-i386/spinlock.h.orig
+++ linux/include/asm-i386/spinlock.h
@@ -188,6 +188,18 @@ typedef struct {
 
 #define rwlock_is_locked(x) ((x)->lock != RW_LOCK_BIAS)
 
+/**
+ * read_trylock_test - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define read_trylock_test(x) (atomic_read((atomic_t *)&(x)->lock) > 0)
+
+/**
+ * write_trylock_test - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define write_trylock_test(x) ((x)->lock == RW_LOCK_BIAS)
+
 /*
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.


* [patch 2/3] spinlock fix #2: generalize [spin|rw]lock yielding
  2005-01-20 11:43 [patch 1/3] spinlock fix #1 Ingo Molnar
@ 2005-01-20 11:59 ` Ingo Molnar
  2005-01-20 12:09   ` [patch 3/3] spinlock fix #3: type-checking spinlock primitives, x86 Ingo Molnar
  0 siblings, 1 reply; 7+ messages in thread
From: Ingo Molnar @ 2005-01-20 11:59 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel, Paul Mackerras


This patch generalizes the facility of targeted lock-yielding originally
implemented for ppc64. The facility lets a virtual CPU tell the
hypervisor which other virtual CPU this CPU is 'blocked on', and hence
which CPU the hypervisor should yield the current timeslice to, so that
progress can be made on releasing the lock.

On physical platforms these functions default to cpu_relax(). Since
physical platforms are in the overwhelming majority, I've added the two
new definitions to the new asm-generic/spinlock.h include file; my hope
is that more generic spinlock bits can move there later on as well.
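
As a rough sketch of the idea (not code from this patch: the
hv_directed_yield() call is hypothetical, and the holder-CPU encoding is
merely modelled on what the ppc64 code does), a paravirtualized
architecture could override the generic cpu_relax() default in its
asm/spinlock.h along these lines:

	static inline void spinlock_yield(spinlock_t *lock)
	{
		unsigned int lock_value = lock->lock;

		if (!lock_value)
			return;		/* lock was released meanwhile */
		/*
		 * Assume the holder's CPU id is encoded in the lock word
		 * and hand the rest of our timeslice to that virtual CPU:
		 */
		hv_directed_yield(lock_value & 0xffff);
	}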

This patch solves the ppc64/PREEMPT functionality regression reported by
Paul Mackerras. I have only tested it on x86; Paul, would you mind
testing it on ppc64?

	Ingo

Signed-off-by: Ingo Molnar <mingo@elte.hu>

--- linux/kernel/spinlock.c.orig
+++ linux/kernel/spinlock.c
@@ -184,7 +184,7 @@ void __lockfunc _##op##_lock(locktype##_
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
 		while (!op##_trylock_test(lock) && (lock)->break_lock)	\
-			cpu_relax();					\
+			locktype##_yield(lock);				\
 		preempt_disable();					\
 	}								\
 }									\
@@ -206,7 +206,7 @@ unsigned long __lockfunc _##op##_lock_ir
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
 		while (!op##_trylock_test(lock) && (lock)->break_lock)	\
-			cpu_relax();					\
+			locktype##_yield(lock);				\
 		preempt_disable();					\
 	}								\
 	return flags;							\
--- linux/arch/ppc64/lib/locks.c.orig
+++ linux/arch/ppc64/lib/locks.c
@@ -23,7 +23,7 @@
 /* waiting for a spinlock... */
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 
-void __spin_yield(spinlock_t *lock)
+void spinlock_yield(spinlock_t *lock)
 {
 	unsigned int lock_value, holder_cpu, yield_count;
 	struct paca_struct *holder_paca;
@@ -54,7 +54,7 @@ void __spin_yield(spinlock_t *lock)
  * This turns out to be the same for read and write locks, since
  * we only know the holder if it is write-locked.
  */
-void __rw_yield(rwlock_t *rw)
+void rwlock_yield(rwlock_t *rw)
 {
 	int lock_value;
 	unsigned int holder_cpu, yield_count;
@@ -87,7 +87,7 @@ void spin_unlock_wait(spinlock_t *lock)
 	while (lock->lock) {
 		HMT_low();
 		if (SHARED_PROCESSOR)
-			__spin_yield(lock);
+			spinlock_yield(lock);
 	}
 	HMT_medium();
 }
--- linux/include/asm-ia64/spinlock.h.orig
+++ linux/include/asm-ia64/spinlock.h
@@ -17,6 +17,8 @@
 #include <asm/intrinsics.h>
 #include <asm/system.h>
 
+#include <asm-generic/spinlock.h>
+
 typedef struct {
 	volatile unsigned int lock;
 #ifdef CONFIG_PREEMPT
--- linux/include/asm-generic/spinlock.h.orig
+++ linux/include/asm-generic/spinlock.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_GENERIC_SPINLOCK_H
+#define _ASM_GENERIC_SPINLOCK_H
+
+/*
+ * Virtual platforms might use these to
+ * yield to specific virtual CPUs:
+ */
+#define spinlock_yield(lock)	cpu_relax()
+#define rwlock_yield(lock)	cpu_relax()
+
+#endif /* _ASM_GENERIC_SPINLOCK_H */
--- linux/include/linux/spinlock.h.orig
+++ linux/include/linux/spinlock.h
@@ -206,6 +206,8 @@ typedef struct {
 #define _raw_spin_unlock(lock) do { (void)(lock); } while(0)
 #endif /* CONFIG_DEBUG_SPINLOCK */
 
+#define spinlock_yield(lock)	(void)(lock)
+
 /* RW spinlocks: No debug version */
 
 #if (__GNUC__ > 2)
@@ -224,6 +226,8 @@ typedef struct {
 #define _raw_read_trylock(lock) ({ (void)(lock); (1); })
 #define _raw_write_trylock(lock) ({ (void)(lock); (1); })
 
+#define rwlock_yield(lock)	(void)(lock)
+
 #define _spin_trylock(lock)	({preempt_disable(); _raw_spin_trylock(lock) ? \
 				1 : ({preempt_enable(); 0;});})
 
--- linux/include/asm-i386/spinlock.h.orig
+++ linux/include/asm-i386/spinlock.h
@@ -7,6 +7,8 @@
 #include <linux/config.h>
 #include <linux/compiler.h>
 
+#include <asm-generic/spinlock.h>
+
 asmlinkage int printk(const char * fmt, ...)
 	__attribute__ ((format (printf, 1, 2)));
 
--- linux/include/asm-ppc64/spinlock.h.orig
+++ linux/include/asm-ppc64/spinlock.h
@@ -64,11 +64,11 @@ static __inline__ void _raw_spin_unlock(
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 /* We only yield to the hypervisor if we are in shared processor mode */
 #define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc)
-extern void __spin_yield(spinlock_t *lock);
-extern void __rw_yield(rwlock_t *lock);
+extern void spinlock_yield(spinlock_t *lock);
+extern void rwlock_yield(rwlock_t *lock);
 #else /* SPLPAR || ISERIES */
-#define __spin_yield(x)	barrier()
-#define __rw_yield(x)	barrier()
+#define spinlock_yield(x)	barrier()
+#define rwlock_yield(x)	barrier()
 #define SHARED_PROCESSOR	0
 #endif
 extern void spin_unlock_wait(spinlock_t *lock);
@@ -109,7 +109,7 @@ static void __inline__ _raw_spin_lock(sp
 		do {
 			HMT_low();
 			if (SHARED_PROCESSOR)
-				__spin_yield(lock);
+				spinlock_yield(lock);
 		} while (likely(lock->lock != 0));
 		HMT_medium();
 	}
@@ -127,7 +127,7 @@ static void __inline__ _raw_spin_lock_fl
 		do {
 			HMT_low();
 			if (SHARED_PROCESSOR)
-				__spin_yield(lock);
+				spinlock_yield(lock);
 		} while (likely(lock->lock != 0));
 		HMT_medium();
 		local_irq_restore(flags_dis);
@@ -201,7 +201,7 @@ static void __inline__ _raw_read_lock(rw
 		do {
 			HMT_low();
 			if (SHARED_PROCESSOR)
-				__rw_yield(rw);
+				rwlock_yield(rw);
 		} while (likely(rw->lock < 0));
 		HMT_medium();
 	}
@@ -258,7 +258,7 @@ static void __inline__ _raw_write_lock(r
 		do {
 			HMT_low();
 			if (SHARED_PROCESSOR)
-				__rw_yield(rw);
+				rwlock_yield(rw);
 		} while (likely(rw->lock != 0));
 		HMT_medium();
 	}
--- linux/include/asm-x86_64/spinlock.h.orig
+++ linux/include/asm-x86_64/spinlock.h
@@ -6,6 +6,8 @@
 #include <asm/page.h>
 #include <linux/config.h>
 
+#include <asm-generic/spinlock.h>
+
 extern int printk(const char * fmt, ...)
 	__attribute__ ((format (printf, 1, 2)));
 


* [patch 3/3] spinlock fix #3: type-checking spinlock primitives, x86
  2005-01-20 11:59 ` [patch 2/3] spinlock fix #2: generalize [spin|rw]lock yielding Ingo Molnar
@ 2005-01-20 12:09   ` Ingo Molnar
  2005-01-20 12:18     ` [patch] stricter type-checking rwlock " Ingo Molnar
  2005-01-20 22:51     ` [patch 3/3] spinlock fix #3: type-checking spinlock primitives, x86 J.A. Magallon
  0 siblings, 2 replies; 7+ messages in thread
From: Ingo Molnar @ 2005-01-20 12:09 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel


This patch would have caught the bug in -BK-curr (the one that patch #1
fixes) via a compiler warning. Test-built/booted on x86.
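
To illustrate the kind of mistake this catches (the example is mine, not
part of the patch): the -BK-curr bug ended up passing an rwlock_t to
spin_is_locked(), which the old macro accepted silently because both lock
types happen to have a ->lock member:

	rwlock_t rw = RW_LOCK_UNLOCKED;

	while (spin_is_locked(&rw))	/* wrong lock type, macro compiles anyway */
		cpu_relax();

With spin_is_locked() as a static inline that takes a spinlock_t *, the
same code now draws an incompatible-pointer-type warning from gcc.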

	Ingo

Signed-off-by: Ingo Molnar <mingo@elte.hu>

--- linux/include/asm-i386/spinlock.h.orig
+++ linux/include/asm-i386/spinlock.h
@@ -36,7 +36,10 @@ typedef struct {
 
 #define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
 
-#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
+static inline void spin_lock_init(spinlock_t *lock)
+{
+	*lock = SPIN_LOCK_UNLOCKED;
+}
 
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
@@ -45,8 +48,17 @@ typedef struct {
  * We make no fairness assumptions. They have a cost.
  */
 
-#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->lock) <= 0)
-#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
+static inline int spin_is_locked(spinlock_t *lock)
+{
+	return *(volatile signed char *)(&lock->lock) <= 0;
+}
+
+static inline void spin_unlock_wait(spinlock_t *lock)
+{
+	do {
+		barrier();
+	} while (spin_is_locked(lock));
+}
 
 #define spin_lock_string \
 	"\n1:\t" \


* [patch] stricter type-checking rwlock primitives, x86
  2005-01-20 12:09   ` [patch 3/3] spinlock fix #3: type-checking spinlock primitives, x86 Ingo Molnar
@ 2005-01-20 12:18     ` Ingo Molnar
  2005-01-20 12:22       ` [patch] minor spinlock cleanups Ingo Molnar
  2005-01-20 22:51     ` [patch 3/3] spinlock fix #3: type-checking spinlock primitives, x86 J.A. Magallon
  1 sibling, 1 reply; 7+ messages in thread
From: Ingo Molnar @ 2005-01-20 12:18 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel


Turn the x86 rwlock macros into inline functions, to get stricter
type-checking. Test-built/booted on x86. (This patch applies on top of
all previous spinlock patches.)
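
For example (illustrative only, not from the patch), a mismatched call
such as:

	spinlock_t s = SPIN_LOCK_UNLOCKED;

	while (!write_trylock_test(&s))	/* spinlock where an rwlock is expected */
		cpu_relax();

used to compile silently, since spinlock_t also has a ->lock member; with
write_trylock_test() as a static inline taking an rwlock_t *, gcc now
warns about the incompatible pointer type.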

	Ingo

Signed-off-by: Ingo Molnar <mingo@elte.hu>

--- linux/include/asm-i386/spinlock.h.orig
+++ linux/include/asm-i386/spinlock.h
@@ -198,21 +198,33 @@ typedef struct {
 
 #define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
 
-#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
+static inline void rwlock_init(rwlock_t *rw)
+{
+	*rw = RW_LOCK_UNLOCKED;
+}
 
-#define rwlock_is_locked(x) ((x)->lock != RW_LOCK_BIAS)
+static inline int rwlock_is_locked(rwlock_t *rw)
+{
+	return rw->lock != RW_LOCK_BIAS;
+}
 
 /**
  * read_trylock_test - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define read_trylock_test(x) (atomic_read((atomic_t *)&(x)->lock) > 0)
+static inline int read_trylock_test(rwlock_t *rw)
+{
+	return atomic_read((atomic_t *)&rw->lock) > 0;
+}
 
 /**
  * write_trylock_test - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define write_trylock_test(x) ((x)->lock == RW_LOCK_BIAS)
+static inline int write_trylock_test(rwlock_t *rw)
+{
+	return atomic_read((atomic_t *)&rw->lock) == RW_LOCK_BIAS;
+}
 
 /*
  * On x86, we implement read-write locks as a 32-bit counter
@@ -241,8 +253,16 @@ static inline void _raw_write_lock(rwloc
 	__build_write_lock(rw, "__write_lock_failed");
 }
 
-#define _raw_read_unlock(rw)		asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
-#define _raw_write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
+static inline void _raw_read_unlock(rwlock_t *rw)
+{
+	asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+}
+
+static inline void _raw_write_unlock(rwlock_t *rw)
+{
+	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR
+				",%0":"=m" (rw->lock) : : "memory");
+}
 
 static inline int _raw_read_trylock(rwlock_t *lock)
 {


* [patch] minor spinlock cleanups
  2005-01-20 12:18     ` [patch] stricter type-checking rwlock " Ingo Molnar
@ 2005-01-20 12:22       ` Ingo Molnar
  0 siblings, 0 replies; 7+ messages in thread
From: Ingo Molnar @ 2005-01-20 12:22 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel


Cleanup: remove a stale semicolon from linux/spinlock.h and a stale
trailing space from asm-i386/spinlock.h.

	Ingo

Signed-off-by: Ingo Molnar <mingo@elte.hu>

--- linux/include/linux/spinlock.h.orig
+++ linux/include/linux/spinlock.h
@@ -202,7 +202,7 @@ typedef struct {
 #define _raw_spin_lock(lock)	do { (void)(lock); } while(0)
 #define spin_is_locked(lock)	((void)(lock), 0)
 #define _raw_spin_trylock(lock)	(((void)(lock), 1))
-#define spin_unlock_wait(lock)	(void)(lock);
+#define spin_unlock_wait(lock)	(void)(lock)
 #define _raw_spin_unlock(lock) do { (void)(lock); } while(0)
 #endif /* CONFIG_DEBUG_SPINLOCK */
 
--- linux/include/asm-i386/spinlock.h.orig
+++ linux/include/asm-i386/spinlock.h
@@ -92,7 +92,7 @@ static inline void spin_unlock_wait(spin
  * (except on PPro SMP or if we are using OOSTORE)
  * (PPro errata 66, 92)
  */
- 
+
 #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
 
 #define spin_unlock_string \


* Re: [patch 3/3] spinlock fix #3: type-checking spinlock primitives, x86
  2005-01-20 12:09   ` [patch 3/3] spinlock fix #3: type-checking spinlock primitives, x86 Ingo Molnar
  2005-01-20 12:18     ` [patch] stricter type-checking rwlock " Ingo Molnar
@ 2005-01-20 22:51     ` J.A. Magallon
  1 sibling, 0 replies; 7+ messages in thread
From: J.A. Magallon @ 2005-01-20 22:51 UTC (permalink / raw)
  To: linux-kernel



On 2005.01.20, Ingo Molnar wrote:
> 
> this patch would have caught the bug in -BK-curr (that patch #1 fixes),
> via a compiler warning. Test-built/booted on x86.
> 
> 	Ingo
> 
> Signed-off-by: Ingo Molnar <mingo@elte.hu>
> 
> --- linux/include/asm-i386/spinlock.h.orig
> +++ linux/include/asm-i386/spinlock.h
> @@ -36,7 +36,10 @@ typedef struct {
>  
>  #define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
>  
> -#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
> +static inline void spin_lock_init(spinlock_t *lock)

Will it have any real effect if you add things like:

+static inline void spin_lock_init(spinlock_t *lock) __attribute__((__pure__));

??

TIA

--
J.A. Magallon <jamagallon()able!es>     \               Software is like sex:
werewolf!able!es                         \         It's better when it's free
Mandrakelinux release 10.2 (Cooker) for i586
Linux 2.6.10-jam4 (gcc 3.4.3 (Mandrakelinux 10.2 3.4.3-3mdk)) #2




* [patch] stricter type-checking rwlock primitives, x86
  2005-01-20 16:12                 ` [patch 3/3] spinlock fix #3: type-checking spinlock primitives, x86 Ingo Molnar
@ 2005-01-20 16:14                   ` Ingo Molnar
  0 siblings, 0 replies; 7+ messages in thread
From: Ingo Molnar @ 2005-01-20 16:14 UTC (permalink / raw)
  To: Linus Torvalds
  Cc: Peter Chubb, Chris Wedgwood, Andrew Morton, paulus, linux-kernel,
	tony.luck, dsw, benh, linux-ia64, hch, wli, jbarnes


[patch respun with s/trylock_test/can_lock/]
--

Turn the x86 rwlock macros into inline functions, to get stricter
type-checking. Test-built/booted on x86. (This patch applies on top of
all previous spinlock patches.)

	Ingo

Signed-off-by: Ingo Molnar <mingo@elte.hu>

--- linux/include/asm-i386/spinlock.h.orig
+++ linux/include/asm-i386/spinlock.h
@@ -198,21 +198,33 @@ typedef struct {
 
 #define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
 
-#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
+static inline void rwlock_init(rwlock_t *rw)
+{
+	*rw = RW_LOCK_UNLOCKED;
+}
 
-#define rwlock_is_locked(x) ((x)->lock != RW_LOCK_BIAS)
+static inline int rwlock_is_locked(rwlock_t *rw)
+{
+	return rw->lock != RW_LOCK_BIAS;
+}
 
 /**
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define read_can_lock(x) (atomic_read((atomic_t *)&(x)->lock) > 0)
+static inline int read_can_lock(rwlock_t *rw)
+{
+	return atomic_read((atomic_t *)&rw->lock) > 0;
+}
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+static inline int write_can_lock(rwlock_t *rw)
+{
+	return atomic_read((atomic_t *)&rw->lock) == RW_LOCK_BIAS;
+}
 
 /*
  * On x86, we implement read-write locks as a 32-bit counter
@@ -241,8 +253,16 @@ static inline void _raw_write_lock(rwloc
 	__build_write_lock(rw, "__write_lock_failed");
 }
 
-#define _raw_read_unlock(rw)		asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
-#define _raw_write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
+static inline void _raw_read_unlock(rwlock_t *rw)
+{
+	asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+}
+
+static inline void _raw_write_unlock(rwlock_t *rw)
+{
+	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR
+				",%0":"=m" (rw->lock) : : "memory");
+}
 
 static inline int _raw_read_trylock(rwlock_t *lock)
 {


Thread overview: 7+ messages
2005-01-20 11:43 [patch 1/3] spinlock fix #1 Ingo Molnar
2005-01-20 11:59 ` [patch 2/3] spinlock fix #2: generalize [spin|rw]lock yielding Ingo Molnar
2005-01-20 12:09   ` [patch 3/3] spinlock fix #3: type-checking spinlock primitives, x86 Ingo Molnar
2005-01-20 12:18     ` [patch] stricter type-checking rwlock " Ingo Molnar
2005-01-20 12:22       ` [patch] minor spinlock cleanups Ingo Molnar
2005-01-20 22:51     ` [patch 3/3] spinlock fix #3: type-checking spinlock primitives, x86 J.A. Magallon
  -- strict thread matches above, loose matches on Subject: below --
2005-01-19  9:20 Horrible regression with -CURRENT from "Don't busy-lock-loop in preemptable spinlocks" patch Ingo Molnar
2005-01-19 21:43 ` Paul Mackerras
2005-01-20  2:34   ` [PATCH RFC] 'spinlock/rwlock fixes' V3 [1/1] Chris Wedgwood
2005-01-20  3:01     ` Andrew Morton
2005-01-20  3:18       ` Chris Wedgwood
2005-01-20  8:59         ` Peter Chubb
2005-01-20 15:51           ` Linus Torvalds
2005-01-20 16:08             ` [patch 1/3] spinlock fix #1, *_can_lock() primitives Ingo Molnar
2005-01-20 16:11               ` [patch 2/3] spinlock fix #2: generalize [spin|rw]lock yielding Ingo Molnar
2005-01-20 16:12                 ` [patch 3/3] spinlock fix #3: type-checking spinlock primitives, x86 Ingo Molnar
2005-01-20 16:14                   ` [patch] stricter type-checking rwlock " Ingo Molnar
