* Horrible regression with -CURRENT from "Don't busy-lock-loop in preemptable spinlocks" patch
@ 2005-01-17  5:50 Chris Wedgwood
  2005-01-17  7:09 ` Andrew Morton
  2005-01-17  7:38 ` [PATCH] __get_cpu_var should use __smp_processor_id() not smp_processor_id() Chris Wedgwood
  0 siblings, 2 replies; 50+ messages in thread
From: Chris Wedgwood @ 2005-01-17  5:50 UTC (permalink / raw)
  To: Linus Torvalds, Ingo Molnar; +Cc: cw, Benjamin Herrenschmidt, LKML

Linus,

The change below is causing major problems for me on a dual K7 with
CONFIG_PREEMPT enabled (backing it out with cset -x and rebuilding
makes the machine usable again).

This change was merged a couple of days ago, so I'm surprised nobody
else has reported this.  I tried to find where this patch came from,
but I don't see it on lkml, only in the BK history.

Note, even with this change removed I'm still seeing a few (many,
actually) "BUG: using smp_processor_id() in preemptible [00000001]
code: xxx" messages that I've not seen before.  That might be
unrelated, but I see *many* such messages, so I'm sure I would have
noticed them before, or something would have broken earlier.

Is this specific to the K7, or do other people also see this?



Thanks,

  --cw

---

# This is a BitKeeper generated diff -Nru style patch.
#
# ChangeSet
#   2005/01/15 09:40:45-08:00 mingo@elte.hu 
#   [PATCH] Don't busy-lock-loop in preemptable spinlocks
#   
#   Paul Mackerras points out that doing the _raw_spin_trylock each time
#   through the loop will generate tons of unnecessary bus traffic.
#   Instead, after we fail to get the lock we should poll it with simple
#   loads until we see that it is clear and then retry the atomic op. 
#   Assuming a reasonable cache design, the loads won't generate any bus
#   traffic until another cpu writes to the cacheline containing the lock.
#   
#   Agreed.
#   
#   Signed-off-by: Ingo Molnar <mingo@elte.hu>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# kernel/spinlock.c
#   2005/01/14 16:00:00-08:00 mingo@elte.hu +8 -6
#   Don't busy-lock-loop in preemptable spinlocks
# 
diff -Nru a/kernel/spinlock.c b/kernel/spinlock.c
--- a/kernel/spinlock.c	2005-01-16 21:43:15 -08:00
+++ b/kernel/spinlock.c	2005-01-16 21:43:15 -08:00
@@ -173,7 +173,7 @@
  * (We do this in a function because inlining it would be excessive.)
  */
 
-#define BUILD_LOCK_OPS(op, locktype)					\
+#define BUILD_LOCK_OPS(op, locktype, is_locked_fn)			\
 void __lockfunc _##op##_lock(locktype *lock)				\
 {									\
 	preempt_disable();						\
@@ -183,7 +183,8 @@
 		preempt_enable();					\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		cpu_relax();						\
+		while (is_locked_fn(lock) && (lock)->break_lock)	\
+			cpu_relax();					\
 		preempt_disable();					\
 	}								\
 }									\
@@ -204,7 +205,8 @@
 		preempt_enable();					\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		cpu_relax();						\
+		while (is_locked_fn(lock) && (lock)->break_lock)	\
+			cpu_relax();					\
 		preempt_disable();					\
 	}								\
 	return flags;							\
@@ -244,9 +246,9 @@
  *         _[spin|read|write]_lock_irqsave()
  *         _[spin|read|write]_lock_bh()
  */
-BUILD_LOCK_OPS(spin, spinlock_t);
-BUILD_LOCK_OPS(read, rwlock_t);
-BUILD_LOCK_OPS(write, rwlock_t);
+BUILD_LOCK_OPS(spin, spinlock_t, spin_is_locked);
+BUILD_LOCK_OPS(read, rwlock_t, rwlock_is_locked);
+BUILD_LOCK_OPS(write, rwlock_t, spin_is_locked);
 
 #endif /* CONFIG_PREEMPT */
 

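What the changeset describes is the classic test-and-test-and-set
pattern.  Here is a minimal standalone sketch of the idea, using C11
atomics rather than the kernel's _raw_* primitives; all names below
are made up for illustration:

#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_bool locked; } sketch_lock_t;

static void sketch_lock(sketch_lock_t *l)
{
	/* The atomic RMW can bounce the cacheline between CPUs... */
	while (atomic_exchange_explicit(&l->locked, true,
					memory_order_acquire)) {
		/* ...so once it fails, poll with plain loads, which stay
		 * cache-local until the owner writes the line on unlock. */
		while (atomic_load_explicit(&l->locked, memory_order_relaxed))
			; /* spin */
	}
}

static void sketch_unlock(sketch_lock_t *l)
{
	atomic_store_explicit(&l->locked, false, memory_order_release);
}
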
* [patch 1/3] spinlock fix #1
@ 2005-01-20 11:43 Ingo Molnar
  2005-01-20 11:59 ` [patch 2/3] spinlock fix #2: generalize [spin|rw]lock yielding Ingo Molnar
  0 siblings, 1 reply; 50+ messages in thread
From: Ingo Molnar @ 2005-01-20 11:43 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel


I've split spinlocking-fixes.patch into three parts and reworked them.
This is the first one, against BK-curr:

It fixes the BUILD_LOCK_OPS() bug by introducing the following three
new locking primitives:

  spin_trylock_test(lock)
  read_trylock_test(lock)
  write_trylock_test(lock)

This is what BUILD_LOCK_OPS() needs: a nonintrusive test of whether
the real (intrusive) trylock op would succeed.  Semantics and naming
are completely symmetric to the trylock counterparts.  No changes to
exit.c.
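
To see why the polled predicate matters, take the read-lock case: with
the i386 definitions (see the asm-i386/spinlock.h hunk below),
rwlock_is_locked() is true whenever *anyone* holds the lock, readers
included, while read_trylock() succeeds as long as no writer holds it.
So the old loop could leave a contending reader spinning on a lock it
was in fact allowed to take.  An illustrative sketch, assuming the
i386 counter encoding (RW_LOCK_BIAS when free, minus one per reader):

	rwlock_t lock = RW_LOCK_UNLOCKED;  /* counter == RW_LOCK_BIAS     */

	read_lock(&lock);                  /* counter == RW_LOCK_BIAS - 1 */

	/* meanwhile, on another CPU: */
	rwlock_is_locked(&lock);           /* true -> the old poll spins  */
	read_trylock_test(&lock);          /* true -> a second reader may */
	                                   /*         take the lock       */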

Build- and boot-tested on x86.  Architectures that want to support
PREEMPT need to add these definitions.

	Ingo

Signed-off-by: Ingo Molnar <mingo@elte.hu>

--- linux/kernel/spinlock.c.orig
+++ linux/kernel/spinlock.c
@@ -173,8 +173,8 @@ EXPORT_SYMBOL(_write_lock);
  * (We do this in a function because inlining it would be excessive.)
  */
 
-#define BUILD_LOCK_OPS(op, locktype, is_locked_fn)			\
-void __lockfunc _##op##_lock(locktype *lock)				\
+#define BUILD_LOCK_OPS(op, locktype)					\
+void __lockfunc _##op##_lock(locktype##_t *lock)			\
 {									\
 	preempt_disable();						\
 	for (;;) {							\
@@ -183,7 +183,7 @@ void __lockfunc _##op##_lock(locktype *l
 		preempt_enable();					\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		while (is_locked_fn(lock) && (lock)->break_lock)	\
+		while (!op##_trylock_test(lock) && (lock)->break_lock)	\
 			cpu_relax();					\
 		preempt_disable();					\
 	}								\
@@ -191,7 +191,7 @@ void __lockfunc _##op##_lock(locktype *l
 									\
 EXPORT_SYMBOL(_##op##_lock);						\
 									\
-unsigned long __lockfunc _##op##_lock_irqsave(locktype *lock)		\
+unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
 {									\
 	unsigned long flags;						\
 									\
@@ -205,7 +205,7 @@ unsigned long __lockfunc _##op##_lock_ir
 		preempt_enable();					\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		while (is_locked_fn(lock) && (lock)->break_lock)	\
+		while (!op##_trylock_test(lock) && (lock)->break_lock)	\
 			cpu_relax();					\
 		preempt_disable();					\
 	}								\
@@ -214,14 +214,14 @@ unsigned long __lockfunc _##op##_lock_ir
 									\
 EXPORT_SYMBOL(_##op##_lock_irqsave);					\
 									\
-void __lockfunc _##op##_lock_irq(locktype *lock)			\
+void __lockfunc _##op##_lock_irq(locktype##_t *lock)			\
 {									\
 	_##op##_lock_irqsave(lock);					\
 }									\
 									\
 EXPORT_SYMBOL(_##op##_lock_irq);					\
 									\
-void __lockfunc _##op##_lock_bh(locktype *lock)				\
+void __lockfunc _##op##_lock_bh(locktype##_t *lock)			\
 {									\
 	unsigned long flags;						\
 									\
@@ -246,9 +246,9 @@ EXPORT_SYMBOL(_##op##_lock_bh)
  *         _[spin|read|write]_lock_irqsave()
  *         _[spin|read|write]_lock_bh()
  */
-BUILD_LOCK_OPS(spin, spinlock_t, spin_is_locked);
-BUILD_LOCK_OPS(read, rwlock_t, rwlock_is_locked);
-BUILD_LOCK_OPS(write, rwlock_t, spin_is_locked);
+BUILD_LOCK_OPS(spin, spinlock);
+BUILD_LOCK_OPS(read, rwlock);
+BUILD_LOCK_OPS(write, rwlock);
 
 #endif /* CONFIG_PREEMPT */
 
--- linux/include/linux/spinlock.h.orig
+++ linux/include/linux/spinlock.h
@@ -584,4 +584,10 @@ static inline int bit_spin_is_locked(int
 #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
 #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
 
+/**
+ * spin_trylock_test - would spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define spin_trylock_test(lock)		(!spin_is_locked(lock))
+
 #endif /* __LINUX_SPINLOCK_H */
--- linux/include/asm-i386/spinlock.h.orig
+++ linux/include/asm-i386/spinlock.h
@@ -188,6 +188,18 @@ typedef struct {
 
 #define rwlock_is_locked(x) ((x)->lock != RW_LOCK_BIAS)
 
+/**
+ * read_trylock_test - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define read_trylock_test(x) (atomic_read((atomic_t *)&(x)->lock) > 0)
+
+/**
+ * write_trylock_test - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define write_trylock_test(x) ((x)->lock == RW_LOCK_BIAS)
+
 /*
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.

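Read together with the comment at the end of the hunk above, the two
tests encode the i386 rwlock layout: the counter starts at
RW_LOCK_BIAS, each reader subtracts one, and a writer subtracts the
whole bias.  A standalone model of that arithmetic (not kernel code;
it assumes the usual i386 value RW_LOCK_BIAS == 0x01000000):

#include <assert.h>

#define RW_LOCK_BIAS 0x01000000	/* free when counter == RW_LOCK_BIAS */

static int read_test(int counter)  { return counter > 0; }
static int write_test(int counter) { return counter == RW_LOCK_BIAS; }

int main(void)
{
	int counter = RW_LOCK_BIAS;	/* unlocked: both tests succeed */
	assert(read_test(counter) && write_test(counter));

	counter -= 1;			/* one reader holds the lock */
	assert(read_test(counter) && !write_test(counter));

	counter = RW_LOCK_BIAS;		/* reader released; free again */
	counter -= RW_LOCK_BIAS;	/* a writer takes the lock -> 0 */
	assert(!read_test(counter) && !write_test(counter));
	return 0;
}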

Thread overview: 50+ messages
2005-01-17  5:50 Horrible regression with -CURRENT from "Don't busy-lock-loop in preemptable spinlocks" patch Chris Wedgwood
2005-01-17  7:09 ` Andrew Morton
2005-01-17  7:33   ` Chris Wedgwood
2005-01-17  7:50     ` Paul Mackerras
2005-01-17  8:00       ` Chris Wedgwood
2005-01-17 14:33   ` Ingo Molnar
2005-01-18  1:47     ` Darren Williams
2005-01-18  4:28       ` Darren Williams
2005-01-18  7:08         ` Chris Wedgwood
2005-01-19  0:14       ` Peter Chubb
2005-01-19  8:04         ` Ingo Molnar
2005-01-19  9:18           ` Peter Chubb
2005-01-19  9:20             ` Ingo Molnar
2005-01-19 21:43               ` Paul Mackerras
2005-01-20  2:34                 ` [PATCH RFC] 'spinlock/rwlock fixes' V3 [1/1] Chris Wedgwood
2005-01-20  3:01                   ` Andrew Morton
2005-01-20  3:18                     ` Chris Wedgwood
2005-01-20  3:33                       ` Andrew Morton
2005-01-20  8:59                       ` Peter Chubb
2005-01-20 13:04                         ` Ingo Molnar
2005-01-20 15:51                         ` Linus Torvalds
2005-01-20 16:08                           ` [patch 1/3] spinlock fix #1, *_can_lock() primitives Ingo Molnar
2005-01-20 16:11                             ` [patch 2/3] spinlock fix #2: generalize [spin|rw]lock yielding Ingo Molnar
2005-01-20 16:12                               ` [patch 3/3] spinlock fix #3: type-checking spinlock primitives, x86 Ingo Molnar
2005-01-20 16:14                                 ` [patch] stricter type-checking rwlock " Ingo Molnar
2005-01-20 16:16                                   ` [patch] minor spinlock cleanups Ingo Molnar
2005-01-20 16:31                             ` [patch 1/3] spinlock fix #1, *_can_lock() primitives Linus Torvalds
2005-01-20 16:40                               ` Ingo Molnar
2005-01-20 17:48                                 ` Linus Torvalds
2005-01-20 17:53                                   ` Ingo Molnar
2005-01-20 18:22                                     ` [patch, BK-curr] nonintrusive spin-polling loop in kernel/spinlock.c Ingo Molnar
2005-01-20 18:25                                       ` [patch, BK-curr] rename 'lock' to 'slock' in asm-i386/spinlock.h Ingo Molnar
2005-01-20 23:45                                       ` [patch, BK-curr] nonintrusive spin-polling loop in kernel/spinlock.c Linus Torvalds
2005-01-20 16:44                               ` [patch 1/3] spinlock fix #1, *_can_lock() primitives Ingo Molnar
2005-01-20 16:59                                 ` Ingo Molnar
2005-01-20 16:47                               ` Ingo Molnar
2005-01-20 16:57                               ` Ingo Molnar
2005-01-20 16:05                       ` [PATCH RFC] 'spinlock/rwlock fixes' V3 [1/1] Linus Torvalds
2005-01-20 16:20                         ` Ingo Molnar
2005-01-20 16:18                   ` Linus Torvalds
2005-01-20 16:23                     ` Ingo Molnar
2005-01-20 17:30                       ` Linus Torvalds
2005-01-20 17:38                         ` Ingo Molnar
2005-01-20 16:28                     ` Ingo Molnar
2005-01-20  5:49                 ` Horrible regression with -CURRENT from "Don't busy-lock-loop in preemptable spinlocks" patch Grant Grundler
2005-01-17  7:38 ` [PATCH] __get_cpu_var should use __smp_processor_id() not smp_processor_id() Chris Wedgwood
2005-01-17 14:40   ` Ingo Molnar
2005-01-17 18:53     ` Chris Wedgwood
2005-01-20 11:43 [patch 1/3] spinlock fix #1 Ingo Molnar
2005-01-20 11:59 ` [patch 2/3] spinlock fix #2: generalize [spin|rw]lock yielding Ingo Molnar
2005-01-20 12:09   ` [patch 3/3] spinlock fix #3: type-checking spinlock primitives, x86 Ingo Molnar
2005-01-20 22:51     ` J.A. Magallon
