* [PATCH V3 1/2] MIPS: Remove irqflags.h dependency from bitops.h
@ 2012-09-03 21:52 Jim Quinlan
  2012-09-03 21:52 ` [PATCH V3 2/2] MIPS: make funcs preempt-safe for non-mipsr2 cpus Jim Quinlan
  2012-09-04 17:30 ` [PATCH V3 1/2] MIPS: Remove irqflags.h dependency from bitops.h David Daney
  0 siblings, 2 replies; 4+ messages in thread
From: Jim Quinlan @ 2012-09-03 21:52 UTC (permalink / raw)
  To: ralf, linux-mips; +Cc: ddaney.cavm, cernekee, Jim Quinlan

The "else clause" of most functions in bitops.h invoked
raw_local_irq_{save,restore}() and so had a dependency on irqflags.h.  This
fix moves said code to bitops.c, removing the dependency.
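
In sketch form, the fallback in each bitops.h function becomes (full
diff below):

	} else
		atomic_set_bit(nr, addr);	/* out of line, in arch/mips/lib/bitops.c */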

Signed-off-by: Jim Quinlan <jim2101024@gmail.com>
---
 arch/mips/include/asm/bitops.h |  114 +++++++------------------
 arch/mips/include/asm/io.h     |    1 +
 arch/mips/lib/Makefile         |    2 +-
 arch/mips/lib/bitops.c         |  180 ++++++++++++++++++++++++++++++++++++++++
 4 files changed, 214 insertions(+), 83 deletions(-)
 create mode 100644 arch/mips/lib/bitops.c

diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 82ad35c..9fd0b1d 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -14,7 +14,6 @@
 #endif
 
 #include <linux/compiler.h>
-#include <linux/irqflags.h>
 #include <linux/types.h>
 #include <asm/barrier.h>
 #include <asm/byteorder.h>		/* sigh ... */
@@ -44,6 +43,24 @@
 #define smp_mb__before_clear_bit()	smp_mb__before_llsc()
 #define smp_mb__after_clear_bit()	smp_llsc_mb()
 
+
+/*
+ * These are the "slower" versions of the functions and are in bitops.c.
+ * These functions call raw_local_irq_{save,restore}().
+ */
+extern void atomic_set_bit(unsigned long nr, volatile unsigned long *addr);
+extern void atomic_clear_bit(unsigned long nr, volatile unsigned long *addr);
+extern void atomic_change_bit(unsigned long nr, volatile unsigned long *addr);
+extern int atomic_test_and_set_bit(unsigned long nr,
+				   volatile unsigned long *addr);
+extern int atomic_test_and_set_bit_lock(unsigned long nr,
+					volatile unsigned long *addr);
+extern int atomic_test_and_clear_bit(unsigned long nr,
+				     volatile unsigned long *addr);
+extern int atomic_test_and_change_bit(unsigned long nr,
+				      volatile unsigned long *addr);
+
+
 /*
  * set_bit - Atomically set a bit in memory
  * @nr: the bit to set
@@ -92,17 +109,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 			: "=&r" (temp), "+m" (*m)
 			: "ir" (1UL << bit));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		atomic_set_bit(nr, addr);
 }
 
 /*
@@ -153,17 +161,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 			: "=&r" (temp), "+m" (*m)
 			: "ir" (~(1UL << bit)));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a &= ~mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		atomic_clear_bit(nr, addr);
 }
 
 /*
@@ -220,17 +219,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 			: "=&r" (temp), "+m" (*m)
 			: "ir" (1UL << bit));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a ^= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		atomic_change_bit(nr, addr);
 }
 
 /*
@@ -281,18 +271,8 @@ static inline int test_and_set_bit(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = atomic_test_and_set_bit(nr, addr);
 
 	smp_llsc_mb();
 
@@ -345,18 +325,8 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = atomic_test_and_set_bit_lock(nr, addr);
 
 	smp_llsc_mb();
 
@@ -428,18 +398,8 @@ static inline int test_and_clear_bit(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a &= ~mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = atomic_test_and_clear_bit(nr, addr);
 
 	smp_llsc_mb();
 
@@ -494,18 +454,8 @@ static inline int test_and_change_bit(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a ^= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = atomic_test_and_change_bit(nr, addr);
 
 	smp_llsc_mb();
 
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 29d9c23..ff2e034 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/irqflags.h>
 
 #include <asm/addrspace.h>
 #include <asm/bug.h>
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index c4a82e8..a7b8937 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y	+= csum_partial.o delay.o memcpy.o memset.o \
+lib-y	+= bitops.o csum_partial.o delay.o memcpy.o memset.o \
 	   strlen_user.o strncpy_user.o strnlen_user.o uncached.o
 
 obj-y			+= iomap.o
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
new file mode 100644
index 0000000..6562ab2
--- /dev/null
+++ b/arch/mips/lib/bitops.c
@@ -0,0 +1,180 @@
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1994-1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
+ */
+
+#include <linux/irqflags.h>
+
+#if _MIPS_SZLONG == 32
+#define SZLONG_LOG 5
+#define SZLONG_MASK 31UL
+#elif _MIPS_SZLONG == 64
+#define SZLONG_MASK 63UL
+#define SZLONG_LOG 6
+#endif
+
+
+/*
+ * atomic_set_bit - Atomically set a bit in memory.  This is called by
+ * set_bit() if it cannot find a faster solution.
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ */
+void atomic_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned short bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	*a |= mask;
+	raw_local_irq_restore(flags);
+}
+
+
+/*
+ * atomic_clear_bit - Clears a bit in memory.  This is called by clear_bit() if
+ * it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ */
+void atomic_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned short bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	*a &= ~mask;
+	raw_local_irq_restore(flags);
+}
+
+
+/*
+ * atomic_change_bit - Toggle a bit in memory.  This is called by change_bit()
+ * if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ */
+void atomic_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned short bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	*a ^= mask;
+	raw_local_irq_restore(flags);
+}
+
+
+/*
+ * atomic_test_and_set_bit - Set a bit and return its old value.  This is
+ * called by test_and_set_bit() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int atomic_test_and_set_bit(unsigned long nr,
+			    volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned short bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	unsigned long res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a);
+	*a |= mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+
+
+/*
+ * atomic_test_and_set_bit_lock - Set a bit and return its old value.  This is
+ * called by test_and_set_bit_lock() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int atomic_test_and_set_bit_lock(unsigned long nr,
+				 volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned short bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	unsigned long res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a);
+	*a |= mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+
+
+/*
+ * atomic_test_and_clear_bit - Clear a bit and return its old value.  This is
+ * called by test_and_clear_bit() if it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ */
+int atomic_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned short bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	unsigned long res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a);
+	*a &= ~mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+
+
+/*
+ * atomic_test_and_change_bit - Change a bit and return its old value.  This is
+ * called by test_and_change_bit() if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to count from
+ */
+int atomic_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned short bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	unsigned long res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a);
+	*a ^= mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
-- 
1.7.6


* [PATCH V3 2/2] MIPS: make funcs preempt-safe for non-mipsr2 cpus
  2012-09-03 21:52 [PATCH V3 1/2] MIPS: Remove irqflags.h dependency from bitops.h Jim Quinlan
@ 2012-09-03 21:52 ` Jim Quinlan
  2012-09-04 17:36   ` David Daney
  2012-09-04 17:30 ` [PATCH V3 1/2] MIPS: Remove irqflags.h dependency from bitops.h David Daney
  1 sibling, 1 reply; 4+ messages in thread
From: Jim Quinlan @ 2012-09-03 21:52 UTC (permalink / raw)
  To: ralf, linux-mips; +Cc: ddaney.cavm, cernekee, Jim Quinlan

For non-MIPSr2 processors, such as the BMIPS 5000, calls to
arch_local_irq_disable() and others may be preempted, and a stale
value may then be restored to c0_status.  This fix disables
preemption for such processors before the sequence and re-enables
it afterwards.

Those functions that needed this fix have been "outlined" to
mips-atomic.c, as they are no longer good candidates for inlining.

This bug was observed on a BMIPS 5000, occurring once every few hours
in a continuous reboot test.  It was traced to the write_lock_irq()
function, which was being invoked in release_task() in exit.c.
By placing a number of "nops" between the mfc0/mtc0 pair in
arch_local_irq_disable(), which is called by write_lock_irq(), we
were able to greatly increase the occurrence of this bug.  Conversely,
applying this commit silenced it.
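
As a rough illustration of the window (a sketch, not the exact BMIPS
trace), the non-MIPSr2 disable sequence can be preempted like this:

	mfc0	$1, $12		# read c0_status
	ori	$1, 0x1f
	xori	$1, 0x1f	# clear IE in the local copy
	  <-- preempted here; another task runs and changes c0_status
	mtc0	$1, $12		# writes back the now-stale value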

Signed-off-by: Jim Quinlan <jim2101024@gmail.com>
---
 arch/mips/include/asm/irqflags.h |   92 +++++---------------
 arch/mips/lib/Makefile           |    3 +-
 arch/mips/lib/mips-atomic.c      |  179 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 202 insertions(+), 72 deletions(-)
 create mode 100644 arch/mips/lib/mips-atomic.c

diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 309cbcd..9174d88 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -16,6 +16,15 @@
 #include <linux/compiler.h>
 #include <asm/hazards.h>
 
+#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
+/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
+extern void arch_local_irq_disable(void);
+extern unsigned long arch_local_irq_save(void);
+extern void arch_local_irq_restore(unsigned long flags);
+extern void __arch_local_irq_restore(unsigned long flags);
+#endif
+
+
 __asm__(
 	"	.macro	arch_local_irq_enable				\n"
 	"	.set	push						\n"
@@ -57,42 +66,12 @@ static inline void arch_local_irq_enable(void)
 }
 
 
-/*
- * For cli() we have to insert nops to make sure that the new value
- * has actually arrived in the status register before the end of this
- * macro.
- * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
- * no nops at all.
- */
-/*
- * For TX49, operating only IE bit is not enough.
- *
- * If mfc0 $12 follows store and the mfc0 is last instruction of a
- * page and fetching the next instruction causes TLB miss, the result
- * of the mfc0 might wrongly contain EXL bit.
- *
- * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
- *
- * Workaround: mask EXL bit of the result or place a nop before mfc0.
- */
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 __asm__(
 	"	.macro	arch_local_irq_disable\n"
 	"	.set	push						\n"
 	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1					\n"
-	"	ori	$1, 0x400					\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1, $2, 1					\n"
-#elif defined(CONFIG_CPU_MIPSR2)
 	"	di							\n"
-#else
-	"	mfc0	$1,$12						\n"
-	"	ori	$1,0x1f						\n"
-	"	xori	$1,0x1f						\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1,$12						\n"
-#endif
 	"	irq_disable_hazard					\n"
 	"	.set	pop						\n"
 	"	.endm							\n");
@@ -105,6 +84,8 @@ static inline void arch_local_irq_disable(void)
 		: /* no inputs */
 		: "memory");
 }
+#endif
+
 
 __asm__(
 	"	.macro	arch_local_save_flags flags			\n"
@@ -125,27 +106,15 @@ static inline unsigned long arch_local_save_flags(void)
 	return flags;
 }
 
+
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 __asm__(
 	"	.macro	arch_local_irq_save result			\n"
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\result, $2, 1					\n"
-	"	ori	$1, \\result, 0x400				\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1, $2, 1					\n"
-	"	andi	\\result, \\result, 0x400			\n"
-#elif defined(CONFIG_CPU_MIPSR2)
 	"	di	\\result					\n"
 	"	andi	\\result, 1					\n"
-#else
-	"	mfc0	\\result, $12					\n"
-	"	ori	$1, \\result, 0x1f				\n"
-	"	xori	$1, 0x1f					\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1, $12						\n"
-#endif
 	"	irq_disable_hazard					\n"
 	"	.set	pop						\n"
 	"	.endm							\n");
@@ -159,20 +128,16 @@ static inline unsigned long arch_local_irq_save(void)
 		     : "memory");
 	return flags;
 }
+#endif
 
+
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 __asm__(
 	"	.macro	arch_local_irq_restore flags			\n"
 	"	.set	push						\n"
 	"	.set	noreorder					\n"
 	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"mfc0	$1, $2, 1						\n"
-	"andi	\\flags, 0x400						\n"
-	"ori	$1, 0x400						\n"
-	"xori	$1, 0x400						\n"
-	"or	\\flags, $1						\n"
-	"mtc0	\\flags, $2, 1						\n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#if defined(CONFIG_IRQ_CPU)
 	/*
 	 * Slow, but doesn't suffer from a relatively unlikely race
 	 * condition we're having since days 1.
@@ -181,20 +146,13 @@ __asm__(
 	"	 di							\n"
 	"	ei							\n"
 	"1:								\n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#else
 	/*
 	 * Fast, dangerous.  Life is fun, life is good.
 	 */
 	"	mfc0	$1, $12						\n"
 	"	ins	$1, \\flags, 0, 1				\n"
 	"	mtc0	$1, $12						\n"
-#else
-	"	mfc0	$1, $12						\n"
-	"	andi	\\flags, 1					\n"
-	"	ori	$1, 0x1f					\n"
-	"	xori	$1, 0x1f					\n"
-	"	or	\\flags, $1					\n"
-	"	mtc0	\\flags, $12					\n"
 #endif
 	"	irq_disable_hazard					\n"
 	"	.set	pop						\n"
@@ -205,16 +163,6 @@ static inline void arch_local_irq_restore(unsigned long flags)
 {
 	unsigned long __tmp1;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of branch and call overhead on each
-	 * local_irq_restore()
-	 */
-	if (unlikely(!(flags & 0x0400)))
-		smtc_ipi_replay();
-#endif
-
 	__asm__ __volatile__(
 		"arch_local_irq_restore\t%0"
 		: "=r" (__tmp1)
@@ -232,6 +180,8 @@ static inline void __arch_local_irq_restore(unsigned long flags)
 		: "0" (flags)
 		: "memory");
 }
+#endif
+
 
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index a7b8937..eeddc58 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -3,7 +3,8 @@
 #
 
 lib-y	+= bitops.o csum_partial.o delay.o memcpy.o memset.o \
-	   strlen_user.o strncpy_user.o strnlen_user.o uncached.o
+	   mips-atomic.o strlen_user.o strncpy_user.o \
+	   strnlen_user.o uncached.o
 
 obj-y			+= iomap.o
 obj-$(CONFIG_PCI)	+= iomap-pci.o
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
new file mode 100644
index 0000000..546eb25
--- /dev/null
+++ b/arch/mips/lib/mips-atomic.c
@@ -0,0 +1,179 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#include <asm/irqflags.h>
+#include <asm/hazards.h>
+#include <linux/compiler.h>
+#include <linux/preempt.h>
+
+#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
+
+#if defined(CONFIG_PREEMPT)
+#define arch_local_preempt_enable() preempt_enable()
+#define arch_local_preempt_disable() preempt_disable()
+#else
+#define arch_local_preempt_enable()
+#define arch_local_preempt_disable()
+#endif
+
+
+/*
+ * For cli() we have to insert nops to make sure that the new value
+ * has actually arrived in the status register before the end of this
+ * macro.
+ * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
+ * no nops at all.
+ */
+/*
+ * For TX49, operating only IE bit is not enough.
+ *
+ * If mfc0 $12 follows store and the mfc0 is last instruction of a
+ * page and fetching the next instruction causes TLB miss, the result
+ * of the mfc0 might wrongly contain EXL bit.
+ *
+ * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
+ *
+ * Workaround: mask EXL bit of the result or place a nop before mfc0.
+ */
+__asm__(
+	"	.macro	arch_local_irq_disable\n"
+	"	.set	push						\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1					\n"
+	"	ori	$1, 0x400					\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1, $2, 1					\n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1,$12						\n"
+	"	ori	$1,0x1f						\n"
+	"	xori	$1,0x1f						\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1,$12						\n"
+#endif
+	"	irq_disable_hazard					\n"
+	"	.set	pop						\n"
+	"	.endm							\n");
+
+void arch_local_irq_disable(void)
+{
+	arch_local_preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_disable"
+		: /* no outputs */
+		: /* no inputs */
+		: "memory");
+	arch_local_preempt_enable();
+}
+
+
+__asm__(
+	"	.macro	arch_local_irq_save result			\n"
+	"	.set	push						\n"
+	"	.set	reorder						\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	\\result, $2, 1					\n"
+	"	ori	$1, \\result, 0x400				\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1, $2, 1					\n"
+	"	andi	\\result, \\result, 0x400			\n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	\\result, $12					\n"
+	"	ori	$1, \\result, 0x1f				\n"
+	"	xori	$1, 0x1f					\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1, $12						\n"
+#endif
+	"	irq_disable_hazard					\n"
+	"	.set	pop						\n"
+	"	.endm							\n");
+
+unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+	arch_local_preempt_disable();
+	asm volatile("arch_local_irq_save\t%0"
+		     : "=r" (flags)
+		     : /* no inputs */
+		     : "memory");
+	arch_local_preempt_enable();
+	return flags;
+}
+
+
+__asm__(
+	"	.macro	arch_local_irq_restore flags			\n"
+	"	.set	push						\n"
+	"	.set	noreorder					\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"mfc0	$1, $2, 1						\n"
+	"andi	\\flags, 0x400						\n"
+	"ori	$1, 0x400						\n"
+	"xori	$1, 0x400						\n"
+	"or	\\flags, $1						\n"
+	"mtc0	\\flags, $2, 1						\n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+	/* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1, $12						\n"
+	"	andi	\\flags, 1					\n"
+	"	ori	$1, 0x1f					\n"
+	"	xori	$1, 0x1f					\n"
+	"	or	\\flags, $1					\n"
+	"	mtc0	\\flags, $12					\n"
+#endif
+	"	irq_disable_hazard					\n"
+	"	.set	pop						\n"
+	"	.endm							\n");
+
+void arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of branch and call overhead on each
+	 * local_irq_restore()
+	 */
+	if (unlikely(!(flags & 0x0400)))
+		smtc_ipi_replay();
+#endif
+	arch_local_preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+	arch_local_preempt_enable();
+}
+
+void __arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+	arch_local_preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+	arch_local_preempt_enable();
+}
+
+#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */
-- 
1.7.6


* Re: [PATCH V3 1/2] MIPS: Remove irqflags.h dependency from bitops.h
  2012-09-03 21:52 [PATCH V3 1/2] MIPS: Remove irqflags.h dependency from bitops.h Jim Quinlan
  2012-09-03 21:52 ` [PATCH V3 2/2] MIPS: make funcs preempt-safe for non-mipsr2 cpus Jim Quinlan
@ 2012-09-04 17:30 ` David Daney
  1 sibling, 0 replies; 4+ messages in thread
From: David Daney @ 2012-09-04 17:30 UTC (permalink / raw)
  To: Jim Quinlan; +Cc: ralf, linux-mips, cernekee

On 09/03/2012 02:52 PM, Jim Quinlan wrote:
> The "else clause" of most functions in bitops.h invoked
> raw_local_irq_{save,restore}() and so had a dependency on irqflags.h.  This
> fix moves said code to bitops.c, removing the dependency.
>
> Signed-off-by: Jim Quinlan <jim2101024@gmail.com>


This is much better I think.

Now only a few very minor things I would change...

> ---
>   arch/mips/include/asm/bitops.h |  114 +++++++------------------
>   arch/mips/include/asm/io.h     |    1 +
>   arch/mips/lib/Makefile         |    2 +-
>   arch/mips/lib/bitops.c         |  180 ++++++++++++++++++++++++++++++++++++++++
>   4 files changed, 214 insertions(+), 83 deletions(-)
>   create mode 100644 arch/mips/lib/bitops.c
>
> diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
> index 82ad35c..9fd0b1d 100644
> --- a/arch/mips/include/asm/bitops.h
> +++ b/arch/mips/include/asm/bitops.h
> @@ -14,7 +14,6 @@
>   #endif
>
>   #include <linux/compiler.h>
> -#include <linux/irqflags.h>
>   #include <linux/types.h>
>   #include <asm/barrier.h>
>   #include <asm/byteorder.h>		/* sigh ... */
> @@ -44,6 +43,24 @@
>   #define smp_mb__before_clear_bit()	smp_mb__before_llsc()
>   #define smp_mb__after_clear_bit()	smp_llsc_mb()
>
> +
> +/*
> + * These are the "slower" versions of the functions and are in bitops.c.
> + * These functions call raw_local_irq_{save,restore}().
> + */
> +extern void atomic_set_bit(unsigned long nr, volatile unsigned long *addr);
> +extern void atomic_clear_bit(unsigned long nr, volatile unsigned long *addr);
> +extern void atomic_change_bit(unsigned long nr, volatile unsigned long *addr);
> +extern int atomic_test_and_set_bit(unsigned long nr,
> +				   volatile unsigned long *addr);
> +extern int atomic_test_and_set_bit_lock(unsigned long nr,
> +					volatile unsigned long *addr);
> +extern int atomic_test_and_clear_bit(unsigned long nr,
> +				     volatile unsigned long *addr);
> +extern int atomic_test_and_change_bit(unsigned long nr,
> +				      volatile unsigned long *addr);
> +

No 'extern' needed.

These shouldn't be directly called from user code.  I would suggest 
renaming the functions to something like:

__mips_set_bit();
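
i.e., the whole set might become (a sketch following that naming):

void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr);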


[...]
> diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
> new file mode 100644
> index 0000000..6562ab2
> --- /dev/null
> +++ b/arch/mips/lib/bitops.c
> @@ -0,0 +1,180 @@
> +
> +/*
> + * This file is subject to the terms and conditions of the GNU General Public
> + * License.  See the file "COPYING" in the main directory of this archive
> + * for more details.
> + *
> + * Copyright (c) 1994-1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
> + * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
> + */
> +
> +#include <linux/irqflags.h>
> +
> +#if _MIPS_SZLONG == 32
> +#define SZLONG_LOG 5
> +#define SZLONG_MASK 31UL
> +#elif _MIPS_SZLONG == 64
> +#define SZLONG_MASK 63UL
> +#define SZLONG_LOG 6
> +#endif
> +

There has to be a cleaner way to do this...  Perhaps:

#define SZLONG_LOG (ilog2(sizeof(unsigned long)) + 3)
#define SZLONG_MASK ((1 << SZLONG_LOG) - 1)
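
(ilog2() lives in <linux/log2.h>, so that include would be needed; it
folds to a compile-time constant here, giving the same 5/31 and 6/63
values as the #if block above.)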

> +
> +/*
> + * atomic_set_bit - Atomically set a bit in memory.  This is called by
> + * set_bit() if it cannot find a faster solution.
> + * @nr: the bit to set
> + * @addr: the address to start counting from
> + */
> +void atomic_set_bit(unsigned long nr, volatile unsigned long *addr)
> +{
> +	volatile unsigned long *a = addr;
> +	unsigned short bit = nr & SZLONG_MASK;

Just make 'bit' an int.  In some cases forcing a narrower type than
necessary requires the compiler to emit extra code.  I am not sure if it
would here, but why use a type other than int unless absolutely necessary?

> +	unsigned long mask;
> +	unsigned long flags;
> +
> +	a += nr >> SZLONG_LOG;
> +	mask = 1UL << bit;
> +	raw_local_irq_save(flags);
> +	*a |= mask;
> +	raw_local_irq_restore(flags);
> +}

All these must be EXPORT_SYMBOL(), so the bitop intrinsics can be used 
from modules.
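
i.e. one line after each definition (with <linux/export.h> included),
something like:

EXPORT_SYMBOL(atomic_set_bit);	/* or __mips_set_bit, if renamed */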


* Re: [PATCH V3 2/2] MIPS: make funcs preempt-safe for non-mipsr2 cpus
  2012-09-03 21:52 ` [PATCH V3 2/2] MIPS: make funcs preempt-safe for non-mipsr2 cpus Jim Quinlan
@ 2012-09-04 17:36   ` David Daney
  0 siblings, 0 replies; 4+ messages in thread
From: David Daney @ 2012-09-04 17:36 UTC (permalink / raw)
  To: Jim Quinlan; +Cc: ralf, linux-mips, cernekee

On 09/03/2012 02:52 PM, Jim Quinlan wrote:
> For non-MIPSr2 processors, such as the BMIPS 5000, calls to
> arch_local_irq_disable() and others may be preempted, and a stale
> value may then be restored to c0_status.  This fix disables
> preemption for such processors before the sequence and re-enables
> it afterwards.
>
> Those functions that needed this fix have been "outlined" to
> mips-atomic.c, as they are no longer good candidates for inlining.
>
> This bug was observed on a BMIPS 5000, occurring once every few hours
> in a continuous reboot test.  It was traced to the write_lock_irq()
> function, which was being invoked in release_task() in exit.c.
> By placing a number of "nops" between the mfc0/mtc0 pair in
> arch_local_irq_disable(), which is called by write_lock_irq(), we
> were able to greatly increase the occurrence of this bug.  Conversely,
> applying this commit silenced it.
>
> Signed-off-by: Jim Quinlan <jim2101024@gmail.com>

This one seems better too...

[...]
> diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
> new file mode 100644
> index 0000000..546eb25
> --- /dev/null
> +++ b/arch/mips/lib/mips-atomic.c
> @@ -0,0 +1,179 @@
> +/*
> + * This file is subject to the terms and conditions of the GNU General Public
> + * License.  See the file "COPYING" in the main directory of this archive
> + * for more details.
> + *
> + * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
> + * Copyright (C) 1996 by Paul M. Antoine
> + * Copyright (C) 1999 Silicon Graphics
> + * Copyright (C) 2000 MIPS Technologies, Inc.
> + */
> +#include <asm/irqflags.h>
> +#include <asm/hazards.h>
> +#include <linux/compiler.h>
> +#include <linux/preempt.h>
> +
> +#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
> +
> +#if defined(CONFIG_PREEMPT)
> +#define arch_local_preempt_enable() preempt_enable()
> +#define arch_local_preempt_disable() preempt_disable()
> +#else
> +#define arch_local_preempt_enable()
> +#define arch_local_preempt_disable()
> +#endif
> +
> +
> +/*
> + * For cli() we have to insert nops to make sure that the new value
> + * has actually arrived in the status register before the end of this
> + * macro.
> + * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
> + * no nops at all.
> + */
> +/*
> + * For TX49, operating only IE bit is not enough.
> + *
> + * If mfc0 $12 follows store and the mfc0 is last instruction of a
> + * page and fetching the next instruction causes TLB miss, the result
> + * of the mfc0 might wrongly contain EXL bit.
> + *
> + * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
> + *
> + * Workaround: mask EXL bit of the result or place a nop before mfc0.
> + */
> +__asm__(
> +	"	.macro	arch_local_irq_disable\n"
> +	"	.set	push						\n"
> +	"	.set	noat						\n"
> +#ifdef CONFIG_MIPS_MT_SMTC
> +	"	mfc0	$1, $2, 1					\n"
> +	"	ori	$1, 0x400					\n"
> +	"	.set	noreorder					\n"
> +	"	mtc0	$1, $2, 1					\n"
> +#elif defined(CONFIG_CPU_MIPSR2)
> +	/* see irqflags.h for inline function */
> +#else
> +	"	mfc0	$1,$12						\n"
> +	"	ori	$1,0x1f						\n"
> +	"	xori	$1,0x1f						\n"
> +	"	.set	noreorder					\n"
> +	"	mtc0	$1,$12						\n"
> +#endif
> +	"	irq_disable_hazard					\n"
> +	"	.set	pop						\n"
> +	"	.endm							\n");
> +
> +void arch_local_irq_disable(void)
> +{
> +	arch_local_preempt_disable();
> +	__asm__ __volatile__(
> +		"arch_local_irq_disable"
> +		: /* no outputs */
> +		: /* no inputs */
> +		: "memory");
> +	arch_local_preempt_enable();
> +}

I think this function must be EXPORT_SYMBOL() too.
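
Presumably the same goes for the other outlined functions, i.e. (sketch):

EXPORT_SYMBOL(arch_local_irq_disable);
EXPORT_SYMBOL(arch_local_irq_save);
EXPORT_SYMBOL(arch_local_irq_restore);
EXPORT_SYMBOL(__arch_local_irq_restore);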

David Daney

