linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions
@ 2016-08-28  5:39 chengang
  2016-08-28  7:02 ` kbuild test robot
                   ` (4 more replies)
  0 siblings, 5 replies; 19+ messages in thread
From: chengang @ 2016-08-28  5:39 UTC (permalink / raw)
  To: akpm
  Cc: minchan, vbabka, gi-oh.kim, iamjoonsoo.kim, hillf.zj, mgorman,
	mhocko, rientjes, linux-kernel, rth, ink, mattst88, vgupta,
	linux, catalin.marinas, will.deacon, hskinnemoen, egtvedt,
	realmz6, ysato, rkuo, tony.luck, fenghua.yu, geert, james.hogan,
	ralf, dhowells, deller, benh, paulus, mpe, schwidefsky,
	heiko.carstens, dalias, davem, cmetcalf, chris, jcmvbkbc, arnd,
	noamc, brueckner, mingo, peterz, linux-arch, Chen Gang,
	Chen Gang

From: Chen Gang <chengang@emindsoft.com.cn>

Also apply the same change to asm-generic, use bool variables instead of
int variables for the related mips, mn10300, parisc and tile functions,
and avoid checkpatch.pl ERROR reports.

Originally, all architectures except powerpc and xtensa intended to
return 0 or 1. After this patch, powerpc and xtensa also return 0 or 1.

The patch passes cross-building for mips and parisc with the default
config. All related contents were found with "grep test_bit" and
"grep test_and" under the arch sub-directory.

Signed-off-by: Chen Gang <gang.chen.5i5j@gmail.com>
---
 arch/alpha/include/asm/bitops.h         | 16 ++++++++--------
 arch/arc/include/asm/bitops.h           | 10 +++++-----
 arch/arm/include/asm/bitops.h           | 12 ++++++------
 arch/arm64/include/asm/bitops.h         |  6 +++---
 arch/avr32/include/asm/bitops.h         |  6 +++---
 arch/blackfin/include/asm/bitops.h      | 16 ++++++++--------
 arch/frv/include/asm/bitops.h           | 16 ++++++++--------
 arch/h8300/include/asm/bitops.h         |  4 ++--
 arch/hexagon/include/asm/bitops.h       | 14 +++++++-------
 arch/ia64/include/asm/bitops.h          | 14 +++++++-------
 arch/m32r/include/asm/bitops.h          |  6 +++---
 arch/m68k/include/asm/bitops.h          | 20 ++++++++++----------
 arch/metag/include/asm/bitops.h         |  6 +++---
 arch/mips/include/asm/bitops.h          | 16 ++++++++--------
 arch/mips/lib/bitops.c                  | 16 ++++++++--------
 arch/mn10300/include/asm/bitops.h       |  7 ++++---
 arch/parisc/include/asm/bitops.h        | 16 ++++++++--------
 arch/powerpc/include/asm/bitops.h       | 10 +++++-----
 arch/s390/include/asm/bitops.h          | 18 +++++++++---------
 arch/sh/include/asm/bitops-cas.h        |  6 +++---
 arch/sh/include/asm/bitops-grb.h        |  6 +++---
 arch/sh/include/asm/bitops-llsc.h       |  6 +++---
 arch/sh/include/asm/bitops-op32.h       |  8 ++++----
 arch/sparc/include/asm/bitops_32.h      |  6 +++---
 arch/sparc/include/asm/bitops_64.h      |  6 +++---
 arch/tile/include/asm/bitops_32.h       |  6 +++---
 arch/tile/include/asm/bitops_64.h       | 10 +++++-----
 arch/xtensa/include/asm/bitops.h        |  6 +++---
 include/asm-generic/bitops/atomic.h     |  6 +++---
 include/asm-generic/bitops/le.h         | 10 +++++-----
 include/asm-generic/bitops/non-atomic.h |  8 ++++----
 31 files changed, 157 insertions(+), 156 deletions(-)

diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h
index 4bdfbd4..92d468f 100644
--- a/arch/alpha/include/asm/bitops.h
+++ b/arch/alpha/include/asm/bitops.h
@@ -125,7 +125,7 @@ __change_bit(unsigned long nr, volatile void * addr)
 	*m ^= 1 << (nr & 31);
 }
 
-static inline int
+static inline bool
 test_and_set_bit(unsigned long nr, volatile void *addr)
 {
 	unsigned long oldbit;
@@ -155,7 +155,7 @@ test_and_set_bit(unsigned long nr, volatile void *addr)
 	return oldbit != 0;
 }
 
-static inline int
+static inline bool
 test_and_set_bit_lock(unsigned long nr, volatile void *addr)
 {
 	unsigned long oldbit;
@@ -185,7 +185,7 @@ test_and_set_bit_lock(unsigned long nr, volatile void *addr)
 /*
  * WARNING: non atomic version.
  */
-static inline int
+static inline bool
 __test_and_set_bit(unsigned long nr, volatile void * addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
@@ -196,7 +196,7 @@ __test_and_set_bit(unsigned long nr, volatile void * addr)
 	return (old & mask) != 0;
 }
 
-static inline int
+static inline bool
 test_and_clear_bit(unsigned long nr, volatile void * addr)
 {
 	unsigned long oldbit;
@@ -229,7 +229,7 @@ test_and_clear_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static inline int
+static inline bool
 __test_and_clear_bit(unsigned long nr, volatile void * addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
@@ -240,7 +240,7 @@ __test_and_clear_bit(unsigned long nr, volatile void * addr)
 	return (old & mask) != 0;
 }
 
-static inline int
+static inline bool
 test_and_change_bit(unsigned long nr, volatile void * addr)
 {
 	unsigned long oldbit;
@@ -271,7 +271,7 @@ test_and_change_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ int
+static __inline__ bool
 __test_and_change_bit(unsigned long nr, volatile void * addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
@@ -282,7 +282,7 @@ __test_and_change_bit(unsigned long nr, volatile void * addr)
 	return (old & mask) != 0;
 }
 
-static inline int
+static inline bool
 test_bit(int nr, const volatile void * addr)
 {
 	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 8da87fee..e1976ab 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -60,7 +60,7 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
  * and the old value of bit is returned
  */
 #define TEST_N_BIT_OP(op, c_op, asm_op)					\
-static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+static inline bool test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
 {									\
 	unsigned long old, temp;					\
 									\
@@ -124,7 +124,7 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
 }
 
 #define TEST_N_BIT_OP(op, c_op, asm_op)					\
-static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+static inline bool test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
 {									\
 	unsigned long old, flags;					\
 	m += nr >> 5;							\
@@ -160,7 +160,7 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
 }
 
 #define TEST_N_BIT_OP(op, c_op, asm_op)					\
-static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+static inline bool test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
 {									\
 	unsigned long old;						\
 									\
@@ -204,7 +204,7 @@ static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m)	\
 }
 
 #define __TEST_N_BIT_OP(op, c_op, asm_op)				\
-static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+static inline bool __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
 {									\
 	unsigned long old;						\
 	m += nr >> 5;							\
@@ -242,7 +242,7 @@ BIT_OPS(change, ^, CTOP_INST_AXOR_DI_R2_R2_R3)
 /*
  * This routine doesn't need to be atomic.
  */
-static inline int
+static inline bool
 test_bit(unsigned int nr, const volatile unsigned long *addr)
 {
 	unsigned long mask;
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index e943e6c..719a598 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -68,7 +68,7 @@ static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned lon
 	raw_local_irq_restore(flags);
 }
 
-static inline int
+static inline bool
 ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
@@ -85,7 +85,7 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
 	return (res & mask) != 0;
 }
 
-static inline int
+static inline bool
 ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
@@ -102,7 +102,7 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
 	return (res & mask) != 0;
 }
 
-static inline int
+static inline bool
 ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
@@ -152,9 +152,9 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
 extern void _set_bit(int nr, volatile unsigned long * p);
 extern void _clear_bit(int nr, volatile unsigned long * p);
 extern void _change_bit(int nr, volatile unsigned long * p);
-extern int _test_and_set_bit(int nr, volatile unsigned long * p);
-extern int _test_and_clear_bit(int nr, volatile unsigned long * p);
-extern int _test_and_change_bit(int nr, volatile unsigned long * p);
+extern bool _test_and_set_bit(int nr, volatile unsigned long *p);
+extern bool _test_and_clear_bit(int nr, volatile unsigned long *p);
+extern bool _test_and_change_bit(int nr, volatile unsigned long *p);
 
 /*
  * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
diff --git a/arch/arm64/include/asm/bitops.h b/arch/arm64/include/asm/bitops.h
index 9c19594..61f9f3c 100644
--- a/arch/arm64/include/asm/bitops.h
+++ b/arch/arm64/include/asm/bitops.h
@@ -29,9 +29,9 @@
 extern void set_bit(int nr, volatile unsigned long *p);
 extern void clear_bit(int nr, volatile unsigned long *p);
 extern void change_bit(int nr, volatile unsigned long *p);
-extern int test_and_set_bit(int nr, volatile unsigned long *p);
-extern int test_and_clear_bit(int nr, volatile unsigned long *p);
-extern int test_and_change_bit(int nr, volatile unsigned long *p);
+extern bool test_and_set_bit(int nr, volatile unsigned long *p);
+extern bool test_and_clear_bit(int nr, volatile unsigned long *p);
+extern bool test_and_change_bit(int nr, volatile unsigned long *p);
 
 #include <asm-generic/bitops/builtin-__ffs.h>
 #include <asm-generic/bitops/builtin-ffs.h>
diff --git a/arch/avr32/include/asm/bitops.h b/arch/avr32/include/asm/bitops.h
index 910d537..0e3e08b 100644
--- a/arch/avr32/include/asm/bitops.h
+++ b/arch/avr32/include/asm/bitops.h
@@ -128,7 +128,7 @@ static inline void change_bit(int nr, volatile void * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_set_bit(int nr, volatile void * addr)
+static inline bool test_and_set_bit(int nr, volatile void *addr)
 {
 	unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
 	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
@@ -168,7 +168,7 @@ static inline int test_and_set_bit(int nr, volatile void * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_clear_bit(int nr, volatile void * addr)
+static inline bool test_and_clear_bit(int nr, volatile void *addr)
 {
 	unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
 	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
@@ -209,7 +209,7 @@ static inline int test_and_clear_bit(int nr, volatile void * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_change_bit(int nr, volatile void * addr)
+static inline bool test_and_change_bit(int nr, volatile void *addr)
 {
 	unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
 	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index b298b65..ff43a11 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -47,13 +47,13 @@ asmlinkage int __raw_bit_clear_asm(volatile unsigned long *addr, int nr);
 
 asmlinkage int __raw_bit_toggle_asm(volatile unsigned long *addr, int nr);
 
-asmlinkage int __raw_bit_test_set_asm(volatile unsigned long *addr, int nr);
+asmlinkage bool __raw_bit_test_set_asm(volatile unsigned long *addr, int nr);
 
-asmlinkage int __raw_bit_test_clear_asm(volatile unsigned long *addr, int nr);
+asmlinkage bool __raw_bit_test_clear_asm(volatile unsigned long *addr, int nr);
 
-asmlinkage int __raw_bit_test_toggle_asm(volatile unsigned long *addr, int nr);
+asmlinkage bool __raw_bit_test_toggle_asm(volatile unsigned long *addr, int nr);
 
-asmlinkage int __raw_bit_test_asm(const volatile unsigned long *addr, int nr);
+asmlinkage bool __raw_bit_test_asm(const volatile unsigned long *addr, int nr);
 
 static inline void set_bit(int nr, volatile unsigned long *addr)
 {
@@ -73,25 +73,25 @@ static inline void change_bit(int nr, volatile unsigned long *addr)
 	__raw_bit_toggle_asm(a, nr & 0x1f);
 }
 
-static inline int test_bit(int nr, const volatile unsigned long *addr)
+static inline bool test_bit(int nr, const volatile unsigned long *addr)
 {
 	volatile const unsigned long *a = addr + (nr >> 5);
 	return __raw_bit_test_asm(a, nr & 0x1f) != 0;
 }
 
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline bool test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	volatile unsigned long *a = addr + (nr >> 5);
 	return __raw_bit_test_set_asm(a, nr & 0x1f);
 }
 
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline bool test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	volatile unsigned long *a = addr + (nr >> 5);
 	return __raw_bit_test_clear_asm(a, nr & 0x1f);
 }
 
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline bool test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	volatile unsigned long *a = addr + (nr >> 5);
 	return __raw_bit_test_toggle_asm(a, nr & 0x1f);
diff --git a/arch/frv/include/asm/bitops.h b/arch/frv/include/asm/bitops.h
index 0df8e95..c9bf93d 100644
--- a/arch/frv/include/asm/bitops.h
+++ b/arch/frv/include/asm/bitops.h
@@ -27,7 +27,7 @@
 
 #include <asm/atomic.h>
 
-static inline int test_and_clear_bit(unsigned long nr, volatile void *addr)
+static inline bool test_and_clear_bit(unsigned long nr, volatile void *addr)
 {
 	unsigned int *ptr = (void *)addr;
 	unsigned int mask = 1UL << (nr & 31);
@@ -35,7 +35,7 @@ static inline int test_and_clear_bit(unsigned long nr, volatile void *addr)
 	return (__atomic32_fetch_and(~mask, ptr) & mask) != 0;
 }
 
-static inline int test_and_set_bit(unsigned long nr, volatile void *addr)
+static inline bool test_and_set_bit(unsigned long nr, volatile void *addr)
 {
 	unsigned int *ptr = (void *)addr;
 	unsigned int mask = 1UL << (nr & 31);
@@ -43,7 +43,7 @@ static inline int test_and_set_bit(unsigned long nr, volatile void *addr)
 	return (__atomic32_fetch_or(mask, ptr) & mask) != 0;
 }
 
-static inline int test_and_change_bit(unsigned long nr, volatile void *addr)
+static inline bool test_and_change_bit(unsigned long nr, volatile void *addr)
 {
 	unsigned int *ptr = (void *)addr;
 	unsigned int mask = 1UL << (nr & 31);
@@ -96,7 +96,7 @@ static inline void __change_bit(unsigned long nr, volatile void *addr)
 	*a ^= mask;
 }
 
-static inline int __test_and_clear_bit(unsigned long nr, volatile void *addr)
+static inline bool __test_and_clear_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *a = addr;
 	int mask, retval;
@@ -108,7 +108,7 @@ static inline int __test_and_clear_bit(unsigned long nr, volatile void *addr)
 	return retval;
 }
 
-static inline int __test_and_set_bit(unsigned long nr, volatile void *addr)
+static inline bool __test_and_set_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *a = addr;
 	int mask, retval;
@@ -120,7 +120,7 @@ static inline int __test_and_set_bit(unsigned long nr, volatile void *addr)
 	return retval;
 }
 
-static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
+static inline bool __test_and_change_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *a = addr;
 	int mask, retval;
@@ -135,13 +135,13 @@ static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
 /*
  * This routine doesn't need to be atomic.
  */
-static inline int
+static inline bool
 __constant_test_bit(unsigned long nr, const volatile void *addr)
 {
 	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
 }
 
-static inline int __test_bit(unsigned long nr, const volatile void *addr)
+static inline bool __test_bit(unsigned long nr, const volatile void *addr)
 {
 	int 	* a = (int *) addr;
 	int	mask;
diff --git a/arch/h8300/include/asm/bitops.h b/arch/h8300/include/asm/bitops.h
index 05999ab..8f6dfc6 100644
--- a/arch/h8300/include/asm/bitops.h
+++ b/arch/h8300/include/asm/bitops.h
@@ -65,7 +65,7 @@ H8300_GEN_BITOP(change_bit, "bnot")
 
 #undef H8300_GEN_BITOP
 
-static inline int test_bit(int nr, const unsigned long *addr)
+static inline bool test_bit(int nr, const unsigned long *addr)
 {
 	int ret = 0;
 	unsigned char *b_addr;
@@ -91,7 +91,7 @@ static inline int test_bit(int nr, const unsigned long *addr)
 #define __test_bit(nr, addr) test_bit(nr, addr)
 
 #define H8300_GEN_TEST_BITOP(FNNAME, OP)				\
-static inline int FNNAME(int nr, void *addr)				\
+static inline bool FNNAME(int nr, void *addr)				\
 {									\
 	int retval = 0;							\
 	char ccrsave;							\
diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
index 5e4a59b..fa6b32c 100644
--- a/arch/hexagon/include/asm/bitops.h
+++ b/arch/hexagon/include/asm/bitops.h
@@ -42,7 +42,7 @@
  * @nr:  bit number to clear
  * @addr:  pointer to memory
  */
-static inline int test_and_clear_bit(int nr, volatile void *addr)
+static inline bool test_and_clear_bit(int nr, volatile void *addr)
 {
 	int oldval;
 
@@ -66,7 +66,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
  * @nr:  bit number to set
  * @addr:  pointer to memory
  */
-static inline int test_and_set_bit(int nr, volatile void *addr)
+static inline bool test_and_set_bit(int nr, volatile void *addr)
 {
 	int oldval;
 
@@ -92,7 +92,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
  * @nr:  bit number to set
  * @addr:  pointer to memory
  */
-static inline int test_and_change_bit(int nr, volatile void *addr)
+static inline bool test_and_change_bit(int nr, volatile void *addr)
 {
 	int oldval;
 
@@ -157,22 +157,22 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
 }
 
 /*  Apparently, at least some of these are allowed to be non-atomic  */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline bool __test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	return test_and_clear_bit(nr, addr);
 }
 
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline bool __test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	return test_and_set_bit(nr, addr);
 }
 
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline bool __test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	return test_and_change_bit(nr, addr);
 }
 
-static inline int __test_bit(int nr, const volatile unsigned long *addr)
+static inline bool __test_bit(int nr, const volatile unsigned long *addr)
 {
 	int retval;
 
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 71e8145..38edf72 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -196,7 +196,7 @@ __change_bit (int nr, volatile void *addr)
  * This operation is atomic and cannot be reordered.  
  * It also implies the acquisition side of the memory barrier.
  */
-static __inline__ int
+static __inline__ bool
 test_and_set_bit (int nr, volatile void *addr)
 {
 	__u32 bit, old, new;
@@ -231,7 +231,7 @@ test_and_set_bit (int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static __inline__ int
+static __inline__ bool
 __test_and_set_bit (int nr, volatile void *addr)
 {
 	__u32 *p = (__u32 *) addr + (nr >> 5);
@@ -250,7 +250,7 @@ __test_and_set_bit (int nr, volatile void *addr)
  * This operation is atomic and cannot be reordered.  
  * It also implies the acquisition side of the memory barrier.
  */
-static __inline__ int
+static __inline__ bool
 test_and_clear_bit (int nr, volatile void *addr)
 {
 	__u32 mask, old, new;
@@ -276,7 +276,7 @@ test_and_clear_bit (int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static __inline__ int
+static __inline__ bool
 __test_and_clear_bit(int nr, volatile void * addr)
 {
 	__u32 *p = (__u32 *) addr + (nr >> 5);
@@ -295,7 +295,7 @@ __test_and_clear_bit(int nr, volatile void * addr)
  * This operation is atomic and cannot be reordered.  
  * It also implies the acquisition side of the memory barrier.
  */
-static __inline__ int
+static __inline__ bool
 test_and_change_bit (int nr, volatile void *addr)
 {
 	__u32 bit, old, new;
@@ -319,7 +319,7 @@ test_and_change_bit (int nr, volatile void *addr)
  *
  * This operation is non-atomic and can be reordered.
  */
-static __inline__ int
+static __inline__ bool
 __test_and_change_bit (int nr, void *addr)
 {
 	__u32 old, bit = (1 << (nr & 31));
@@ -330,7 +330,7 @@ __test_and_change_bit (int nr, void *addr)
 	return (old & bit) != 0;
 }
 
-static __inline__ int
+static __inline__ bool
 test_bit (int nr, const volatile void *addr)
 {
 	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
diff --git a/arch/m32r/include/asm/bitops.h b/arch/m32r/include/asm/bitops.h
index 86ba2b4..5f12ceb 100644
--- a/arch/m32r/include/asm/bitops.h
+++ b/arch/m32r/include/asm/bitops.h
@@ -147,7 +147,7 @@ static __inline__ void change_bit(int nr, volatile void * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __inline__ int test_and_set_bit(int nr, volatile void * addr)
+static __inline__ bool test_and_set_bit(int nr, volatile void *addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
@@ -182,7 +182,7 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+static __inline__ bool test_and_clear_bit(int nr, volatile void *addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
@@ -219,7 +219,7 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __inline__ int test_and_change_bit(int nr, volatile void * addr)
+static __inline__ bool test_and_change_bit(int nr, volatile void *addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index b4a9b0d..9f5835d 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -148,13 +148,13 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
 #define __change_bit(nr, vaddr)	change_bit(nr, vaddr)
 
 
-static inline int test_bit(int nr, const unsigned long *vaddr)
+static inline bool test_bit(int nr, const unsigned long *vaddr)
 {
 	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
 }
 
 
-static inline int bset_reg_test_and_set_bit(int nr,
+static inline bool bset_reg_test_and_set_bit(int nr,
 					    volatile unsigned long *vaddr)
 {
 	char *p = (char *)vaddr + (nr ^ 31) / 8;
@@ -167,7 +167,7 @@ static inline int bset_reg_test_and_set_bit(int nr,
 	return retval;
 }
 
-static inline int bset_mem_test_and_set_bit(int nr,
+static inline bool bset_mem_test_and_set_bit(int nr,
 					    volatile unsigned long *vaddr)
 {
 	char *p = (char *)vaddr + (nr ^ 31) / 8;
@@ -179,7 +179,7 @@ static inline int bset_mem_test_and_set_bit(int nr,
 	return retval;
 }
 
-static inline int bfset_mem_test_and_set_bit(int nr,
+static inline bool bfset_mem_test_and_set_bit(int nr,
 					     volatile unsigned long *vaddr)
 {
 	char retval;
@@ -204,7 +204,7 @@ static inline int bfset_mem_test_and_set_bit(int nr,
 #define __test_and_set_bit(nr, vaddr)	test_and_set_bit(nr, vaddr)
 
 
-static inline int bclr_reg_test_and_clear_bit(int nr,
+static inline bool bclr_reg_test_and_clear_bit(int nr,
 					      volatile unsigned long *vaddr)
 {
 	char *p = (char *)vaddr + (nr ^ 31) / 8;
@@ -217,7 +217,7 @@ static inline int bclr_reg_test_and_clear_bit(int nr,
 	return retval;
 }
 
-static inline int bclr_mem_test_and_clear_bit(int nr,
+static inline bool bclr_mem_test_and_clear_bit(int nr,
 					      volatile unsigned long *vaddr)
 {
 	char *p = (char *)vaddr + (nr ^ 31) / 8;
@@ -229,7 +229,7 @@ static inline int bclr_mem_test_and_clear_bit(int nr,
 	return retval;
 }
 
-static inline int bfclr_mem_test_and_clear_bit(int nr,
+static inline bool bfclr_mem_test_and_clear_bit(int nr,
 					       volatile unsigned long *vaddr)
 {
 	char retval;
@@ -254,7 +254,7 @@ static inline int bfclr_mem_test_and_clear_bit(int nr,
 #define __test_and_clear_bit(nr, vaddr)	test_and_clear_bit(nr, vaddr)
 
 
-static inline int bchg_reg_test_and_change_bit(int nr,
+static inline bool bchg_reg_test_and_change_bit(int nr,
 					       volatile unsigned long *vaddr)
 {
 	char *p = (char *)vaddr + (nr ^ 31) / 8;
@@ -267,7 +267,7 @@ static inline int bchg_reg_test_and_change_bit(int nr,
 	return retval;
 }
 
-static inline int bchg_mem_test_and_change_bit(int nr,
+static inline bool bchg_mem_test_and_change_bit(int nr,
 					       volatile unsigned long *vaddr)
 {
 	char *p = (char *)vaddr + (nr ^ 31) / 8;
@@ -279,7 +279,7 @@ static inline int bchg_mem_test_and_change_bit(int nr,
 	return retval;
 }
 
-static inline int bfchg_mem_test_and_change_bit(int nr,
+static inline bool bfchg_mem_test_and_change_bit(int nr,
 						volatile unsigned long *vaddr)
 {
 	char retval;
diff --git a/arch/metag/include/asm/bitops.h b/arch/metag/include/asm/bitops.h
index 2671134..11df061 100644
--- a/arch/metag/include/asm/bitops.h
+++ b/arch/metag/include/asm/bitops.h
@@ -48,7 +48,7 @@ static inline void change_bit(unsigned int bit, volatile unsigned long *p)
 	__global_unlock1(flags);
 }
 
-static inline int test_and_set_bit(unsigned int bit, volatile unsigned long *p)
+static inline bool test_and_set_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
 	unsigned long old;
@@ -67,7 +67,7 @@ static inline int test_and_set_bit(unsigned int bit, volatile unsigned long *p)
 	return (old & mask) != 0;
 }
 
-static inline int test_and_clear_bit(unsigned int bit,
+static inline bool test_and_clear_bit(unsigned int bit,
 				     volatile unsigned long *p)
 {
 	unsigned long flags;
@@ -87,7 +87,7 @@ static inline int test_and_clear_bit(unsigned int bit,
 	return (old & mask) != 0;
 }
 
-static inline int test_and_change_bit(unsigned int bit,
+static inline bool test_and_change_bit(unsigned int bit,
 				      volatile unsigned long *p)
 {
 	unsigned long flags;
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index fa57cef..7e53c66 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -30,13 +30,13 @@
 void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
 void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
 void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
-int __mips_test_and_set_bit(unsigned long nr,
+bool __mips_test_and_set_bit(unsigned long nr,
 			    volatile unsigned long *addr);
-int __mips_test_and_set_bit_lock(unsigned long nr,
+bool __mips_test_and_set_bit_lock(unsigned long nr,
 				 volatile unsigned long *addr);
-int __mips_test_and_clear_bit(unsigned long nr,
+bool __mips_test_and_clear_bit(unsigned long nr,
 			      volatile unsigned long *addr);
-int __mips_test_and_change_bit(unsigned long nr,
+bool __mips_test_and_change_bit(unsigned long nr,
 			       volatile unsigned long *addr);
 
 
@@ -210,7 +210,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_set_bit(unsigned long nr,
+static inline bool test_and_set_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
 	int bit = nr & SZLONG_MASK;
@@ -266,7 +266,7 @@ static inline int test_and_set_bit(unsigned long nr,
  * This operation is atomic and implies acquire ordering semantics
  * after the memory operation.
  */
-static inline int test_and_set_bit_lock(unsigned long nr,
+static inline bool test_and_set_bit_lock(unsigned long nr,
 	volatile unsigned long *addr)
 {
 	int bit = nr & SZLONG_MASK;
@@ -319,7 +319,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_clear_bit(unsigned long nr,
+static inline bool test_and_clear_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
 	int bit = nr & SZLONG_MASK;
@@ -393,7 +393,7 @@ static inline int test_and_clear_bit(unsigned long nr,
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_change_bit(unsigned long nr,
+static inline bool test_and_change_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
 	int bit = nr & SZLONG_MASK;
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
index 3b2a1e7..8f0ba2a 100644
--- a/arch/mips/lib/bitops.c
+++ b/arch/mips/lib/bitops.c
@@ -83,14 +83,14 @@ EXPORT_SYMBOL(__mips_change_bit);
  * @nr: Bit to set
  * @addr: Address to count from
  */
-int __mips_test_and_set_bit(unsigned long nr,
+bool __mips_test_and_set_bit(unsigned long nr,
 			    volatile unsigned long *addr)
 {
 	unsigned long *a = (unsigned long *)addr;
 	unsigned bit = nr & SZLONG_MASK;
 	unsigned long mask;
 	unsigned long flags;
-	int res;
+	bool res;
 
 	a += nr >> SZLONG_LOG;
 	mask = 1UL << bit;
@@ -109,14 +109,14 @@ EXPORT_SYMBOL(__mips_test_and_set_bit);
  * @nr: Bit to set
  * @addr: Address to count from
  */
-int __mips_test_and_set_bit_lock(unsigned long nr,
+bool __mips_test_and_set_bit_lock(unsigned long nr,
 				 volatile unsigned long *addr)
 {
 	unsigned long *a = (unsigned long *)addr;
 	unsigned bit = nr & SZLONG_MASK;
 	unsigned long mask;
 	unsigned long flags;
-	int res;
+	bool res;
 
 	a += nr >> SZLONG_LOG;
 	mask = 1UL << bit;
@@ -135,13 +135,13 @@ EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
  * @nr: Bit to clear
  * @addr: Address to count from
  */
-int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+bool __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *a = (unsigned long *)addr;
 	unsigned bit = nr & SZLONG_MASK;
 	unsigned long mask;
 	unsigned long flags;
-	int res;
+	bool res;
 
 	a += nr >> SZLONG_LOG;
 	mask = 1UL << bit;
@@ -160,13 +160,13 @@ EXPORT_SYMBOL(__mips_test_and_clear_bit);
  * @nr: Bit to change
  * @addr: Address to count from
  */
-int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+bool __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *a = (unsigned long *)addr;
 	unsigned bit = nr & SZLONG_MASK;
 	unsigned long mask;
 	unsigned long flags;
-	int res;
+	bool res;
 
 	a += nr >> SZLONG_LOG;
 	mask = 1UL << bit;
diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h
index fe6f8e2..5b00e95 100644
--- a/arch/mn10300/include/asm/bitops.h
+++ b/arch/mn10300/include/asm/bitops.h
@@ -68,7 +68,7 @@ static inline void __clear_bit(unsigned long nr, volatile void *addr)
 /*
  * test bit
  */
-static inline int test_bit(unsigned long nr, const volatile void *addr)
+static inline bool test_bit(unsigned long nr, const volatile void *addr)
 {
 	return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
 }
@@ -133,9 +133,10 @@ extern void change_bit(unsigned long nr, volatile void *addr);
 /*
  * test and change bit
  */
-static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
+static inline bool __test_and_change_bit(unsigned long nr, volatile void *addr)
 {
-	int	mask, retval;
+	int mask;
+	bool retval;
 	unsigned int *a = (unsigned int *)addr;
 
 	a += nr >> 5;
diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
index 3f9406d..bac163d 100644
--- a/arch/parisc/include/asm/bitops.h
+++ b/arch/parisc/include/asm/bitops.h
@@ -59,17 +59,17 @@ static __inline__ void change_bit(int nr, volatile unsigned long * addr)
 	_atomic_spin_unlock_irqrestore(addr, flags);
 }
 
-static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
+static __inline__ bool test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
 	unsigned long old;
 	unsigned long flags;
-	int set;
+	bool set;
 
 	addr += (nr >> SHIFT_PER_LONG);
 	_atomic_spin_lock_irqsave(addr, flags);
 	old = *addr;
-	set = (old & mask) ? 1 : 0;
+	set = (old & mask) ? true : false;
 	if (!set)
 		*addr = old | mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
@@ -77,17 +77,17 @@ static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
 	return set;
 }
 
-static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
+static __inline__ bool test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
 	unsigned long old;
 	unsigned long flags;
-	int set;
+	bool set;
 
 	addr += (nr >> SHIFT_PER_LONG);
 	_atomic_spin_lock_irqsave(addr, flags);
 	old = *addr;
-	set = (old & mask) ? 1 : 0;
+	set = (old & mask) ? true : false;
 	if (set)
 		*addr = old & ~mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
@@ -95,7 +95,7 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
 	return set;
 }
 
-static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
+static __inline__ bool test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
 	unsigned long oldbit;
@@ -107,7 +107,7 @@ static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
 	*addr = oldbit ^ mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 
-	return (oldbit & mask) ? 1 : 0;
+	return (oldbit & mask) ? true : false;
 }
 
 #include <asm-generic/bitops/non-atomic.h>
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 59abc62..7838138 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -100,7 +100,7 @@ static __inline__ void change_bit(int nr, volatile unsigned long *addr)
 /* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output
  * operands. */
 #define DEFINE_TESTOP(fn, op, prefix, postfix, eh)	\
-static __inline__ unsigned long fn(			\
+static __inline__ bool fn(				\
 		unsigned long mask,			\
 		volatile unsigned long *_p)		\
 {							\
@@ -129,26 +129,26 @@ DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
 DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
 	      PPC_ATOMIC_EXIT_BARRIER, 0)
 
-static __inline__ int test_and_set_bit(unsigned long nr,
+static __inline__ bool test_and_set_bit(unsigned long nr,
 				       volatile unsigned long *addr)
 {
 	return test_and_set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
 }
 
-static __inline__ int test_and_set_bit_lock(unsigned long nr,
+static __inline__ bool test_and_set_bit_lock(unsigned long nr,
 				       volatile unsigned long *addr)
 {
 	return test_and_set_bits_lock(BIT_MASK(nr),
 				addr + BIT_WORD(nr)) != 0;
 }
 
-static __inline__ int test_and_clear_bit(unsigned long nr,
+static __inline__ bool test_and_clear_bit(unsigned long nr,
 					 volatile unsigned long *addr)
 {
 	return test_and_clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
 }
 
-static __inline__ int test_and_change_bit(unsigned long nr,
+static __inline__ bool test_and_change_bit(unsigned long nr,
 					  volatile unsigned long *addr)
 {
 	return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 8043f10..71e6202 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -173,7 +173,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
 	__BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER);
 }
 
-static inline int
+static inline bool
 test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
 {
 	unsigned long *addr = __bitops_word(nr, ptr);
@@ -184,7 +184,7 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
 	return (old & mask) != 0;
 }
 
-static inline int
+static inline bool
 test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
 	unsigned long *addr = __bitops_word(nr, ptr);
@@ -195,7 +195,7 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 	return (old & ~mask) != 0;
 }
 
-static inline int
+static inline bool
 test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
 {
 	unsigned long *addr = __bitops_word(nr, ptr);
@@ -228,7 +228,7 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
 	*addr ^= 1 << (nr & 7);
 }
 
-static inline int
+static inline bool
 __test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
 {
 	unsigned char *addr = __bitops_byte(nr, ptr);
@@ -239,7 +239,7 @@ __test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
 	return (ch >> (nr & 7)) & 1;
 }
 
-static inline int
+static inline bool
 __test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
 	unsigned char *addr = __bitops_byte(nr, ptr);
@@ -250,7 +250,7 @@ __test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 	return (ch >> (nr & 7)) & 1;
 }
 
-static inline int
+static inline bool
 __test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
 {
 	unsigned char *addr = __bitops_byte(nr, ptr);
@@ -261,7 +261,7 @@ __test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
 	return (ch >> (nr & 7)) & 1;
 }
 
-static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
+static inline bool test_bit(unsigned long nr, const volatile unsigned long *ptr)
 {
 	const volatile unsigned char *addr;
 
@@ -270,7 +270,7 @@ static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
 	return (*addr >> (nr & 7)) & 1;
 }
 
-static inline int test_and_set_bit_lock(unsigned long nr,
+static inline bool test_and_set_bit_lock(unsigned long nr,
 					volatile unsigned long *ptr)
 {
 	if (test_bit(nr, ptr))
@@ -321,7 +321,7 @@ static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr
 	return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
 }
 
-static inline int test_bit_inv(unsigned long nr,
+static inline bool test_bit_inv(unsigned long nr,
 			       const volatile unsigned long *ptr)
 {
 	return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
diff --git a/arch/sh/include/asm/bitops-cas.h b/arch/sh/include/asm/bitops-cas.h
index 88f793c..c4fde9c 100644
--- a/arch/sh/include/asm/bitops-cas.h
+++ b/arch/sh/include/asm/bitops-cas.h
@@ -46,7 +46,7 @@ static inline void change_bit(int nr, volatile void *addr)
 	while (__bo_cas(a, old, old^mask) != old);
 }
 
-static inline int test_and_set_bit(int nr, volatile void *addr)
+static inline bool test_and_set_bit(int nr, volatile void *addr)
 {
 	unsigned mask, old;
 	volatile unsigned *a = addr;
@@ -60,7 +60,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
 	return !!(old & mask);
 }
 
-static inline int test_and_clear_bit(int nr, volatile void *addr)
+static inline bool test_and_clear_bit(int nr, volatile void *addr)
 {
 	unsigned mask, old;
 	volatile unsigned *a = addr;
@@ -74,7 +74,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
 	return !!(old & mask);
 }
 
-static inline int test_and_change_bit(int nr, volatile void *addr)
+static inline bool test_and_change_bit(int nr, volatile void *addr)
 {
 	unsigned mask, old;
 	volatile unsigned *a = addr;
diff --git a/arch/sh/include/asm/bitops-grb.h b/arch/sh/include/asm/bitops-grb.h
index e73af33..866f26a 100644
--- a/arch/sh/include/asm/bitops-grb.h
+++ b/arch/sh/include/asm/bitops-grb.h
@@ -71,7 +71,7 @@ static inline void change_bit(int nr, volatile void * addr)
                 : "memory" , "r0", "r1");
 }
 
-static inline int test_and_set_bit(int nr, volatile void * addr)
+static inline bool test_and_set_bit(int nr, volatile void *addr)
 {
         int     mask, retval;
 	volatile unsigned int *a = addr;
@@ -102,7 +102,7 @@ static inline int test_and_set_bit(int nr, volatile void * addr)
         return retval;
 }
 
-static inline int test_and_clear_bit(int nr, volatile void * addr)
+static inline bool test_and_clear_bit(int nr, volatile void *addr)
 {
         int     mask, retval,not_mask;
         volatile unsigned int *a = addr;
@@ -136,7 +136,7 @@ static inline int test_and_clear_bit(int nr, volatile void * addr)
         return retval;
 }
 
-static inline int test_and_change_bit(int nr, volatile void * addr)
+static inline bool test_and_change_bit(int nr, volatile void *addr)
 {
         int     mask, retval;
         volatile unsigned int *a = addr;
diff --git a/arch/sh/include/asm/bitops-llsc.h b/arch/sh/include/asm/bitops-llsc.h
index d8328be..7dcf5ea 100644
--- a/arch/sh/include/asm/bitops-llsc.h
+++ b/arch/sh/include/asm/bitops-llsc.h
@@ -64,7 +64,7 @@ static inline void change_bit(int nr, volatile void *addr)
 	);
 }
 
-static inline int test_and_set_bit(int nr, volatile void *addr)
+static inline bool test_and_set_bit(int nr, volatile void *addr)
 {
 	int	mask, retval;
 	volatile unsigned int *a = addr;
@@ -89,7 +89,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
 	return retval != 0;
 }
 
-static inline int test_and_clear_bit(int nr, volatile void *addr)
+static inline bool test_and_clear_bit(int nr, volatile void *addr)
 {
 	int	mask, retval;
 	volatile unsigned int *a = addr;
@@ -115,7 +115,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
 	return retval != 0;
 }
 
-static inline int test_and_change_bit(int nr, volatile void *addr)
+static inline bool test_and_change_bit(int nr, volatile void *addr)
 {
 	int	mask, retval;
 	volatile unsigned int *a = addr;
diff --git a/arch/sh/include/asm/bitops-op32.h b/arch/sh/include/asm/bitops-op32.h
index f0ae7e9..f677a4e 100644
--- a/arch/sh/include/asm/bitops-op32.h
+++ b/arch/sh/include/asm/bitops-op32.h
@@ -88,7 +88,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline bool __test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -107,7 +107,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline bool __test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -118,7 +118,7 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 }
 
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
+static inline bool __test_and_change_bit(int nr,
 					    volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
@@ -134,7 +134,7 @@ static inline int __test_and_change_bit(int nr,
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-static inline int test_bit(int nr, const volatile unsigned long *addr)
+static inline bool test_bit(int nr, const volatile unsigned long *addr)
 {
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
diff --git a/arch/sparc/include/asm/bitops_32.h b/arch/sparc/include/asm/bitops_32.h
index 600ed1d..afe275a 100644
--- a/arch/sparc/include/asm/bitops_32.h
+++ b/arch/sparc/include/asm/bitops_32.h
@@ -28,7 +28,7 @@ unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
  * within the first byte. Sparc is BIG-Endian. Unless noted otherwise
  * all bit-ops return 0 if bit was previously clear and != 0 otherwise.
  */
-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
+static inline bool test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *ADDR, mask;
 
@@ -48,7 +48,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 	(void) ___set_bit(ADDR, mask);
 }
 
-static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+static inline bool test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *ADDR, mask;
 
@@ -68,7 +68,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 	(void) ___clear_bit(ADDR, mask);
 }
 
-static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+static inline bool test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *ADDR, mask;
 
diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h
index 2d52240..8cbd032 100644
--- a/arch/sparc/include/asm/bitops_64.h
+++ b/arch/sparc/include/asm/bitops_64.h
@@ -15,9 +15,9 @@
 #include <asm/byteorder.h>
 #include <asm/barrier.h>
 
-int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
-int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
-int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
+bool test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
+bool test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
+bool test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
 void set_bit(unsigned long nr, volatile unsigned long *addr);
 void clear_bit(unsigned long nr, volatile unsigned long *addr);
 void change_bit(unsigned long nr, volatile unsigned long *addr);
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h
index d1406a9..9ef0ba4 100644
--- a/arch/tile/include/asm/bitops_32.h
+++ b/arch/tile/include/asm/bitops_32.h
@@ -80,7 +80,7 @@ static inline void change_bit(unsigned nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
+static inline bool test_and_set_bit(unsigned nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	addr += BIT_WORD(nr);
@@ -96,7 +96,7 @@ static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
+static inline bool test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	addr += BIT_WORD(nr);
@@ -112,7 +112,7 @@ static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_change_bit(unsigned nr,
+static inline bool test_and_change_bit(unsigned nr,
 				      volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
index bb1a292..d970306 100644
--- a/arch/tile/include/asm/bitops_64.h
+++ b/arch/tile/include/asm/bitops_64.h
@@ -52,9 +52,9 @@ static inline void change_bit(unsigned nr, volatile unsigned long *addr)
  * barrier(), to block until the atomic op is complete.
  */
 
-static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
+static inline bool test_and_set_bit(unsigned nr, volatile unsigned long *addr)
 {
-	int val;
+	bool val;
 	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
 	smp_mb();  /* barrier for proper semantics */
 	val = (__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask)
@@ -64,9 +64,9 @@ static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
 }
 
 
-static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
+static inline bool test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
 {
-	int val;
+	bool val;
 	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
 	smp_mb();  /* barrier for proper semantics */
 	val = (__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask)
@@ -76,7 +76,7 @@ static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
 }
 
 
-static inline int test_and_change_bit(unsigned nr,
+static inline bool test_and_change_bit(unsigned nr,
 				      volatile unsigned long *addr)
 {
 	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index d349018..485d95d 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -154,7 +154,7 @@ static inline void change_bit(unsigned int bit, volatile unsigned long *p)
 			: "memory");
 }
 
-static inline int
+static inline bool
 test_and_set_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long tmp, value;
@@ -175,7 +175,7 @@ test_and_set_bit(unsigned int bit, volatile unsigned long *p)
 	return tmp & mask;
 }
 
-static inline int
+static inline bool
 test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long tmp, value;
@@ -196,7 +196,7 @@ test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
 	return tmp & mask;
 }
 
-static inline int
+static inline bool
 test_and_change_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long tmp, value;
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 4967351..eb68d8d 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -124,7 +124,7 @@ static inline void change_bit(int nr, volatile unsigned long *addr)
  * It may be reordered on other architectures than x86.
  * It also implies a memory barrier.
  */
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline bool test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -148,7 +148,7 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
  * It can be reorderdered on other architectures other than x86.
  * It also implies a memory barrier.
  */
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline bool test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -171,7 +171,7 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline bool test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
index 6173154..c610b99 100644
--- a/include/asm-generic/bitops/le.h
+++ b/include/asm-generic/bitops/le.h
@@ -49,7 +49,7 @@ extern unsigned long find_next_bit_le(const void *addr,
 #error "Please fix <asm/byteorder.h>"
 #endif
 
-static inline int test_bit_le(int nr, const void *addr)
+static inline bool test_bit_le(int nr, const void *addr)
 {
 	return test_bit(nr ^ BITOP_LE_SWIZZLE, addr);
 }
@@ -74,22 +74,22 @@ static inline void __clear_bit_le(int nr, void *addr)
 	__clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
 }
 
-static inline int test_and_set_bit_le(int nr, void *addr)
+static inline bool test_and_set_bit_le(int nr, void *addr)
 {
 	return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
 }
 
-static inline int test_and_clear_bit_le(int nr, void *addr)
+static inline bool test_and_clear_bit_le(int nr, void *addr)
 {
 	return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
 }
 
-static inline int __test_and_set_bit_le(int nr, void *addr)
+static inline bool __test_and_set_bit_le(int nr, void *addr)
 {
 	return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
 }
 
-static inline int __test_and_clear_bit_le(int nr, void *addr)
+static inline bool __test_and_clear_bit_le(int nr, void *addr)
 {
 	return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
 }
diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h
index 697cc2b..fea2b40 100644
--- a/include/asm-generic/bitops/non-atomic.h
+++ b/include/asm-generic/bitops/non-atomic.h
@@ -54,7 +54,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline bool __test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -73,7 +73,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline bool __test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -84,7 +84,7 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 }
 
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
+static inline bool __test_and_change_bit(int nr,
 					    volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
@@ -100,7 +100,7 @@ static inline int __test_and_change_bit(int nr,
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-static inline int test_bit(int nr, const volatile unsigned long *addr)
+static inline bool test_bit(int nr, const volatile unsigned long *addr)
 {
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
-- 
1.9.3

^ permalink raw reply related	[flat|nested] 19+ messages in thread

* Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions
  2016-08-28  5:39 [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions chengang
@ 2016-08-28  7:02 ` kbuild test robot
  2016-08-28  7:10 ` kbuild test robot
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 19+ messages in thread
From: kbuild test robot @ 2016-08-28  7:02 UTC (permalink / raw)
  To: chengang
  Cc: kbuild-all, akpm, minchan, vbabka, gi-oh.kim, iamjoonsoo.kim,
	hillf.zj, mgorman, mhocko, rientjes, linux-kernel, rth, ink,
	mattst88, vgupta, linux, catalin.marinas, will.deacon,
	hskinnemoen, egtvedt, realmz6, ysato, rkuo, tony.luck,
	fenghua.yu, geert, james.hogan, ralf, dhowells, deller, benh,
	paulus, mpe, schwidefsky, heiko.carstens, dalias, davem,
	cmetcalf, chris, jcmvbkbc, arnd, noamc, brueckner, mingo, peterz,
	linux-arch, Chen Gang, Chen Gang

[-- Attachment #1: Type: text/plain, Size: 5460 bytes --]

Hi Chen,

[auto build test WARNING on linus/master]
[also build test WARNING on v4.8-rc3 next-20160825]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
[We suggest using git (>= 2.9.0) "format-patch --base=<commit>" (or --base=auto for convenience) to record which (public, well-known) commit your patch series was built on]
[Check https://git-scm.com/docs/git-format-patch for more information]

url:    https://github.com/0day-ci/linux/commits/chengang-emindsoft-com-cn/arch-all-include-asm-bitops-Use-bool-instead-of-int-for-all-bit-test-functions/20160828-134633
config: m68k-sun3_defconfig (attached as .config)
compiler: m68k-linux-gcc (GCC) 4.9.0
reproduce:
        wget https://git.kernel.org/cgit/linux/kernel/git/wfg/lkp-tests.git/plain/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        make.cross ARCH=m68k 

All warnings (new ones prefixed by >>):

   In file included from include/linux/bitops.h:36:0,
                    from include/linux/kernel.h:10,
                    from include/linux/list.h:8,
                    from include/linux/module.h:9,
                    from fs/lockd/clntlock.c:9:
   include/linux/sunrpc/xprt.h: In function 'xprt_set_bound':
>> arch/m68k/include/asm/bitops.h:200:43: warning: value computed is not used [-Wunused-value]
         bset_mem_test_and_set_bit(nr, vaddr) : \
                                              ^
>> include/linux/sunrpc/xprt.h:433:2: note: in expansion of macro 'test_and_set_bit'
     test_and_set_bit(XPRT_BOUND, &xprt->state);
     ^
--
   In file included from include/linux/bitops.h:36:0,
                    from include/linux/kernel.h:10,
                    from include/linux/list.h:8,
                    from include/linux/module.h:9,
                    from lib/lru_cache.c:26:
   lib/lru_cache.c: In function '__lc_get':
>> arch/m68k/include/asm/bitops.h:200:43: warning: value computed is not used [-Wunused-value]
         bset_mem_test_and_set_bit(nr, vaddr) : \
                                              ^
>> lib/lru_cache.c:417:2: note: in expansion of macro 'test_and_set_bit'
     test_and_set_bit(__LC_DIRTY, &lc->flags);
     ^
--
   In file included from include/linux/bitops.h:36:0,
                    from include/linux/kernel.h:10,
                    from include/linux/list.h:8,
                    from include/linux/module.h:9,
                    from net/sunrpc/auth_gss/auth_gss.c:39:
   include/linux/sunrpc/xprt.h: In function 'xprt_set_bound':
>> arch/m68k/include/asm/bitops.h:200:43: warning: value computed is not used [-Wunused-value]
         bset_mem_test_and_set_bit(nr, vaddr) : \
                                              ^
>> include/linux/sunrpc/xprt.h:433:2: note: in expansion of macro 'test_and_set_bit'
     test_and_set_bit(XPRT_BOUND, &xprt->state);
     ^
   net/sunrpc/auth_gss/auth_gss.c: In function 'gss_match':
   arch/m68k/include/asm/bitops.h:250:45: warning: value computed is not used [-Wunused-value]
         bclr_mem_test_and_clear_bit(nr, vaddr) : \
                                                ^
>> net/sunrpc/auth_gss/auth_gss.c:1469:3: note: in expansion of macro 'test_and_clear_bit'
      test_and_clear_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags);
      ^

vim +200 arch/m68k/include/asm/bitops.h

171d809d Greg Ungerer 2011-05-17  184  {
171d809d Greg Ungerer 2011-05-17  185  	char retval;
171d809d Greg Ungerer 2011-05-17  186  
171d809d Greg Ungerer 2011-05-17  187  	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
171d809d Greg Ungerer 2011-05-17  188  		: "=d" (retval)
171d809d Greg Ungerer 2011-05-17  189  		: "d" (nr ^ 31), "o" (*vaddr)
171d809d Greg Ungerer 2011-05-17  190  		: "memory");
171d809d Greg Ungerer 2011-05-17  191  	return retval;
171d809d Greg Ungerer 2011-05-17  192  }
171d809d Greg Ungerer 2011-05-17  193  
171d809d Greg Ungerer 2011-05-17  194  #if defined(CONFIG_COLDFIRE)
171d809d Greg Ungerer 2011-05-17  195  #define	test_and_set_bit(nr, vaddr)	bset_reg_test_and_set_bit(nr, vaddr)
171d809d Greg Ungerer 2011-05-17  196  #elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
171d809d Greg Ungerer 2011-05-17  197  #define	test_and_set_bit(nr, vaddr)	bset_mem_test_and_set_bit(nr, vaddr)
171d809d Greg Ungerer 2011-05-17  198  #else
171d809d Greg Ungerer 2011-05-17  199  #define test_and_set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
171d809d Greg Ungerer 2011-05-17 @200  					bset_mem_test_and_set_bit(nr, vaddr) : \
171d809d Greg Ungerer 2011-05-17  201  					bfset_mem_test_and_set_bit(nr, vaddr))
171d809d Greg Ungerer 2011-05-17  202  #endif
171d809d Greg Ungerer 2011-05-17  203  
171d809d Greg Ungerer 2011-05-17  204  #define __test_and_set_bit(nr, vaddr)	test_and_set_bit(nr, vaddr)
171d809d Greg Ungerer 2011-05-17  205  
171d809d Greg Ungerer 2011-05-17  206  
f5fbac0b Chen Gang    2016-08-28  207  static inline bool bclr_reg_test_and_clear_bit(int nr,
171d809d Greg Ungerer 2011-05-17  208  					      volatile unsigned long *vaddr)

:::::: The code at line 200 was first introduced by commit
:::::: 171d809df1896c1022f9778cd2788be6c255a7dc m68k: merge mmu and non-mmu bitops.h

:::::: TO: Greg Ungerer <gerg@uclinux.org>
:::::: CC: Greg Ungerer <gerg@uclinux.org>

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

[-- Attachment #2: .config.gz --]
[-- Type: application/octet-stream, Size: 11444 bytes --]

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions
  2016-08-28  5:39 [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions chengang
  2016-08-28  7:02 ` kbuild test robot
@ 2016-08-28  7:10 ` kbuild test robot
  2016-08-28  7:10 ` kbuild test robot
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 19+ messages in thread
From: kbuild test robot @ 2016-08-28  7:10 UTC (permalink / raw)
  To: chengang
  Cc: kbuild-all, akpm, minchan, vbabka, gi-oh.kim, iamjoonsoo.kim,
	hillf.zj, mgorman, mhocko, rientjes, linux-kernel, rth, ink,
	mattst88, vgupta, linux, catalin.marinas, will.deacon,
	hskinnemoen, egtvedt, realmz6, ysato, rkuo, tony.luck,
	fenghua.yu, geert, james.hogan, ralf, dhowells, deller, benh,
	paulus, mpe, schwidefsky, heiko.carstens, dalias, davem,
	cmetcalf, chris, jcmvbkbc, arnd, noamc, brueckner, mingo, peterz,
	linux-arch, Chen Gang, Chen Gang

[-- Attachment #1: Type: text/plain, Size: 4546 bytes --]

Hi Chen,

[auto build test WARNING on linus/master]
[also build test WARNING on v4.8-rc3 next-20160825]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
[Suggest to use git(>=2.9.0) format-patch --base=<commit> (or --base=auto for convenience) to record what (public, well-known) commit your patch series was built on]
[Check https://git-scm.com/docs/git-format-patch for more information]

url:    https://github.com/0day-ci/linux/commits/chengang-emindsoft-com-cn/arch-all-include-asm-bitops-Use-bool-instead-of-int-for-all-bit-test-functions/20160828-134633
config: arm-at91_dt_defconfig (attached as .config)
compiler: arm-linux-gnueabi-gcc (Debian 5.4.0-6) 5.4.0 20160609
reproduce:
        wget https://git.kernel.org/cgit/linux/kernel/git/wfg/lkp-tests.git/plain/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        make.cross ARCH=arm 

All warnings (new ones prefixed by >>):

   In file included from include/linux/bitops.h:36:0,
                    from include/linux/kernel.h:10,
                    from include/linux/list.h:8,
                    from include/linux/module.h:9,
                    from net/sunrpc/clnt.c:21:
   include/linux/sunrpc/xprt.h: In function 'xprt_set_bound':
>> arch/arm/include/asm/bitops.h:180:55: warning: value computed is not used [-Wunused-value]
     (__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p))
                                                          ^
>> arch/arm/include/asm/bitops.h:191:33: note: in expansion of macro 'ATOMIC_BITOP'
    #define test_and_set_bit(nr,p)  ATOMIC_BITOP(test_and_set_bit,nr,p)
                                    ^
   include/linux/sunrpc/xprt.h:433:2: note: in expansion of macro 'test_and_set_bit'
     test_and_set_bit(XPRT_BOUND, &xprt->state);
     ^

vim +180 arch/arm/include/asm/bitops.h

^1da177e include/asm-arm/bitops.h      Linus Torvalds 2005-04-16  174  
e7ec0293 include/asm-arm/bitops.h      Russell King   2005-07-28  175  #ifndef CONFIG_SMP
^1da177e include/asm-arm/bitops.h      Linus Torvalds 2005-04-16  176  /*
^1da177e include/asm-arm/bitops.h      Linus Torvalds 2005-04-16  177   * The __* form of bitops are non-atomic and may be reordered.
^1da177e include/asm-arm/bitops.h      Linus Torvalds 2005-04-16  178   */
6323f0cc arch/arm/include/asm/bitops.h Russell King   2011-01-16  179  #define ATOMIC_BITOP(name,nr,p)			\
6323f0cc arch/arm/include/asm/bitops.h Russell King   2011-01-16 @180  	(__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p))
e7ec0293 include/asm-arm/bitops.h      Russell King   2005-07-28  181  #else
6323f0cc arch/arm/include/asm/bitops.h Russell King   2011-01-16  182  #define ATOMIC_BITOP(name,nr,p)		_##name(nr,p)
e7ec0293 include/asm-arm/bitops.h      Russell King   2005-07-28  183  #endif
^1da177e include/asm-arm/bitops.h      Linus Torvalds 2005-04-16  184  
6323f0cc arch/arm/include/asm/bitops.h Russell King   2011-01-16  185  /*
6323f0cc arch/arm/include/asm/bitops.h Russell King   2011-01-16  186   * Native endian atomic definitions.
6323f0cc arch/arm/include/asm/bitops.h Russell King   2011-01-16  187   */
6323f0cc arch/arm/include/asm/bitops.h Russell King   2011-01-16  188  #define set_bit(nr,p)			ATOMIC_BITOP(set_bit,nr,p)
6323f0cc arch/arm/include/asm/bitops.h Russell King   2011-01-16  189  #define clear_bit(nr,p)			ATOMIC_BITOP(clear_bit,nr,p)
6323f0cc arch/arm/include/asm/bitops.h Russell King   2011-01-16  190  #define change_bit(nr,p)		ATOMIC_BITOP(change_bit,nr,p)
6323f0cc arch/arm/include/asm/bitops.h Russell King   2011-01-16 @191  #define test_and_set_bit(nr,p)		ATOMIC_BITOP(test_and_set_bit,nr,p)
6323f0cc arch/arm/include/asm/bitops.h Russell King   2011-01-16  192  #define test_and_clear_bit(nr,p)	ATOMIC_BITOP(test_and_clear_bit,nr,p)
6323f0cc arch/arm/include/asm/bitops.h Russell King   2011-01-16  193  #define test_and_change_bit(nr,p)	ATOMIC_BITOP(test_and_change_bit,nr,p)
^1da177e include/asm-arm/bitops.h      Linus Torvalds 2005-04-16  194  

:::::: The code at line 180 was first introduced by commit
:::::: 6323f0ccedf756dfe5f46549cec69a2d6d97937b ARM: bitops: switch set/clear/change bitops to use ldrex/strex

:::::: TO: Russell King <rmk+kernel@arm.linux.org.uk>
:::::: CC: Russell King <rmk+kernel@arm.linux.org.uk>

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

[-- Attachment #2: .config.gz --]
[-- Type: application/octet-stream, Size: 21569 bytes --]

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions
  2016-08-28  5:39 [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions chengang
  2016-08-28  7:02 ` kbuild test robot
  2016-08-28  7:10 ` kbuild test robot
@ 2016-08-28  7:10 ` kbuild test robot
  2016-08-28 14:54   ` Chen Gang
  2016-08-29  8:45 ` Michal Hocko
  2016-08-29 13:03 ` Arnd Bergmann
  4 siblings, 1 reply; 19+ messages in thread
From: kbuild test robot @ 2016-08-28  7:10 UTC (permalink / raw)
  To: chengang
  Cc: kbuild-all, akpm, minchan, vbabka, gi-oh.kim, iamjoonsoo.kim,
	hillf.zj, mgorman, mhocko, rientjes, linux-kernel, rth, ink,
	mattst88, vgupta, linux, catalin.marinas, will.deacon,
	hskinnemoen, egtvedt, realmz6, ysato, rkuo, tony.luck,
	fenghua.yu, geert, james.hogan, ralf, dhowells, deller, benh,
	paulus, mpe, schwidefsky, heiko.carstens, dalias, davem,
	cmetcalf, chris, jcmvbkbc, arnd, noamc, brueckner, mingo, peterz,
	linux-arch, Chen Gang, Chen Gang

[-- Attachment #1: Type: text/plain, Size: 3180 bytes --]

Hi Chen,

[auto build test ERROR on linus/master]
[also build test ERROR on v4.8-rc3 next-20160825]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
[Suggest to use git(>=2.9.0) format-patch --base=<commit> (or --base=auto for convenience) to record what (public, well-known) commit your patch series was built on]
[Check https://git-scm.com/docs/git-format-patch for more information]

url:    https://github.com/0day-ci/linux/commits/chengang-emindsoft-com-cn/arch-all-include-asm-bitops-Use-bool-instead-of-int-for-all-bit-test-functions/20160828-134633
config: m68k-sun3_defconfig (attached as .config)
compiler: m68k-linux-gcc (GCC) 4.9.0
reproduce:
        wget https://git.kernel.org/cgit/linux/kernel/git/wfg/lkp-tests.git/plain/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        make.cross ARCH=m68k 

All errors (new ones prefixed by >>):

   In file included from include/linux/bitops.h:36:0,
                    from include/linux/jhash.h:26,
                    from net/ipv6/ila/ila_xlat.c:1:
>> arch/m68k/include/asm/bitops.h:151:15: error: unknown type name 'bool'
    static inline bool test_bit(int nr, const unsigned long *vaddr)
                  ^
   arch/m68k/include/asm/bitops.h:157:15: error: unknown type name 'bool'
    static inline bool bset_reg_test_and_set_bit(int nr,
                  ^
   arch/m68k/include/asm/bitops.h:170:15: error: unknown type name 'bool'
    static inline bool bset_mem_test_and_set_bit(int nr,
                  ^
   arch/m68k/include/asm/bitops.h:182:15: error: unknown type name 'bool'
    static inline bool bfset_mem_test_and_set_bit(int nr,
                  ^
   arch/m68k/include/asm/bitops.h:207:15: error: unknown type name 'bool'
    static inline bool bclr_reg_test_and_clear_bit(int nr,
                  ^
   arch/m68k/include/asm/bitops.h:220:15: error: unknown type name 'bool'
    static inline bool bclr_mem_test_and_clear_bit(int nr,
                  ^
   arch/m68k/include/asm/bitops.h:232:15: error: unknown type name 'bool'
    static inline bool bfclr_mem_test_and_clear_bit(int nr,
                  ^
   arch/m68k/include/asm/bitops.h:257:15: error: unknown type name 'bool'
    static inline bool bchg_reg_test_and_change_bit(int nr,
                  ^
   arch/m68k/include/asm/bitops.h:270:15: error: unknown type name 'bool'
    static inline bool bchg_mem_test_and_change_bit(int nr,
                  ^
   arch/m68k/include/asm/bitops.h:282:15: error: unknown type name 'bool'
    static inline bool bfchg_mem_test_and_change_bit(int nr,
                  ^

vim +/bool +151 arch/m68k/include/asm/bitops.h

   145					bfchg_mem_change_bit(nr, vaddr))
   146	#endif
   147	
   148	#define __change_bit(nr, vaddr)	change_bit(nr, vaddr)
   149	
   150	
 > 151	static inline bool test_bit(int nr, const unsigned long *vaddr)
   152	{
   153		return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
   154	}

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

[-- Attachment #2: .config.gz --]
[-- Type: application/octet-stream, Size: 11444 bytes --]

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions
  2016-08-28  7:10 ` kbuild test robot
@ 2016-08-28 14:54   ` Chen Gang
  0 siblings, 0 replies; 19+ messages in thread
From: Chen Gang @ 2016-08-28 14:54 UTC (permalink / raw)
  To: kbuild test robot
  Cc: kbuild-all, akpm, minchan, vbabka, gi-oh.kim, iamjoonsoo.kim,
	hillf.zj, mgorman, mhocko, rientjes, linux-kernel, rth, ink,
	mattst88, vgupta, linux, catalin.marinas, will.deacon,
	hskinnemoen, egtvedt, realmz6, ysato, rkuo, tony.luck,
	fenghua.yu, geert, james.hogan, ralf, dhowells, deller, benh,
	paulus, mpe, schwidefsky, heiko.carstens, dalias, davem,
	cmetcalf, chris, jcmvbkbc, arnd, noamc, brueckner, mingo, peterz,
	linux-arch, Chen Gang

Hello all:

I have tried m68k and aarch64; they need to include linux/types.h, just as
other archs have done (e.g. arc). After that, they can pass the build for
the default config.

For alpha, it passes the build with my alpha cross compiler (gcc 5.0),
but for safety reasons, we'd better let it include linux/types.h, too.

For openrisc, after checking the code, I guess it is the same issue. And
excuse me, I lost the openrisc cross compiler which I originally built,
so I cannot give it a test now; I can rebuild it, but that really needs time.

Thanks.

On 8/28/16 15:10, kbuild test robot wrote:
> Hi Chen,
> 
> [auto build test ERROR on linus/master]
> [also build test ERROR on v4.8-rc3 next-20160825]
> [if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
> [Suggest to use git(>=2.9.0) format-patch --base=<commit> (or --base=auto for convenience) to record what (public, well-known) commit your patch series was built on]
> [Check https://git-scm.com/docs/git-format-patch for more information]
> 
> url:    https://github.com/0day-ci/linux/commits/chengang-emindsoft-com-cn/arch-all-include-asm-bitops-Use-bool-instead-of-int-for-all-bit-test-functions/20160828-134633
> config: m68k-sun3_defconfig (attached as .config)
> compiler: m68k-linux-gcc (GCC) 4.9.0
> reproduce:
>         wget https://git.kernel.org/cgit/linux/kernel/git/wfg/lkp-tests.git/plain/sbin/make.cross -O ~/bin/make.cross
>         chmod +x ~/bin/make.cross
>         # save the attached .config to linux build tree
>         make.cross ARCH=m68k 
> 
> All errors (new ones prefixed by >>):
> 
>    In file included from include/linux/bitops.h:36:0,
>                     from include/linux/jhash.h:26,
>                     from net/ipv6/ila/ila_xlat.c:1:
>>> arch/m68k/include/asm/bitops.h:151:15: error: unknown type name 'bool'
>     static inline bool test_bit(int nr, const unsigned long *vaddr)
>                   ^
>    arch/m68k/include/asm/bitops.h:157:15: error: unknown type name 'bool'
>     static inline bool bset_reg_test_and_set_bit(int nr,
>                   ^
>    arch/m68k/include/asm/bitops.h:170:15: error: unknown type name 'bool'
>     static inline bool bset_mem_test_and_set_bit(int nr,
>                   ^
>    arch/m68k/include/asm/bitops.h:182:15: error: unknown type name 'bool'
>     static inline bool bfset_mem_test_and_set_bit(int nr,
>                   ^
>    arch/m68k/include/asm/bitops.h:207:15: error: unknown type name 'bool'
>     static inline bool bclr_reg_test_and_clear_bit(int nr,
>                   ^
>    arch/m68k/include/asm/bitops.h:220:15: error: unknown type name 'bool'
>     static inline bool bclr_mem_test_and_clear_bit(int nr,
>                   ^
>    arch/m68k/include/asm/bitops.h:232:15: error: unknown type name 'bool'
>     static inline bool bfclr_mem_test_and_clear_bit(int nr,
>                   ^
>    arch/m68k/include/asm/bitops.h:257:15: error: unknown type name 'bool'
>     static inline bool bchg_reg_test_and_change_bit(int nr,
>                   ^
>    arch/m68k/include/asm/bitops.h:270:15: error: unknown type name 'bool'
>     static inline bool bchg_mem_test_and_change_bit(int nr,
>                   ^
>    arch/m68k/include/asm/bitops.h:282:15: error: unknown type name 'bool'
>     static inline bool bfchg_mem_test_and_change_bit(int nr,
>                   ^
> 
> vim +/bool +151 arch/m68k/include/asm/bitops.h
> 
>    145					bfchg_mem_change_bit(nr, vaddr))
>    146	#endif
>    147	
>    148	#define __change_bit(nr, vaddr)	change_bit(nr, vaddr)
>    149	
>    150	
>  > 151	static inline bool test_bit(int nr, const unsigned long *vaddr)
>    152	{
>    153		return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
>    154	}
> 
> ---
> 0-DAY kernel test infrastructure                Open Source Technology Center
> https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
> 

-- 
Chen Gang (陈刚)

Managing Natural Environments is the Duty of Human Beings.

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions
  2016-08-28  5:39 [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions chengang
                   ` (2 preceding siblings ...)
  2016-08-28  7:10 ` kbuild test robot
@ 2016-08-29  8:45 ` Michal Hocko
  2016-08-29 13:03 ` Arnd Bergmann
  4 siblings, 0 replies; 19+ messages in thread
From: Michal Hocko @ 2016-08-29  8:45 UTC (permalink / raw)
  To: chengang
  Cc: akpm, minchan, vbabka, gi-oh.kim, iamjoonsoo.kim, hillf.zj,
	mgorman, rientjes, linux-kernel, rth, ink, mattst88, vgupta,
	linux, catalin.marinas, will.deacon, hskinnemoen, egtvedt,
	realmz6, ysato, rkuo, tony.luck, fenghua.yu, geert, james.hogan,
	ralf, dhowells, deller, benh, paulus, mpe, schwidefsky,
	heiko.carstens, dalias, davem, cmetcalf, chris, jcmvbkbc, arnd,
	noamc, brueckner, mingo, peterz, linux-arch, Chen Gang

On Sun 28-08-16 13:39:15, chengang@emindsoft.com.cn wrote:
> From: Chen Gang <chengang@emindsoft.com.cn>
> 
> Also use the same changing to asm-generic, and also use bool variable
> instead of int variable for mips, mn10300, parisc and tile related
> functions, and also avoid checkpatch.pl to report ERROR.
> 
> Originally, except powerpc and xtensa, all another architectures intend
> to return 0 or 1. After this patch, also let powerpc and xtensa return 0
> or 1.
> 
> The patch passes cross building for mips and parisc with default config.
> All related contents are found by "grep test_bit, grep test_and" under
> arch sub-directory.

the changelog doesn't mention the most important part. Why do we want to
change this? Does it really improve things (generate better code) or
clarify the semantics of those functions?

> 
> Signed-off-by: Chen Gang <gang.chen.5i5j@gmail.com>
> ---
>  arch/alpha/include/asm/bitops.h         | 16 ++++++++--------
>  arch/arc/include/asm/bitops.h           | 10 +++++-----
>  arch/arm/include/asm/bitops.h           | 12 ++++++------
>  arch/arm64/include/asm/bitops.h         |  6 +++---
>  arch/avr32/include/asm/bitops.h         |  6 +++---
>  arch/blackfin/include/asm/bitops.h      | 16 ++++++++--------
>  arch/frv/include/asm/bitops.h           | 16 ++++++++--------
>  arch/h8300/include/asm/bitops.h         |  4 ++--
>  arch/hexagon/include/asm/bitops.h       | 14 +++++++-------
>  arch/ia64/include/asm/bitops.h          | 14 +++++++-------
>  arch/m32r/include/asm/bitops.h          |  6 +++---
>  arch/m68k/include/asm/bitops.h          | 20 ++++++++++----------
>  arch/metag/include/asm/bitops.h         |  6 +++---
>  arch/mips/include/asm/bitops.h          | 16 ++++++++--------
>  arch/mips/lib/bitops.c                  | 16 ++++++++--------
>  arch/mn10300/include/asm/bitops.h       |  7 ++++---
>  arch/parisc/include/asm/bitops.h        | 16 ++++++++--------
>  arch/powerpc/include/asm/bitops.h       | 10 +++++-----
>  arch/s390/include/asm/bitops.h          | 18 +++++++++---------
>  arch/sh/include/asm/bitops-cas.h        |  6 +++---
>  arch/sh/include/asm/bitops-grb.h        |  6 +++---
>  arch/sh/include/asm/bitops-llsc.h       |  6 +++---
>  arch/sh/include/asm/bitops-op32.h       |  8 ++++----
>  arch/sparc/include/asm/bitops_32.h      |  6 +++---
>  arch/sparc/include/asm/bitops_64.h      |  6 +++---
>  arch/tile/include/asm/bitops_32.h       |  6 +++---
>  arch/tile/include/asm/bitops_64.h       | 10 +++++-----
>  arch/xtensa/include/asm/bitops.h        |  6 +++---
>  include/asm-generic/bitops/atomic.h     |  6 +++---
>  include/asm-generic/bitops/le.h         | 10 +++++-----
>  include/asm-generic/bitops/non-atomic.h |  8 ++++----
>  31 files changed, 157 insertions(+), 156 deletions(-)
> 
> diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h
> index 4bdfbd4..92d468f 100644
> --- a/arch/alpha/include/asm/bitops.h
> +++ b/arch/alpha/include/asm/bitops.h
> @@ -125,7 +125,7 @@ __change_bit(unsigned long nr, volatile void * addr)
>  	*m ^= 1 << (nr & 31);
>  }
>  
> -static inline int
> +static inline bool
>  test_and_set_bit(unsigned long nr, volatile void *addr)
>  {
>  	unsigned long oldbit;
> @@ -155,7 +155,7 @@ test_and_set_bit(unsigned long nr, volatile void *addr)
>  	return oldbit != 0;
>  }
>  
> -static inline int
> +static inline bool
>  test_and_set_bit_lock(unsigned long nr, volatile void *addr)
>  {
>  	unsigned long oldbit;
> @@ -185,7 +185,7 @@ test_and_set_bit_lock(unsigned long nr, volatile void *addr)
>  /*
>   * WARNING: non atomic version.
>   */
> -static inline int
> +static inline bool
>  __test_and_set_bit(unsigned long nr, volatile void * addr)
>  {
>  	unsigned long mask = 1 << (nr & 0x1f);
> @@ -196,7 +196,7 @@ __test_and_set_bit(unsigned long nr, volatile void * addr)
>  	return (old & mask) != 0;
>  }
>  
> -static inline int
> +static inline bool
>  test_and_clear_bit(unsigned long nr, volatile void * addr)
>  {
>  	unsigned long oldbit;
> @@ -229,7 +229,7 @@ test_and_clear_bit(unsigned long nr, volatile void * addr)
>  /*
>   * WARNING: non atomic version.
>   */
> -static inline int
> +static inline bool
>  __test_and_clear_bit(unsigned long nr, volatile void * addr)
>  {
>  	unsigned long mask = 1 << (nr & 0x1f);
> @@ -240,7 +240,7 @@ __test_and_clear_bit(unsigned long nr, volatile void * addr)
>  	return (old & mask) != 0;
>  }
>  
> -static inline int
> +static inline bool
>  test_and_change_bit(unsigned long nr, volatile void * addr)
>  {
>  	unsigned long oldbit;
> @@ -271,7 +271,7 @@ test_and_change_bit(unsigned long nr, volatile void * addr)
>  /*
>   * WARNING: non atomic version.
>   */
> -static __inline__ int
> +static __inline__ bool
>  __test_and_change_bit(unsigned long nr, volatile void * addr)
>  {
>  	unsigned long mask = 1 << (nr & 0x1f);
> @@ -282,7 +282,7 @@ __test_and_change_bit(unsigned long nr, volatile void * addr)
>  	return (old & mask) != 0;
>  }
>  
> -static inline int
> +static inline bool
>  test_bit(int nr, const volatile void * addr)
>  {
>  	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
> diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
> index 8da87fee..e1976ab 100644
> --- a/arch/arc/include/asm/bitops.h
> +++ b/arch/arc/include/asm/bitops.h
> @@ -60,7 +60,7 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
>   * and the old value of bit is returned
>   */
>  #define TEST_N_BIT_OP(op, c_op, asm_op)					\
> -static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
> +static inline bool test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
>  {									\
>  	unsigned long old, temp;					\
>  									\
> @@ -124,7 +124,7 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
>  }
>  
>  #define TEST_N_BIT_OP(op, c_op, asm_op)					\
> -static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
> +static inline bool test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
>  {									\
>  	unsigned long old, flags;					\
>  	m += nr >> 5;							\
> @@ -160,7 +160,7 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
>  }
>  
>  #define TEST_N_BIT_OP(op, c_op, asm_op)					\
> -static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
> +static inline bool test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
>  {									\
>  	unsigned long old;						\
>  									\
> @@ -204,7 +204,7 @@ static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m)	\
>  }
>  
>  #define __TEST_N_BIT_OP(op, c_op, asm_op)				\
> -static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
> +static inline bool __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
>  {									\
>  	unsigned long old;						\
>  	m += nr >> 5;							\
> @@ -242,7 +242,7 @@ BIT_OPS(change, ^, CTOP_INST_AXOR_DI_R2_R2_R3)
>  /*
>   * This routine doesn't need to be atomic.
>   */
> -static inline int
> +static inline bool
>  test_bit(unsigned int nr, const volatile unsigned long *addr)
>  {
>  	unsigned long mask;
> diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
> index e943e6c..719a598 100644
> --- a/arch/arm/include/asm/bitops.h
> +++ b/arch/arm/include/asm/bitops.h
> @@ -68,7 +68,7 @@ static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned lon
>  	raw_local_irq_restore(flags);
>  }
>  
> -static inline int
> +static inline bool
>  ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
>  {
>  	unsigned long flags;
> @@ -85,7 +85,7 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
>  	return (res & mask) != 0;
>  }
>  
> -static inline int
> +static inline bool
>  ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
>  {
>  	unsigned long flags;
> @@ -102,7 +102,7 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
>  	return (res & mask) != 0;
>  }
>  
> -static inline int
> +static inline bool
>  ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
>  {
>  	unsigned long flags;
> @@ -152,9 +152,9 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
>  extern void _set_bit(int nr, volatile unsigned long * p);
>  extern void _clear_bit(int nr, volatile unsigned long * p);
>  extern void _change_bit(int nr, volatile unsigned long * p);
> -extern int _test_and_set_bit(int nr, volatile unsigned long * p);
> -extern int _test_and_clear_bit(int nr, volatile unsigned long * p);
> -extern int _test_and_change_bit(int nr, volatile unsigned long * p);
> +extern bool _test_and_set_bit(int nr, volatile unsigned long *p);
> +extern bool _test_and_clear_bit(int nr, volatile unsigned long *p);
> +extern bool _test_and_change_bit(int nr, volatile unsigned long *p);
>  
>  /*
>   * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
> diff --git a/arch/arm64/include/asm/bitops.h b/arch/arm64/include/asm/bitops.h
> index 9c19594..61f9f3c 100644
> --- a/arch/arm64/include/asm/bitops.h
> +++ b/arch/arm64/include/asm/bitops.h
> @@ -29,9 +29,9 @@
>  extern void set_bit(int nr, volatile unsigned long *p);
>  extern void clear_bit(int nr, volatile unsigned long *p);
>  extern void change_bit(int nr, volatile unsigned long *p);
> -extern int test_and_set_bit(int nr, volatile unsigned long *p);
> -extern int test_and_clear_bit(int nr, volatile unsigned long *p);
> -extern int test_and_change_bit(int nr, volatile unsigned long *p);
> +extern bool test_and_set_bit(int nr, volatile unsigned long *p);
> +extern bool test_and_clear_bit(int nr, volatile unsigned long *p);
> +extern bool test_and_change_bit(int nr, volatile unsigned long *p);
>  
>  #include <asm-generic/bitops/builtin-__ffs.h>
>  #include <asm-generic/bitops/builtin-ffs.h>
> diff --git a/arch/avr32/include/asm/bitops.h b/arch/avr32/include/asm/bitops.h
> index 910d537..0e3e08b 100644
> --- a/arch/avr32/include/asm/bitops.h
> +++ b/arch/avr32/include/asm/bitops.h
> @@ -128,7 +128,7 @@ static inline void change_bit(int nr, volatile void * addr)
>   * This operation is atomic and cannot be reordered.
>   * It also implies a memory barrier.
>   */
> -static inline int test_and_set_bit(int nr, volatile void * addr)
> +static inline bool test_and_set_bit(int nr, volatile void *addr)
>  {
>  	unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
>  	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
> @@ -168,7 +168,7 @@ static inline int test_and_set_bit(int nr, volatile void * addr)
>   * This operation is atomic and cannot be reordered.
>   * It also implies a memory barrier.
>   */
> -static inline int test_and_clear_bit(int nr, volatile void * addr)
> +static inline bool test_and_clear_bit(int nr, volatile void *addr)
>  {
>  	unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
>  	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
> @@ -209,7 +209,7 @@ static inline int test_and_clear_bit(int nr, volatile void * addr)
>   * This operation is atomic and cannot be reordered.
>   * It also implies a memory barrier.
>   */
> -static inline int test_and_change_bit(int nr, volatile void * addr)
> +static inline bool test_and_change_bit(int nr, volatile void *addr)
>  {
>  	unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
>  	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
> diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
> index b298b65..ff43a11 100644
> --- a/arch/blackfin/include/asm/bitops.h
> +++ b/arch/blackfin/include/asm/bitops.h
> @@ -47,13 +47,13 @@ asmlinkage int __raw_bit_clear_asm(volatile unsigned long *addr, int nr);
>  
>  asmlinkage int __raw_bit_toggle_asm(volatile unsigned long *addr, int nr);
>  
> -asmlinkage int __raw_bit_test_set_asm(volatile unsigned long *addr, int nr);
> +asmlinkage bool __raw_bit_test_set_asm(volatile unsigned long *addr, int nr);
>  
> -asmlinkage int __raw_bit_test_clear_asm(volatile unsigned long *addr, int nr);
> +asmlinkage bool __raw_bit_test_clear_asm(volatile unsigned long *addr, int nr);
>  
> -asmlinkage int __raw_bit_test_toggle_asm(volatile unsigned long *addr, int nr);
> +asmlinkage bool __raw_bit_test_toggle_asm(volatile unsigned long *addr, int nr);
>  
> -asmlinkage int __raw_bit_test_asm(const volatile unsigned long *addr, int nr);
> +asmlinkage bool __raw_bit_test_asm(const volatile unsigned long *addr, int nr);
>  
>  static inline void set_bit(int nr, volatile unsigned long *addr)
>  {
> @@ -73,25 +73,25 @@ static inline void change_bit(int nr, volatile unsigned long *addr)
>  	__raw_bit_toggle_asm(a, nr & 0x1f);
>  }
>  
> -static inline int test_bit(int nr, const volatile unsigned long *addr)
> +static inline bool test_bit(int nr, const volatile unsigned long *addr)
>  {
>  	volatile const unsigned long *a = addr + (nr >> 5);
>  	return __raw_bit_test_asm(a, nr & 0x1f) != 0;
>  }
>  
> -static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
> +static inline bool test_and_set_bit(int nr, volatile unsigned long *addr)
>  {
>  	volatile unsigned long *a = addr + (nr >> 5);
>  	return __raw_bit_test_set_asm(a, nr & 0x1f);
>  }
>  
> -static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
> +static inline bool test_and_clear_bit(int nr, volatile unsigned long *addr)
>  {
>  	volatile unsigned long *a = addr + (nr >> 5);
>  	return __raw_bit_test_clear_asm(a, nr & 0x1f);
>  }
>  
> -static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
> +static inline bool test_and_change_bit(int nr, volatile unsigned long *addr)
>  {
>  	volatile unsigned long *a = addr + (nr >> 5);
>  	return __raw_bit_test_toggle_asm(a, nr & 0x1f);
> diff --git a/arch/frv/include/asm/bitops.h b/arch/frv/include/asm/bitops.h
> index 0df8e95..c9bf93d 100644
> --- a/arch/frv/include/asm/bitops.h
> +++ b/arch/frv/include/asm/bitops.h
> @@ -27,7 +27,7 @@
>  
>  #include <asm/atomic.h>
>  
> -static inline int test_and_clear_bit(unsigned long nr, volatile void *addr)
> +static inline bool test_and_clear_bit(unsigned long nr, volatile void *addr)
>  {
>  	unsigned int *ptr = (void *)addr;
>  	unsigned int mask = 1UL << (nr & 31);
> @@ -35,7 +35,7 @@ static inline int test_and_clear_bit(unsigned long nr, volatile void *addr)
>  	return (__atomic32_fetch_and(~mask, ptr) & mask) != 0;
>  }
>  
> -static inline int test_and_set_bit(unsigned long nr, volatile void *addr)
> +static inline bool test_and_set_bit(unsigned long nr, volatile void *addr)
>  {
>  	unsigned int *ptr = (void *)addr;
>  	unsigned int mask = 1UL << (nr & 31);
> @@ -43,7 +43,7 @@ static inline int test_and_set_bit(unsigned long nr, volatile void *addr)
>  	return (__atomic32_fetch_or(mask, ptr) & mask) != 0;
>  }
>  
> -static inline int test_and_change_bit(unsigned long nr, volatile void *addr)
> +static inline bool test_and_change_bit(unsigned long nr, volatile void *addr)
>  {
>  	unsigned int *ptr = (void *)addr;
>  	unsigned int mask = 1UL << (nr & 31);
> @@ -96,7 +96,7 @@ static inline void __change_bit(unsigned long nr, volatile void *addr)
>  	*a ^= mask;
>  }
>  
> -static inline int __test_and_clear_bit(unsigned long nr, volatile void *addr)
> +static inline bool __test_and_clear_bit(unsigned long nr, volatile void *addr)
>  {
>  	volatile unsigned long *a = addr;
>  	int mask, retval;
> @@ -108,7 +108,7 @@ static inline int __test_and_clear_bit(unsigned long nr, volatile void *addr)
>  	return retval;
>  }
>  
> -static inline int __test_and_set_bit(unsigned long nr, volatile void *addr)
> +static inline bool __test_and_set_bit(unsigned long nr, volatile void *addr)
>  {
>  	volatile unsigned long *a = addr;
>  	int mask, retval;
> @@ -120,7 +120,7 @@ static inline int __test_and_set_bit(unsigned long nr, volatile void *addr)
>  	return retval;
>  }
>  
> -static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
> +static inline bool __test_and_change_bit(unsigned long nr, volatile void *addr)
>  {
>  	volatile unsigned long *a = addr;
>  	int mask, retval;
> @@ -135,13 +135,13 @@ static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
>  /*
>   * This routine doesn't need to be atomic.
>   */
> -static inline int
> +static inline bool
>  __constant_test_bit(unsigned long nr, const volatile void *addr)
>  {
>  	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
>  }
>  
> -static inline int __test_bit(unsigned long nr, const volatile void *addr)
> +static inline bool __test_bit(unsigned long nr, const volatile void *addr)
>  {
>  	int 	* a = (int *) addr;
>  	int	mask;
> diff --git a/arch/h8300/include/asm/bitops.h b/arch/h8300/include/asm/bitops.h
> index 05999ab..8f6dfc6 100644
> --- a/arch/h8300/include/asm/bitops.h
> +++ b/arch/h8300/include/asm/bitops.h
> @@ -65,7 +65,7 @@ H8300_GEN_BITOP(change_bit, "bnot")
>  
>  #undef H8300_GEN_BITOP
>  
> -static inline int test_bit(int nr, const unsigned long *addr)
> +static inline bool test_bit(int nr, const unsigned long *addr)
>  {
>  	int ret = 0;
>  	unsigned char *b_addr;
> @@ -91,7 +91,7 @@ static inline int test_bit(int nr, const unsigned long *addr)
>  #define __test_bit(nr, addr) test_bit(nr, addr)
>  
>  #define H8300_GEN_TEST_BITOP(FNNAME, OP)				\
> -static inline int FNNAME(int nr, void *addr)				\
> +static inline bool FNNAME(int nr, void *addr)				\
>  {									\
>  	int retval = 0;							\
>  	char ccrsave;							\
> diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
> index 5e4a59b..fa6b32c 100644
> --- a/arch/hexagon/include/asm/bitops.h
> +++ b/arch/hexagon/include/asm/bitops.h
> @@ -42,7 +42,7 @@
>   * @nr:  bit number to clear
>   * @addr:  pointer to memory
>   */
> -static inline int test_and_clear_bit(int nr, volatile void *addr)
> +static inline bool test_and_clear_bit(int nr, volatile void *addr)
>  {
>  	int oldval;
>  
> @@ -66,7 +66,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
>   * @nr:  bit number to set
>   * @addr:  pointer to memory
>   */
> -static inline int test_and_set_bit(int nr, volatile void *addr)
> +static inline bool test_and_set_bit(int nr, volatile void *addr)
>  {
>  	int oldval;
>  
> @@ -92,7 +92,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
>   * @nr:  bit number to set
>   * @addr:  pointer to memory
>   */
> -static inline int test_and_change_bit(int nr, volatile void *addr)
> +static inline bool test_and_change_bit(int nr, volatile void *addr)
>  {
>  	int oldval;
>  
> @@ -157,22 +157,22 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
>  }
>  
>  /*  Apparently, at least some of these are allowed to be non-atomic  */
> -static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
> +static inline bool __test_and_clear_bit(int nr, volatile unsigned long *addr)
>  {
>  	return test_and_clear_bit(nr, addr);
>  }
>  
> -static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
> +static inline bool __test_and_set_bit(int nr, volatile unsigned long *addr)
>  {
>  	return test_and_set_bit(nr, addr);
>  }
>  
> -static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
> +static inline bool __test_and_change_bit(int nr, volatile unsigned long *addr)
>  {
>  	return test_and_change_bit(nr, addr);
>  }
>  
> -static inline int __test_bit(int nr, const volatile unsigned long *addr)
> +static inline bool __test_bit(int nr, const volatile unsigned long *addr)
>  {
>  	int retval;
>  
> diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
> index 71e8145..38edf72 100644
> --- a/arch/ia64/include/asm/bitops.h
> +++ b/arch/ia64/include/asm/bitops.h
> @@ -196,7 +196,7 @@ __change_bit (int nr, volatile void *addr)
>   * This operation is atomic and cannot be reordered.  
>   * It also implies the acquisition side of the memory barrier.
>   */
> -static __inline__ int
> +static __inline__ bool
>  test_and_set_bit (int nr, volatile void *addr)
>  {
>  	__u32 bit, old, new;
> @@ -231,7 +231,7 @@ test_and_set_bit (int nr, volatile void *addr)
>   * If two examples of this operation race, one can appear to succeed
>   * but actually fail.  You must protect multiple accesses with a lock.
>   */
> -static __inline__ int
> +static __inline__ bool
>  __test_and_set_bit (int nr, volatile void *addr)
>  {
>  	__u32 *p = (__u32 *) addr + (nr >> 5);
> @@ -250,7 +250,7 @@ __test_and_set_bit (int nr, volatile void *addr)
>   * This operation is atomic and cannot be reordered.  
>   * It also implies the acquisition side of the memory barrier.
>   */
> -static __inline__ int
> +static __inline__ bool
>  test_and_clear_bit (int nr, volatile void *addr)
>  {
>  	__u32 mask, old, new;
> @@ -276,7 +276,7 @@ test_and_clear_bit (int nr, volatile void *addr)
>   * If two examples of this operation race, one can appear to succeed
>   * but actually fail.  You must protect multiple accesses with a lock.
>   */
> -static __inline__ int
> +static __inline__ bool
>  __test_and_clear_bit(int nr, volatile void * addr)
>  {
>  	__u32 *p = (__u32 *) addr + (nr >> 5);
> @@ -295,7 +295,7 @@ __test_and_clear_bit(int nr, volatile void * addr)
>   * This operation is atomic and cannot be reordered.  
>   * It also implies the acquisition side of the memory barrier.
>   */
> -static __inline__ int
> +static __inline__ bool
>  test_and_change_bit (int nr, volatile void *addr)
>  {
>  	__u32 bit, old, new;
> @@ -319,7 +319,7 @@ test_and_change_bit (int nr, volatile void *addr)
>   *
>   * This operation is non-atomic and can be reordered.
>   */
> -static __inline__ int
> +static __inline__ bool
>  __test_and_change_bit (int nr, void *addr)
>  {
>  	__u32 old, bit = (1 << (nr & 31));
> @@ -330,7 +330,7 @@ __test_and_change_bit (int nr, void *addr)
>  	return (old & bit) != 0;
>  }
>  
> -static __inline__ int
> +static __inline__ bool
>  test_bit (int nr, const volatile void *addr)
>  {
>  	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
> diff --git a/arch/m32r/include/asm/bitops.h b/arch/m32r/include/asm/bitops.h
> index 86ba2b4..5f12ceb 100644
> --- a/arch/m32r/include/asm/bitops.h
> +++ b/arch/m32r/include/asm/bitops.h
> @@ -147,7 +147,7 @@ static __inline__ void change_bit(int nr, volatile void * addr)
>   * This operation is atomic and cannot be reordered.
>   * It also implies a memory barrier.
>   */
> -static __inline__ int test_and_set_bit(int nr, volatile void * addr)
> +static __inline__ bool test_and_set_bit(int nr, volatile void *addr)
>  {
>  	__u32 mask, oldbit;
>  	volatile __u32 *a = addr;
> @@ -182,7 +182,7 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
>   * This operation is atomic and cannot be reordered.
>   * It also implies a memory barrier.
>   */
> -static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
> +static __inline__ bool test_and_clear_bit(int nr, volatile void *addr)
>  {
>  	__u32 mask, oldbit;
>  	volatile __u32 *a = addr;
> @@ -219,7 +219,7 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
>   * This operation is atomic and cannot be reordered.
>   * It also implies a memory barrier.
>   */
> -static __inline__ int test_and_change_bit(int nr, volatile void * addr)
> +static __inline__ bool test_and_change_bit(int nr, volatile void *addr)
>  {
>  	__u32 mask, oldbit;
>  	volatile __u32 *a = addr;
> diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
> index b4a9b0d..9f5835d 100644
> --- a/arch/m68k/include/asm/bitops.h
> +++ b/arch/m68k/include/asm/bitops.h
> @@ -148,13 +148,13 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
>  #define __change_bit(nr, vaddr)	change_bit(nr, vaddr)
>  
>  
> -static inline int test_bit(int nr, const unsigned long *vaddr)
> +static inline bool test_bit(int nr, const unsigned long *vaddr)
>  {
>  	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
>  }
>  
>  
> -static inline int bset_reg_test_and_set_bit(int nr,
> +static inline bool bset_reg_test_and_set_bit(int nr,
>  					    volatile unsigned long *vaddr)
>  {
>  	char *p = (char *)vaddr + (nr ^ 31) / 8;
> @@ -167,7 +167,7 @@ static inline int bset_reg_test_and_set_bit(int nr,
>  	return retval;
>  }
>  
> -static inline int bset_mem_test_and_set_bit(int nr,
> +static inline bool bset_mem_test_and_set_bit(int nr,
>  					    volatile unsigned long *vaddr)
>  {
>  	char *p = (char *)vaddr + (nr ^ 31) / 8;
> @@ -179,7 +179,7 @@ static inline int bset_mem_test_and_set_bit(int nr,
>  	return retval;
>  }
>  
> -static inline int bfset_mem_test_and_set_bit(int nr,
> +static inline bool bfset_mem_test_and_set_bit(int nr,
>  					     volatile unsigned long *vaddr)
>  {
>  	char retval;
> @@ -204,7 +204,7 @@ static inline int bfset_mem_test_and_set_bit(int nr,
>  #define __test_and_set_bit(nr, vaddr)	test_and_set_bit(nr, vaddr)
>  
>  
> -static inline int bclr_reg_test_and_clear_bit(int nr,
> +static inline bool bclr_reg_test_and_clear_bit(int nr,
>  					      volatile unsigned long *vaddr)
>  {
>  	char *p = (char *)vaddr + (nr ^ 31) / 8;
> @@ -217,7 +217,7 @@ static inline int bclr_reg_test_and_clear_bit(int nr,
>  	return retval;
>  }
>  
> -static inline int bclr_mem_test_and_clear_bit(int nr,
> +static inline bool bclr_mem_test_and_clear_bit(int nr,
>  					      volatile unsigned long *vaddr)
>  {
>  	char *p = (char *)vaddr + (nr ^ 31) / 8;
> @@ -229,7 +229,7 @@ static inline int bclr_mem_test_and_clear_bit(int nr,
>  	return retval;
>  }
>  
> -static inline int bfclr_mem_test_and_clear_bit(int nr,
> +static inline bool bfclr_mem_test_and_clear_bit(int nr,
>  					       volatile unsigned long *vaddr)
>  {
>  	char retval;
> @@ -254,7 +254,7 @@ static inline int bfclr_mem_test_and_clear_bit(int nr,
>  #define __test_and_clear_bit(nr, vaddr)	test_and_clear_bit(nr, vaddr)
>  
>  
> -static inline int bchg_reg_test_and_change_bit(int nr,
> +static inline bool bchg_reg_test_and_change_bit(int nr,
>  					       volatile unsigned long *vaddr)
>  {
>  	char *p = (char *)vaddr + (nr ^ 31) / 8;
> @@ -267,7 +267,7 @@ static inline int bchg_reg_test_and_change_bit(int nr,
>  	return retval;
>  }
>  
> -static inline int bchg_mem_test_and_change_bit(int nr,
> +static inline bool bchg_mem_test_and_change_bit(int nr,
>  					       volatile unsigned long *vaddr)
>  {
>  	char *p = (char *)vaddr + (nr ^ 31) / 8;
> @@ -279,7 +279,7 @@ static inline int bchg_mem_test_and_change_bit(int nr,
>  	return retval;
>  }
>  
> -static inline int bfchg_mem_test_and_change_bit(int nr,
> +static inline bool bfchg_mem_test_and_change_bit(int nr,
>  						volatile unsigned long *vaddr)
>  {
>  	char retval;
> diff --git a/arch/metag/include/asm/bitops.h b/arch/metag/include/asm/bitops.h
> index 2671134..11df061 100644
> --- a/arch/metag/include/asm/bitops.h
> +++ b/arch/metag/include/asm/bitops.h
> @@ -48,7 +48,7 @@ static inline void change_bit(unsigned int bit, volatile unsigned long *p)
>  	__global_unlock1(flags);
>  }
>  
> -static inline int test_and_set_bit(unsigned int bit, volatile unsigned long *p)
> +static inline bool test_and_set_bit(unsigned int bit, volatile unsigned long *p)
>  {
>  	unsigned long flags;
>  	unsigned long old;
> @@ -67,7 +67,7 @@ static inline int test_and_set_bit(unsigned int bit, volatile unsigned long *p)
>  	return (old & mask) != 0;
>  }
>  
> -static inline int test_and_clear_bit(unsigned int bit,
> +static inline bool test_and_clear_bit(unsigned int bit,
>  				     volatile unsigned long *p)
>  {
>  	unsigned long flags;
> @@ -87,7 +87,7 @@ static inline int test_and_clear_bit(unsigned int bit,
>  	return (old & mask) != 0;
>  }
>  
> -static inline int test_and_change_bit(unsigned int bit,
> +static inline bool test_and_change_bit(unsigned int bit,
>  				      volatile unsigned long *p)
>  {
>  	unsigned long flags;
> diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
> index fa57cef..7e53c66 100644
> --- a/arch/mips/include/asm/bitops.h
> +++ b/arch/mips/include/asm/bitops.h
> @@ -30,13 +30,13 @@
>  void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
>  void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
>  void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
> -int __mips_test_and_set_bit(unsigned long nr,
> +bool __mips_test_and_set_bit(unsigned long nr,
>  			    volatile unsigned long *addr);
> -int __mips_test_and_set_bit_lock(unsigned long nr,
> +bool __mips_test_and_set_bit_lock(unsigned long nr,
>  				 volatile unsigned long *addr);
> -int __mips_test_and_clear_bit(unsigned long nr,
> +bool __mips_test_and_clear_bit(unsigned long nr,
>  			      volatile unsigned long *addr);
> -int __mips_test_and_change_bit(unsigned long nr,
> +bool __mips_test_and_change_bit(unsigned long nr,
>  			       volatile unsigned long *addr);
>  
>  
> @@ -210,7 +210,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
>   * This operation is atomic and cannot be reordered.
>   * It also implies a memory barrier.
>   */
> -static inline int test_and_set_bit(unsigned long nr,
> +static inline bool test_and_set_bit(unsigned long nr,
>  	volatile unsigned long *addr)
>  {
>  	int bit = nr & SZLONG_MASK;
> @@ -266,7 +266,7 @@ static inline int test_and_set_bit(unsigned long nr,
>   * This operation is atomic and implies acquire ordering semantics
>   * after the memory operation.
>   */
> -static inline int test_and_set_bit_lock(unsigned long nr,
> +static inline bool test_and_set_bit_lock(unsigned long nr,
>  	volatile unsigned long *addr)
>  {
>  	int bit = nr & SZLONG_MASK;
> @@ -319,7 +319,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
>   * This operation is atomic and cannot be reordered.
>   * It also implies a memory barrier.
>   */
> -static inline int test_and_clear_bit(unsigned long nr,
> +static inline bool test_and_clear_bit(unsigned long nr,
>  	volatile unsigned long *addr)
>  {
>  	int bit = nr & SZLONG_MASK;
> @@ -393,7 +393,7 @@ static inline int test_and_clear_bit(unsigned long nr,
>   * This operation is atomic and cannot be reordered.
>   * It also implies a memory barrier.
>   */
> -static inline int test_and_change_bit(unsigned long nr,
> +static inline bool test_and_change_bit(unsigned long nr,
>  	volatile unsigned long *addr)
>  {
>  	int bit = nr & SZLONG_MASK;
> diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
> index 3b2a1e7..8f0ba2a 100644
> --- a/arch/mips/lib/bitops.c
> +++ b/arch/mips/lib/bitops.c
> @@ -83,14 +83,14 @@ EXPORT_SYMBOL(__mips_change_bit);
>   * @nr: Bit to set
>   * @addr: Address to count from
>   */
> -int __mips_test_and_set_bit(unsigned long nr,
> +bool __mips_test_and_set_bit(unsigned long nr,
>  			    volatile unsigned long *addr)
>  {
>  	unsigned long *a = (unsigned long *)addr;
>  	unsigned bit = nr & SZLONG_MASK;
>  	unsigned long mask;
>  	unsigned long flags;
> -	int res;
> +	bool res;
>  
>  	a += nr >> SZLONG_LOG;
>  	mask = 1UL << bit;
> @@ -109,14 +109,14 @@ EXPORT_SYMBOL(__mips_test_and_set_bit);
>   * @nr: Bit to set
>   * @addr: Address to count from
>   */
> -int __mips_test_and_set_bit_lock(unsigned long nr,
> +bool __mips_test_and_set_bit_lock(unsigned long nr,
>  				 volatile unsigned long *addr)
>  {
>  	unsigned long *a = (unsigned long *)addr;
>  	unsigned bit = nr & SZLONG_MASK;
>  	unsigned long mask;
>  	unsigned long flags;
> -	int res;
> +	bool res;
>  
>  	a += nr >> SZLONG_LOG;
>  	mask = 1UL << bit;
> @@ -135,13 +135,13 @@ EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
>   * @nr: Bit to clear
>   * @addr: Address to count from
>   */
> -int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
> +bool __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
>  {
>  	unsigned long *a = (unsigned long *)addr;
>  	unsigned bit = nr & SZLONG_MASK;
>  	unsigned long mask;
>  	unsigned long flags;
> -	int res;
> +	bool res;
>  
>  	a += nr >> SZLONG_LOG;
>  	mask = 1UL << bit;
> @@ -160,13 +160,13 @@ EXPORT_SYMBOL(__mips_test_and_clear_bit);
>   * @nr: Bit to change
>   * @addr: Address to count from
>   */
> -int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
> +bool __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
>  {
>  	unsigned long *a = (unsigned long *)addr;
>  	unsigned bit = nr & SZLONG_MASK;
>  	unsigned long mask;
>  	unsigned long flags;
> -	int res;
> +	bool res;
>  
>  	a += nr >> SZLONG_LOG;
>  	mask = 1UL << bit;
> diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h
> index fe6f8e2..5b00e95 100644
> --- a/arch/mn10300/include/asm/bitops.h
> +++ b/arch/mn10300/include/asm/bitops.h
> @@ -68,7 +68,7 @@ static inline void __clear_bit(unsigned long nr, volatile void *addr)
>  /*
>   * test bit
>   */
> -static inline int test_bit(unsigned long nr, const volatile void *addr)
> +static inline bool test_bit(unsigned long nr, const volatile void *addr)
>  {
>  	return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
>  }
> @@ -133,9 +133,10 @@ extern void change_bit(unsigned long nr, volatile void *addr);
>  /*
>   * test and change bit
>   */
> -static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
> +static inline bool __test_and_change_bit(unsigned long nr, volatile void *addr)
>  {
> -	int	mask, retval;
> +	int mask;
> +	bool retval;
>  	unsigned int *a = (unsigned int *)addr;
>  
>  	a += nr >> 5;
> diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
> index 3f9406d..bac163d 100644
> --- a/arch/parisc/include/asm/bitops.h
> +++ b/arch/parisc/include/asm/bitops.h
> @@ -59,17 +59,17 @@ static __inline__ void change_bit(int nr, volatile unsigned long * addr)
>  	_atomic_spin_unlock_irqrestore(addr, flags);
>  }
>  
> -static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
> +static __inline__ bool test_and_set_bit(int nr, volatile unsigned long *addr)
>  {
>  	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
>  	unsigned long old;
>  	unsigned long flags;
> -	int set;
> +	bool set;
>  
>  	addr += (nr >> SHIFT_PER_LONG);
>  	_atomic_spin_lock_irqsave(addr, flags);
>  	old = *addr;
> -	set = (old & mask) ? 1 : 0;
> +	set = (old & mask) != 0;
>  	if (!set)
>  		*addr = old | mask;
>  	_atomic_spin_unlock_irqrestore(addr, flags);
> @@ -77,17 +77,17 @@ static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
>  	return set;
>  }
>  
> -static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
> +static __inline__ bool test_and_clear_bit(int nr, volatile unsigned long *addr)
>  {
>  	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
>  	unsigned long old;
>  	unsigned long flags;
> -	int set;
> +	bool set;
>  
>  	addr += (nr >> SHIFT_PER_LONG);
>  	_atomic_spin_lock_irqsave(addr, flags);
>  	old = *addr;
> -	set = (old & mask) ? 1 : 0;
> +	set = (old & mask) != 0;
>  	if (set)
>  		*addr = old & ~mask;
>  	_atomic_spin_unlock_irqrestore(addr, flags);
> @@ -95,7 +95,7 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
>  	return set;
>  }
>  
> -static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
> +static __inline__ bool test_and_change_bit(int nr, volatile unsigned long *addr)
>  {
>  	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
>  	unsigned long oldbit;
> @@ -107,7 +107,7 @@ static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
>  	*addr = oldbit ^ mask;
>  	_atomic_spin_unlock_irqrestore(addr, flags);
>  
> -	return (oldbit & mask) ? 1 : 0;
> +	return (oldbit & mask) != 0;
>  }
>  
>  #include <asm-generic/bitops/non-atomic.h>
> diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
> index 59abc62..7838138 100644
> --- a/arch/powerpc/include/asm/bitops.h
> +++ b/arch/powerpc/include/asm/bitops.h
> @@ -100,7 +100,7 @@ static __inline__ void change_bit(int nr, volatile unsigned long *addr)
>  /* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output
>   * operands. */
>  #define DEFINE_TESTOP(fn, op, prefix, postfix, eh)	\
> -static __inline__ unsigned long fn(			\
> +static __inline__ bool fn(				\
>  		unsigned long mask,			\
>  		volatile unsigned long *_p)		\
>  {							\
> @@ -129,26 +129,26 @@ DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
>  DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
>  	      PPC_ATOMIC_EXIT_BARRIER, 0)
>  
> -static __inline__ int test_and_set_bit(unsigned long nr,
> +static __inline__ bool test_and_set_bit(unsigned long nr,
>  				       volatile unsigned long *addr)
>  {
>  	return test_and_set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
>  }
>  
> -static __inline__ int test_and_set_bit_lock(unsigned long nr,
> +static __inline__ bool test_and_set_bit_lock(unsigned long nr,
>  				       volatile unsigned long *addr)
>  {
>  	return test_and_set_bits_lock(BIT_MASK(nr),
>  				addr + BIT_WORD(nr)) != 0;
>  }
>  
> -static __inline__ int test_and_clear_bit(unsigned long nr,
> +static __inline__ bool test_and_clear_bit(unsigned long nr,
>  					 volatile unsigned long *addr)
>  {
>  	return test_and_clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
>  }
>  
> -static __inline__ int test_and_change_bit(unsigned long nr,
> +static __inline__ bool test_and_change_bit(unsigned long nr,
>  					  volatile unsigned long *addr)
>  {
>  	return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
> diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
> index 8043f10..71e6202 100644
> --- a/arch/s390/include/asm/bitops.h
> +++ b/arch/s390/include/asm/bitops.h
> @@ -173,7 +173,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
>  	__BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER);
>  }
>  
> -static inline int
> +static inline bool
>  test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
>  {
>  	unsigned long *addr = __bitops_word(nr, ptr);
> @@ -184,7 +184,7 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
>  	return (old & mask) != 0;
>  }
>  
> -static inline int
> +static inline bool
>  test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
>  {
>  	unsigned long *addr = __bitops_word(nr, ptr);
> @@ -195,7 +195,7 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
>  	return (old & ~mask) != 0;
>  }
>  
> -static inline int
> +static inline bool
>  test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
>  {
>  	unsigned long *addr = __bitops_word(nr, ptr);
> @@ -228,7 +228,7 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
>  	*addr ^= 1 << (nr & 7);
>  }
>  
> -static inline int
> +static inline bool
>  __test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
>  {
>  	unsigned char *addr = __bitops_byte(nr, ptr);
> @@ -239,7 +239,7 @@ __test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
>  	return (ch >> (nr & 7)) & 1;
>  }
>  
> -static inline int
> +static inline bool
>  __test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
>  {
>  	unsigned char *addr = __bitops_byte(nr, ptr);
> @@ -250,7 +250,7 @@ __test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
>  	return (ch >> (nr & 7)) & 1;
>  }
>  
> -static inline int
> +static inline bool
>  __test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
>  {
>  	unsigned char *addr = __bitops_byte(nr, ptr);
> @@ -261,7 +261,7 @@ __test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
>  	return (ch >> (nr & 7)) & 1;
>  }
>  
> -static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
> +static inline bool test_bit(unsigned long nr, const volatile unsigned long *ptr)
>  {
>  	const volatile unsigned char *addr;
>  
> @@ -270,7 +270,7 @@ static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
>  	return (*addr >> (nr & 7)) & 1;
>  }
>  
> -static inline int test_and_set_bit_lock(unsigned long nr,
> +static inline bool test_and_set_bit_lock(unsigned long nr,
>  					volatile unsigned long *ptr)
>  {
>  	if (test_bit(nr, ptr))
> @@ -321,7 +321,7 @@ static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr
>  	return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
>  }
>  
> -static inline int test_bit_inv(unsigned long nr,
> +static inline bool test_bit_inv(unsigned long nr,
>  			       const volatile unsigned long *ptr)
>  {
>  	return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
> diff --git a/arch/sh/include/asm/bitops-cas.h b/arch/sh/include/asm/bitops-cas.h
> index 88f793c..c4fde9c 100644
> --- a/arch/sh/include/asm/bitops-cas.h
> +++ b/arch/sh/include/asm/bitops-cas.h
> @@ -46,7 +46,7 @@ static inline void change_bit(int nr, volatile void *addr)
>  	while (__bo_cas(a, old, old^mask) != old);
>  }
>  
> -static inline int test_and_set_bit(int nr, volatile void *addr)
> +static inline bool test_and_set_bit(int nr, volatile void *addr)
>  {
>  	unsigned mask, old;
>  	volatile unsigned *a = addr;
> @@ -60,7 +60,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
>  	return !!(old & mask);
>  }
>  
> -static inline int test_and_clear_bit(int nr, volatile void *addr)
> +static inline bool test_and_clear_bit(int nr, volatile void *addr)
>  {
>  	unsigned mask, old;
>  	volatile unsigned *a = addr;
> @@ -74,7 +74,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
>  	return !!(old & mask);
>  }
>  
> -static inline int test_and_change_bit(int nr, volatile void *addr)
> +static inline bool test_and_change_bit(int nr, volatile void *addr)
>  {
>  	unsigned mask, old;
>  	volatile unsigned *a = addr;
> diff --git a/arch/sh/include/asm/bitops-grb.h b/arch/sh/include/asm/bitops-grb.h
> index e73af33..866f26a 100644
> --- a/arch/sh/include/asm/bitops-grb.h
> +++ b/arch/sh/include/asm/bitops-grb.h
> @@ -71,7 +71,7 @@ static inline void change_bit(int nr, volatile void * addr)
>                  : "memory" , "r0", "r1");
>  }
>  
> -static inline int test_and_set_bit(int nr, volatile void * addr)
> +static inline bool test_and_set_bit(int nr, volatile void *addr)
>  {
>          int     mask, retval;
>  	volatile unsigned int *a = addr;
> @@ -102,7 +102,7 @@ static inline int test_and_set_bit(int nr, volatile void * addr)
>          return retval;
>  }
>  
> -static inline int test_and_clear_bit(int nr, volatile void * addr)
> +static inline bool test_and_clear_bit(int nr, volatile void *addr)
>  {
>          int     mask, retval,not_mask;
>          volatile unsigned int *a = addr;
> @@ -136,7 +136,7 @@ static inline int test_and_clear_bit(int nr, volatile void * addr)
>          return retval;
>  }
>  
> -static inline int test_and_change_bit(int nr, volatile void * addr)
> +static inline bool test_and_change_bit(int nr, volatile void *addr)
>  {
>          int     mask, retval;
>          volatile unsigned int *a = addr;
> diff --git a/arch/sh/include/asm/bitops-llsc.h b/arch/sh/include/asm/bitops-llsc.h
> index d8328be..7dcf5ea 100644
> --- a/arch/sh/include/asm/bitops-llsc.h
> +++ b/arch/sh/include/asm/bitops-llsc.h
> @@ -64,7 +64,7 @@ static inline void change_bit(int nr, volatile void *addr)
>  	);
>  }
>  
> -static inline int test_and_set_bit(int nr, volatile void *addr)
> +static inline bool test_and_set_bit(int nr, volatile void *addr)
>  {
>  	int	mask, retval;
>  	volatile unsigned int *a = addr;
> @@ -89,7 +89,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
>  	return retval != 0;
>  }
>  
> -static inline int test_and_clear_bit(int nr, volatile void *addr)
> +static inline bool test_and_clear_bit(int nr, volatile void *addr)
>  {
>  	int	mask, retval;
>  	volatile unsigned int *a = addr;
> @@ -115,7 +115,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
>  	return retval != 0;
>  }
>  
> -static inline int test_and_change_bit(int nr, volatile void *addr)
> +static inline bool test_and_change_bit(int nr, volatile void *addr)
>  {
>  	int	mask, retval;
>  	volatile unsigned int *a = addr;
> diff --git a/arch/sh/include/asm/bitops-op32.h b/arch/sh/include/asm/bitops-op32.h
> index f0ae7e9..f677a4e 100644
> --- a/arch/sh/include/asm/bitops-op32.h
> +++ b/arch/sh/include/asm/bitops-op32.h
> @@ -88,7 +88,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
>   * If two examples of this operation race, one can appear to succeed
>   * but actually fail.  You must protect multiple accesses with a lock.
>   */
> -static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
> +static inline bool __test_and_set_bit(int nr, volatile unsigned long *addr)
>  {
>  	unsigned long mask = BIT_MASK(nr);
>  	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
> @@ -107,7 +107,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
>   * If two examples of this operation race, one can appear to succeed
>   * but actually fail.  You must protect multiple accesses with a lock.
>   */
> -static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
> +static inline bool __test_and_clear_bit(int nr, volatile unsigned long *addr)
>  {
>  	unsigned long mask = BIT_MASK(nr);
>  	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
> @@ -118,7 +118,7 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
>  }
>  
>  /* WARNING: non atomic and it can be reordered! */
> -static inline int __test_and_change_bit(int nr,
> +static inline bool __test_and_change_bit(int nr,
>  					    volatile unsigned long *addr)
>  {
>  	unsigned long mask = BIT_MASK(nr);
> @@ -134,7 +134,7 @@ static inline int __test_and_change_bit(int nr,
>   * @nr: bit number to test
>   * @addr: Address to start counting from
>   */
> -static inline int test_bit(int nr, const volatile unsigned long *addr)
> +static inline bool test_bit(int nr, const volatile unsigned long *addr)
>  {
>  	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
>  }
> diff --git a/arch/sparc/include/asm/bitops_32.h b/arch/sparc/include/asm/bitops_32.h
> index 600ed1d..afe275a 100644
> --- a/arch/sparc/include/asm/bitops_32.h
> +++ b/arch/sparc/include/asm/bitops_32.h
> @@ -28,7 +28,7 @@ unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
>   * within the first byte. Sparc is BIG-Endian. Unless noted otherwise
>   * all bit-ops return 0 if bit was previously clear and != 0 otherwise.
>   */
> -static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
> +static inline bool test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
>  {
>  	unsigned long *ADDR, mask;
>  
> @@ -48,7 +48,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
>  	(void) ___set_bit(ADDR, mask);
>  }
>  
> -static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
> +static inline bool test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
>  {
>  	unsigned long *ADDR, mask;
>  
> @@ -68,7 +68,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
>  	(void) ___clear_bit(ADDR, mask);
>  }
>  
> -static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
> +static inline bool test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
>  {
>  	unsigned long *ADDR, mask;
>  
> diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h
> index 2d52240..8cbd032 100644
> --- a/arch/sparc/include/asm/bitops_64.h
> +++ b/arch/sparc/include/asm/bitops_64.h
> @@ -15,9 +15,9 @@
>  #include <asm/byteorder.h>
>  #include <asm/barrier.h>
>  
> -int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
> -int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
> -int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
> +bool test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
> +bool test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
> +bool test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
>  void set_bit(unsigned long nr, volatile unsigned long *addr);
>  void clear_bit(unsigned long nr, volatile unsigned long *addr);
>  void change_bit(unsigned long nr, volatile unsigned long *addr);
> diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h
> index d1406a9..9ef0ba4 100644
> --- a/arch/tile/include/asm/bitops_32.h
> +++ b/arch/tile/include/asm/bitops_32.h
> @@ -80,7 +80,7 @@ static inline void change_bit(unsigned nr, volatile unsigned long *addr)
>   * This operation is atomic and cannot be reordered.
>   * It also implies a memory barrier.
>   */
> -static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
> +static inline bool test_and_set_bit(unsigned nr, volatile unsigned long *addr)
>  {
>  	unsigned long mask = BIT_MASK(nr);
>  	addr += BIT_WORD(nr);
> @@ -96,7 +96,7 @@ static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
>   * This operation is atomic and cannot be reordered.
>   * It also implies a memory barrier.
>   */
> -static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
> +static inline bool test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
>  {
>  	unsigned long mask = BIT_MASK(nr);
>  	addr += BIT_WORD(nr);
> @@ -112,7 +112,7 @@ static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
>   * This operation is atomic and cannot be reordered.
>   * It also implies a memory barrier.
>   */
> -static inline int test_and_change_bit(unsigned nr,
> +static inline bool test_and_change_bit(unsigned nr,
>  				      volatile unsigned long *addr)
>  {
>  	unsigned long mask = BIT_MASK(nr);
> diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
> index bb1a292..d970306 100644
> --- a/arch/tile/include/asm/bitops_64.h
> +++ b/arch/tile/include/asm/bitops_64.h
> @@ -52,9 +52,9 @@ static inline void change_bit(unsigned nr, volatile unsigned long *addr)
>   * barrier(), to block until the atomic op is complete.
>   */
>  
> -static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
> +static inline bool test_and_set_bit(unsigned nr, volatile unsigned long *addr)
>  {
> -	int val;
> +	bool val;
>  	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
>  	smp_mb();  /* barrier for proper semantics */
>  	val = (__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask)
> @@ -64,9 +64,9 @@ static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
>  }
>  
>  
> -static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
> +static inline bool test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
>  {
> -	int val;
> +	bool val;
>  	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
>  	smp_mb();  /* barrier for proper semantics */
>  	val = (__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask)
> @@ -76,7 +76,7 @@ static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
>  }
>  
>  
> -static inline int test_and_change_bit(unsigned nr,
> +static inline bool test_and_change_bit(unsigned nr,
>  				      volatile unsigned long *addr)
>  {
>  	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
> diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
> index d349018..485d95d 100644
> --- a/arch/xtensa/include/asm/bitops.h
> +++ b/arch/xtensa/include/asm/bitops.h
> @@ -154,7 +154,7 @@ static inline void change_bit(unsigned int bit, volatile unsigned long *p)
>  			: "memory");
>  }
>  
> -static inline int
> +static inline bool
>  test_and_set_bit(unsigned int bit, volatile unsigned long *p)
>  {
>  	unsigned long tmp, value;
> @@ -175,7 +175,7 @@ test_and_set_bit(unsigned int bit, volatile unsigned long *p)
>  	return tmp & mask;
>  }
>  
> -static inline int
> +static inline bool
>  test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
>  {
>  	unsigned long tmp, value;
> @@ -196,7 +196,7 @@ test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
>  	return tmp & mask;
>  }
>  
> -static inline int
> +static inline bool
>  test_and_change_bit(unsigned int bit, volatile unsigned long *p)
>  {
>  	unsigned long tmp, value;
> diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
> index 4967351..eb68d8d 100644
> --- a/include/asm-generic/bitops/atomic.h
> +++ b/include/asm-generic/bitops/atomic.h
> @@ -124,7 +124,7 @@ static inline void change_bit(int nr, volatile unsigned long *addr)
>   * It may be reordered on other architectures than x86.
>   * It also implies a memory barrier.
>   */
> -static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
> +static inline bool test_and_set_bit(int nr, volatile unsigned long *addr)
>  {
>  	unsigned long mask = BIT_MASK(nr);
>  	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
> @@ -148,7 +148,7 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
>   * It can be reorderdered on other architectures other than x86.
>   * It also implies a memory barrier.
>   */
> -static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
> +static inline bool test_and_clear_bit(int nr, volatile unsigned long *addr)
>  {
>  	unsigned long mask = BIT_MASK(nr);
>  	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
> @@ -171,7 +171,7 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
>   * This operation is atomic and cannot be reordered.
>   * It also implies a memory barrier.
>   */
> -static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
> +static inline bool test_and_change_bit(int nr, volatile unsigned long *addr)
>  {
>  	unsigned long mask = BIT_MASK(nr);
>  	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
> diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
> index 6173154..c610b99 100644
> --- a/include/asm-generic/bitops/le.h
> +++ b/include/asm-generic/bitops/le.h
> @@ -49,7 +49,7 @@ extern unsigned long find_next_bit_le(const void *addr,
>  #error "Please fix <asm/byteorder.h>"
>  #endif
>  
> -static inline int test_bit_le(int nr, const void *addr)
> +static inline bool test_bit_le(int nr, const void *addr)
>  {
>  	return test_bit(nr ^ BITOP_LE_SWIZZLE, addr);
>  }
> @@ -74,22 +74,22 @@ static inline void __clear_bit_le(int nr, void *addr)
>  	__clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
>  }
>  
> -static inline int test_and_set_bit_le(int nr, void *addr)
> +static inline bool test_and_set_bit_le(int nr, void *addr)
>  {
>  	return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
>  }
>  
> -static inline int test_and_clear_bit_le(int nr, void *addr)
> +static inline bool test_and_clear_bit_le(int nr, void *addr)
>  {
>  	return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
>  }
>  
> -static inline int __test_and_set_bit_le(int nr, void *addr)
> +static inline bool __test_and_set_bit_le(int nr, void *addr)
>  {
>  	return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
>  }
>  
> -static inline int __test_and_clear_bit_le(int nr, void *addr)
> +static inline bool __test_and_clear_bit_le(int nr, void *addr)
>  {
>  	return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
>  }
> diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h
> index 697cc2b..fea2b40 100644
> --- a/include/asm-generic/bitops/non-atomic.h
> +++ b/include/asm-generic/bitops/non-atomic.h
> @@ -54,7 +54,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
>   * If two examples of this operation race, one can appear to succeed
>   * but actually fail.  You must protect multiple accesses with a lock.
>   */
> -static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
> +static inline bool __test_and_set_bit(int nr, volatile unsigned long *addr)
>  {
>  	unsigned long mask = BIT_MASK(nr);
>  	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
> @@ -73,7 +73,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
>   * If two examples of this operation race, one can appear to succeed
>   * but actually fail.  You must protect multiple accesses with a lock.
>   */
> -static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
> +static inline bool __test_and_clear_bit(int nr, volatile unsigned long *addr)
>  {
>  	unsigned long mask = BIT_MASK(nr);
>  	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
> @@ -84,7 +84,7 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
>  }
>  
>  /* WARNING: non atomic and it can be reordered! */
> -static inline int __test_and_change_bit(int nr,
> +static inline bool __test_and_change_bit(int nr,
>  					    volatile unsigned long *addr)
>  {
>  	unsigned long mask = BIT_MASK(nr);
> @@ -100,7 +100,7 @@ static inline int __test_and_change_bit(int nr,
>   * @nr: bit number to test
>   * @addr: Address to start counting from
>   */
> -static inline int test_bit(int nr, const volatile unsigned long *addr)
> +static inline bool test_bit(int nr, const volatile unsigned long *addr)
>  {
>  	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
>  }
> -- 
> 1.9.3
> 

-- 
Michal Hocko
SUSE Labs

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions
  2016-08-28  5:39 [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions chengang
                   ` (3 preceding siblings ...)
  2016-08-29  8:45 ` Michal Hocko
@ 2016-08-29 13:03 ` Arnd Bergmann
  2016-08-29 13:46   ` Peter Zijlstra
                     ` (2 more replies)
  4 siblings, 3 replies; 19+ messages in thread
From: Arnd Bergmann @ 2016-08-29 13:03 UTC (permalink / raw)
  To: chengang
  Cc: akpm, minchan, vbabka, gi-oh.kim, iamjoonsoo.kim, hillf.zj,
	mgorman, mhocko, rientjes, linux-kernel, rth, ink, mattst88,
	vgupta, linux, catalin.marinas, will.deacon, hskinnemoen,
	egtvedt, realmz6, ysato, rkuo, tony.luck, fenghua.yu, geert,
	james.hogan, ralf, dhowells, deller, benh, paulus, mpe,
	schwidefsky, heiko.carstens, dalias, davem, cmetcalf, chris,
	jcmvbkbc, noamc, brueckner, mingo, peterz, linux-arch, Chen Gang

On Sunday 28 August 2016, chengang@emindsoft.com.cn wrote:
> From: Chen Gang <chengang@emindsoft.com.cn>
> 
> Also use the same changing to asm-generic, and also use bool variable
> instead of int variable for mips, mn10300, parisc and tile related
> functions, and also avoid checkpatch.pl to report ERROR.
> 
> Originally, except powerpc and xtensa, all another architectures intend
> to return 0 or 1. After this patch, also let powerpc and xtensa return 0
> or 1.
> 
> The patch passes cross building for mips and parisc with default config.
> All related contents are found by "grep test_bit, grep test_and" under
> arch sub-directory.
> 
> Signed-off-by: Chen Gang <gang.chen.5i5j@gmail.com>

This seems like a good idea overall, and I'm fine with the asm-generic
contents. If there is consensus on changing this, we probably also want
to do some other steps:

- Change the Documentation/atomic_ops.txt file accordingly
- split up the series per architecture (I don't think there are any
  interdependencies)
- For the architectures on which the definition changes (at least
  x86 and ARM), do some more sanity checks and see if there are
  noticeable changes in object code, and if so whether it looks
  better or worse (I'm guessing it will be better if anything)
- See which architectures can still get converted to using the
  asm-generic headers instead of providing their own, I think at
  least for the nonatomic ones, there are a couple.

	Arnd

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions
  2016-08-29 13:03 ` Arnd Bergmann
@ 2016-08-29 13:46   ` Peter Zijlstra
  2016-08-29 13:54     ` Geert Uytterhoeven
  2016-08-29 16:48   ` Vineet Gupta
  2016-08-29 22:01   ` [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions Chen Gang
  2 siblings, 1 reply; 19+ messages in thread
From: Peter Zijlstra @ 2016-08-29 13:46 UTC (permalink / raw)
  To: Arnd Bergmann
  Cc: chengang, akpm, minchan, vbabka, gi-oh.kim, iamjoonsoo.kim,
	hillf.zj, mgorman, mhocko, rientjes, linux-kernel, rth, ink,
	mattst88, vgupta, linux, catalin.marinas, will.deacon,
	hskinnemoen, egtvedt, realmz6, ysato, rkuo, tony.luck,
	fenghua.yu, geert, james.hogan, ralf, dhowells, deller, benh,
	paulus, mpe, schwidefsky, heiko.carstens, dalias, davem,
	cmetcalf, chris, jcmvbkbc, noamc, brueckner, mingo, linux-arch,
	Chen Gang

On Mon, Aug 29, 2016 at 03:03:41PM +0200, Arnd Bergmann wrote:
> - Change the Documentation/atomic_ops.txt file accordingly

Not sure that really matters; that document is so out of date it's nearly
useless :-(

Rewriting it is somewhere on the TODO list...

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions
  2016-08-29 13:46   ` Peter Zijlstra
@ 2016-08-29 13:54     ` Geert Uytterhoeven
  0 siblings, 0 replies; 19+ messages in thread
From: Geert Uytterhoeven @ 2016-08-29 13:54 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: Arnd Bergmann, chengang, Andrew Morton, Minchan Kim,
	Vlastimil Babka, gi-oh.kim, Joonsoo Kim, hillf.zj, Mel Gorman,
	Michal Hocko, David Rientjes, linux-kernel, Richard Henderson,
	Ivan Kokshaysky, Matt Turner, Vineet Gupta, Russell King,
	Catalin Marinas, Will Deacon, Håvard Skinnemoen,
	Hans-Christian Noren Egtvedt, Miao Steven, Yoshinori Sato,
	Richard Kuo, Tony Luck, Fenghua Yu, James Hogan, Ralf Baechle,
	David Howells, Helge Deller, Benjamin Herrenschmidt,
	Paul Mackerras, Michael Ellerman, Martin Schwidefsky,
	Heiko Carstens, Rich Felker, David S. Miller, cmetcalf,
	Chris Zankel, Max Filippov, Noam Camus, brueckner, Ingo Molnar,
	Linux-Arch, Chen Gang

On Mon, Aug 29, 2016 at 3:46 PM, Peter Zijlstra <peterz@infradead.org> wrote:
> On Mon, Aug 29, 2016 at 03:03:41PM +0200, Arnd Bergmann wrote:
>> - Change the Documentation/atomic_ops.txt file accordingly
>
> Not sure that really matters; that document is so out of date it's nearly
> useless :-(
>
> Rewriting it is somewhere on the TODO list...

Resphinxing?

Gr{oetje,eeting}s,

                        Geert

--
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- geert@linux-m68k.org

In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
                                -- Linus Torvalds

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions
  2016-08-29 13:03 ` Arnd Bergmann
  2016-08-29 13:46   ` Peter Zijlstra
@ 2016-08-29 16:48   ` Vineet Gupta
  2016-08-29 21:49     ` Chen Gang
  2016-08-29 22:01   ` [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions Chen Gang
  2 siblings, 1 reply; 19+ messages in thread
From: Vineet Gupta @ 2016-08-29 16:48 UTC (permalink / raw)
  To: Arnd Bergmann, chengang
  Cc: akpm, minchan, vbabka, gi-oh.kim, iamjoonsoo.kim, hillf.zj,
	mgorman, mhocko, rientjes, linux-kernel, rth, ink, mattst88,
	linux, catalin.marinas, will.deacon, hskinnemoen, egtvedt,
	realmz6, ysato, rkuo, tony.luck, fenghua.yu, geert, james.hogan,
	ralf, dhowells, deller, benh, paulus, mpe, schwidefsky,
	heiko.carstens, dalias, davem

On 08/29/2016 06:03 AM, Arnd Bergmann wrote:
> On Sunday 28 August 2016, chengang@emindsoft.com.cn wrote:
>> From: Chen Gang <chengang@emindsoft.com.cn>
>>
>> Also use the same changing to asm-generic, and also use bool variable
>> instead of int variable for mips, mn10300, parisc and tile related
>> functions, and also avoid checkpatch.pl to report ERROR.
>>
>> Originally, except powerpc and xtensa, all another architectures intend
>> to return 0 or 1. After this patch, also let powerpc and xtensa return 0
>> or 1.
>>
>> The patch passes cross building for mips and parisc with default config.
>> All related contents are found by "grep test_bit, grep test_and" under
>> arch sub-directory.
>>
>> Signed-off-by: Chen Gang <gang.chen.5i5j@gmail.com>
> 
> This seems like a good idea overall, and I'm fine with the asm-generic
> contents. If there is consensus on changing this, we probably also want
> to do some other steps:
> 
> - Change the Documentation/atomic_ops.txt file accordingly
> - split up the series per architecture (I don't think there are any
>   interdependencies)
> - For the architectures on which the definition changes (at least
>   x86 and ARM), do some more sanity checks and see if there are
>   noticeable changes in object code, and if so whether it looks
>   better or worse (I'm guessing it will be better if anything)

For ARC at least, it will be slightly worse. As bool is promoted to int in various
expressions, gcc generates an additional EXTB (extend byte) instruction.

> - See which architectures can still get converted to using the
>   asm-generic headers instead of providing their own, I think at
>   least for the nonatomic ones, there are a couple.
> 
> 	Arnd
> 

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions
  2016-08-29 16:48   ` Vineet Gupta
@ 2016-08-29 21:49     ` Chen Gang
  2016-09-01 20:43       ` cmsg newgroup alt.sex.fetish.bool (was Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions) Al Viro
  0 siblings, 1 reply; 19+ messages in thread
From: Chen Gang @ 2016-08-29 21:49 UTC (permalink / raw)
  To: Vineet Gupta, Arnd Bergmann
  Cc: akpm, minchan, vbabka, gi-oh.kim, iamjoonsoo.kim, hillf.zj,
	mgorman, mhocko, rientjes, linux-kernel, rth, ink, mattst88,
	linux, catalin.marinas, will.deacon, hskinnemoen, egtvedt,
	realmz6, ysato, rkuo, tony.luck, fenghua.yu, geert, james.hogan,
	ralf, dhowells, deller, benh, paulus, mpe, schwidefsky,
	heiko.carstens, dalias, David S. Miller


On 8/30/16 00:48, Vineet Gupta wrote:
> On 08/29/2016 06:03 AM, Arnd Bergmann wrote:
>> On Sunday 28 August 2016, chengang@emindsoft.com.cn wrote:
>>> From: Chen Gang <chengang@emindsoft.com.cn>
>>>
>>> Also use the same changing to asm-generic, and also use bool variable
>>> instead of int variable for mips, mn10300, parisc and tile related
>>> functions, and also avoid checkpatch.pl to report ERROR.
>>>
>>> Originally, except powerpc and xtensa, all another architectures intend
>>> to return 0 or 1. After this patch, also let powerpc and xtensa return 0
>>> or 1.
>>>
>>> The patch passes cross building for mips and parisc with default config.
>>> All related contents are found by "grep test_bit, grep test_and" under
>>> arch sub-directory.
>>>
>>> Signed-off-by: Chen Gang <gang.chen.5i5j@gmail.com>
>>
>> This seems like a good idea overall, and I'm fine with the asm-generic
>> contents. If there is consensus on changing this, we probably also want
>> to do some other steps:
>>
>> - Change the Documentation/atomic_ops.txt file accordingly
>> - split up the series per architecture (I don't think there are any
>>   interdependencies)
>> - For the architectures on which the definition changes (at least
>>   x86 and ARM), do some more sanity checks and see if there are
>>   noticeable changes in object code, and if so whether it looks
>>   better or worse (I'm guessing it will be better if anything)
> 
> For ARC at least, it will be slightly worse. As bool is promoted to int in various
> expressions, gcc generates an additional EXTB (extend byte) instruction.
> 

Could you provide the related proof?

Or shall I try to analyze about it and get proof?

Thanks.
-- 
Chen Gang (陈刚)

Managing Natural Environments is the Duty of Human Beings.

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions
  2016-08-29 13:03 ` Arnd Bergmann
  2016-08-29 13:46   ` Peter Zijlstra
  2016-08-29 16:48   ` Vineet Gupta
@ 2016-08-29 22:01   ` Chen Gang
  2 siblings, 0 replies; 19+ messages in thread
From: Chen Gang @ 2016-08-29 22:01 UTC (permalink / raw)
  To: Arnd Bergmann
  Cc: akpm, minchan, vbabka, gi-oh.kim, iamjoonsoo.kim, hillf.zj,
	mgorman, mhocko, rientjes, linux-kernel, rth, ink, mattst88,
	vgupta, linux, catalin.marinas, will.deacon, hskinnemoen,
	egtvedt, realmz6, ysato, rkuo, tony.luck, fenghua.yu, geert,
	james.hogan, ralf, dhowells, deller, benh, paulus, mpe,
	schwidefsky, heiko.carstens, dalias, davem, cmetcalf, chris,
	jcmvbkbc, noamc, brueckner, mingo, peterz, linux-arch, Chen Gang


On 8/29/16 21:03, Arnd Bergmann wrote:
> On Sunday 28 August 2016, chengang@emindsoft.com.cn wrote:
>> From: Chen Gang <chengang@emindsoft.com.cn>
>>
>> Also use the same changing to asm-generic, and also use bool variable
>> instead of int variable for mips, mn10300, parisc and tile related
>> functions, and also avoid checkpatch.pl to report ERROR.
>>
>> Originally, except powerpc and xtensa, all another architectures intend
>> to return 0 or 1. After this patch, also let powerpc and xtensa return 0
>> or 1.
>>
>> The patch passes cross building for mips and parisc with default config.
>> All related contents are found by "grep test_bit, grep test_and" under
>> arch sub-directory.
>>
>> Signed-off-by: Chen Gang <gang.chen.5i5j@gmail.com>
> 
> This seems like a good idea overall, and I'm fine with the asm-generic
> contents. If there is consensus on changing this, we probably also want
> to do some other steps:
> 
> - Change the Documentation/atomic_ops.txt file accordingly
> - split up the series per architecture (I don't think there are any
>   interdependencies)
> - For the architectures on which the definition changes (at least
>   x86 and ARM), do some more sanity checks and see if there are
>   noticeable changes in object code, and if so whether it looks
>   better or worse (I'm guessing it will be better if anything)
> - See which architectures can still get converted to using the
>   asm-generic headers instead of providing their own, I think at
>   least for the nonatomic ones, there are a couple.
> 

Thank you for your ideas, suggestions, and completions.

And I guess, at least for arc, I or another related members will try to
check the object code.


Thanks.
-- 
Chen Gang (陈刚)

Managing Natural Environments is the Duty of Human Beings.

^ permalink raw reply	[flat|nested] 19+ messages in thread

* cmsg newgroup alt.sex.fetish.bool (was Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions)
  2016-08-29 21:49     ` Chen Gang
@ 2016-09-01 20:43       ` Al Viro
  2016-09-02 23:33         ` Chen Gang
  0 siblings, 1 reply; 19+ messages in thread
From: Al Viro @ 2016-09-01 20:43 UTC (permalink / raw)
  To: Chen Gang
  Cc: Vineet Gupta, Arnd Bergmann, akpm, minchan, vbabka, gi-oh.kim,
	iamjoonsoo.kim, hillf.zj, mgorman, mhocko, rientjes,
	linux-kernel, rth, ink, mattst88, linux, catalin.marinas,
	will.deacon, hskinnemoen, egtvedt, realmz6, ysato, rkuo,
	tony.luck, fenghua.yu, geert, james.hogan, ralf, dhowells,
	deller, benh, paulus, mpe, schwidefsky, heiko.carstens, dalias,
	David S. Miller

On Tue, Aug 30, 2016 at 05:49:05AM +0800, Chen Gang wrote:

> Could you provide the related proof?
> 
> Or shall I try to analyze about it and get proof?

Can you show a proof that it actually improves anything?  He who proposes
a patch gets to defend it, not the other way round...

Al, bloody annoyed

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: cmsg newgroup alt.sex.fetish.bool (was Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions)
  2016-09-01 20:43       ` cmsg newgroup alt.sex.fetish.bool (was Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions) Al Viro
@ 2016-09-02 23:33         ` Chen Gang
  2016-09-03  0:07           ` Vineet Gupta
  0 siblings, 1 reply; 19+ messages in thread
From: Chen Gang @ 2016-09-02 23:33 UTC (permalink / raw)
  To: Al Viro
  Cc: Vineet Gupta, Arnd Bergmann, akpm, minchan, vbabka, gi-oh.kim,
	iamjoonsoo.kim, hillf.zj, mgorman, mhocko, rientjes,
	linux-kernel, rth, ink, mattst88, linux, catalin.marinas,
	will.deacon, hskinnemoen, egtvedt, realmz6, ysato, rkuo,
	tony.luck, fenghua.yu, geert, james.hogan, ralf, dhowells,
	deller, benh, paulus, mpe, schwidefsky, heiko.carstens, dalias,
	David S. Miller


On 9/2/16 04:43, Al Viro wrote:
> On Tue, Aug 30, 2016 at 05:49:05AM +0800, Chen Gang wrote:
> 
>> Could you provide the related proof?
>>
>> Or shall I try to analyze about it and get proof?
> 
> Can you show a proof that it actually improves anything?  He who proposes
> a patch gets to defend it, not the other way round...
> 
> Al, bloody annoyed
> 

OK, what you said sounds reasonable to me.

It makes the code more readable since they are really pure Boolean
functions, and it makes the functions precisely the same in all archs. But
really, I shall try to prove that it has no negative effect.

e.g. for arc arch. now, I have built the arc raw compiler to build arc
kernel, but excuse me, I plan to finish proof next week, because during
these days, I have to work, buy house, and focus on my father's health.


Thanks.
-- 
Chen Gang (陈刚)

Managing Natural Environments is the Duty of Human Beings.

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: cmsg newgroup alt.sex.fetish.bool (was Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions)
  2016-09-02 23:33         ` Chen Gang
@ 2016-09-03  0:07           ` Vineet Gupta
  2016-09-03 22:36             ` Chen Gang
  0 siblings, 1 reply; 19+ messages in thread
From: Vineet Gupta @ 2016-09-03  0:07 UTC (permalink / raw)
  To: Chen Gang, Al Viro
  Cc: Arnd Bergmann, akpm, minchan, vbabka, gi-oh.kim, iamjoonsoo.kim,
	hillf.zj, mgorman, mhocko, rientjes, linux-kernel, rth, ink,
	mattst88, linux, catalin.marinas, will.deacon, hskinnemoen,
	egtvedt, realmz6, ysato, rkuo, tony.luck, fenghua.yu, geert,
	james.hogan, ralf, dhowells, deller, benh, paulus, mpe,
	schwidefsky, heiko.carstens, dalias, David S. Miller

On 09/02/2016 04:33 PM, Chen Gang wrote:
> On 9/2/16 04:43, Al Viro wrote:
>> > On Tue, Aug 30, 2016 at 05:49:05AM +0800, Chen Gang wrote:
>> > 
>>> >> Could you provide the related proof?
>>> >>
>>> >> Or shall I try to analyze about it and get proof?
>> > 
>> > Can you show a proof that it actually improves anything?  He who proposes
>> > a patch gets to defend it, not the other way round...
>> > 
>> > Al, bloody annoyed
>> > 
> OK, what you said sounds reasonable to me.
>
> It makes the code more readable since they are really pure Boolean
> functions, and let the functions are precisely same in all archs. But
> really, I shall try to prove that it has no negative effect.
>
> e.g. for arc arch. now, I have built the arc raw compiler to build arc
> kernel, but excuse me, I plan to finish proof next week, because during
> these days, I have to work, buy house, and focus on my father's health.

Since you seem to have so much stuff to do I decided to help. I did a quick
compile of kernel with and w/o your changes

bloat-o-meter vmlinux-v4.8rc4-baseline vmlinux-v4.8rc4-bool-in-atomics
add/remove: 0/0 grow/shrink: 1/0 up/down: 6/0 (6)
function                                     old     new   delta
vermagic                                      49      55      +6
Total: Before=5967447, After=5967453, chg 0.000000%

I'm mildly surprised that there is no difference so yeah this change is fine as
far as I'm concerned.

-Vineet

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: cmsg newgroup alt.sex.fetish.bool (was Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions)
  2016-09-03  0:07           ` Vineet Gupta
@ 2016-09-03 22:36             ` Chen Gang
  2016-09-04  1:01               ` Al Viro
  0 siblings, 1 reply; 19+ messages in thread
From: Chen Gang @ 2016-09-03 22:36 UTC (permalink / raw)
  To: Vineet Gupta, Al Viro
  Cc: Arnd Bergmann, akpm, minchan, vbabka, gi-oh.kim, iamjoonsoo.kim,
	hillf.zj, mgorman, mhocko, rientjes, linux-kernel, rth, ink,
	mattst88, linux, catalin.marinas, will.deacon, hskinnemoen,
	egtvedt, realmz6, ysato, rkuo, tony.luck, fenghua.yu, geert,
	james.hogan, ralf, dhowells, deller, benh, paulus, mpe,
	schwidefsky, heiko.carstens, dalias, David S. Miller


On 9/3/16 08:07, Vineet Gupta wrote:
> On 09/02/2016 04:33 PM, Chen Gang wrote:
>> On 9/2/16 04:43, Al Viro wrote:
>>>>
>>>> Can you show a proof that it actually improves anything?  He who proposes
>>>> a patch gets to defend it, not the other way round...
>>>>
>>>> Al, bloody annoyed
>>>>
>> OK, what you said sounds reasonable to me.
>>
>> It makes the code more readable since they are really pure Boolean
>> functions, and let the functions are precisely same in all archs. But
>> really, I shall try to prove that it has no negative effect.
>>
>> e.g. for arc arch. now, I have built the arc raw compiler to build arc
>> kernel, but excuse me, I plan to finish proof next week, because during
>> these days, I have to work, buy house, and focus on my father's health.
> 
> Since you seem to have so much stuff to do I decided to help. I did a quick
> compile of kernel with and w/o your changes
> 
> bloat-o-meter vmlinux-v4.8rc4-baseline vmlinux-v4.8rc4-bool-in-atomics
> add/remove: 0/0 grow/shrink: 1/0 up/down: 6/0 (6)
> function                                     old     new   delta
> vermagic                                      49      55      +6
> Total: Before=5967447, After=5967453, chg 0.000000%
> 
> I'm mildly surprised that there is no difference so yeah this change is fine as
> far as I'm concerned.
> 

Thank you for your reply :-)


And for all: shall I provide the proof for the other archs?

For me, Boolean gives additional chance to compiler to improve the code.
If the compiler can not improve the code, it can treat it as int simply.
So theoretically, at least, Boolean should not be worse than int.


Thanks.
-- 
Chen Gang (陈刚)

Managing Natural Environments is the Duty of Human Beings.

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: cmsg newgroup alt.sex.fetish.bool (was Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions)
  2016-09-03 22:36             ` Chen Gang
@ 2016-09-04  1:01               ` Al Viro
  2016-09-07 15:58                 ` Chen Gang
  2016-09-11 12:27                 ` Chen Gang
  0 siblings, 2 replies; 19+ messages in thread
From: Al Viro @ 2016-09-04  1:01 UTC (permalink / raw)
  To: Chen Gang
  Cc: Vineet Gupta, Arnd Bergmann, akpm, minchan, vbabka, gi-oh.kim,
	iamjoonsoo.kim, hillf.zj, mgorman, mhocko, rientjes,
	linux-kernel, rth, ink, mattst88, linux, catalin.marinas,
	will.deacon, hskinnemoen, egtvedt, realmz6, ysato, rkuo,
	tony.luck, fenghua.yu, geert, james.hogan, ralf, dhowells,
	deller, benh, paulus, mpe, schwidefsky, heiko.carstens, dalias,
	David S. Miller

On Sun, Sep 04, 2016 at 06:36:56AM +0800, Chen Gang wrote:

> And for all: shall I provide the proof for another archs?
> 
> For me, Boolean gives additional chance to compiler to improve the code.

Whereas for compiler it gives nothing.  Not in those cases.

> If the compiler can not improve the code, it can treat it as int simply.
> So theoretically, at least, Boolean should not be worse than int.

Except for pointless code churn and pandering to irrational beliefs, that is...
Please, RTFISO9899 and learn the semantics of _Bool; it's not that complicated.
Start with 6.2.5[2,6] and 6.3.1.2, then look through 6.8.4 and 6.8.5 to
figure out the semantics of conditions in if/while/for.  Note also 6.5.8,
6.5.9, 6.5.13 and 6.5.14 and observe that type of (x > 5 && y < 1) is *NOT* 
_Bool; it's int.

If you can show any improvement or loss in code generation in this case
(static inline int converted to static inline bool), I would really like to
see the details.  As in .config/file/function/gcc version/target architecture.
Optimizer bugs happen, but they should be reported when found, and I would
expect _Bool handling to be _less_ exercised than that of normal logical
expressions, so loss is probably more likely.  And yes, it also should be
reported.

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: cmsg newgroup alt.sex.fetish.bool (was Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions)
  2016-09-04  1:01               ` Al Viro
@ 2016-09-07 15:58                 ` Chen Gang
  2016-09-11 12:27                 ` Chen Gang
  1 sibling, 0 replies; 19+ messages in thread
From: Chen Gang @ 2016-09-07 15:58 UTC (permalink / raw)
  To: Al Viro
  Cc: Vineet Gupta, Arnd Bergmann, akpm, minchan, vbabka, gi-oh.kim,
	iamjoonsoo.kim, hillf.zj, mgorman, mhocko, rientjes,
	linux-kernel, rth, ink, mattst88, linux, catalin.marinas,
	will.deacon, hskinnemoen, egtvedt, realmz6, ysato, rkuo,
	tony.luck, fenghua.yu, geert, james.hogan, ralf, dhowells,
	deller, benh, paulus, mpe, schwidefsky, heiko.carstens, dalias,
	David S. Miller



On 9/4/16 09:01, Al Viro wrote:
> On Sun, Sep 04, 2016 at 06:36:56AM +0800, Chen Gang wrote:
> 
>> And for all: shall I provide the proof for another archs?
>>
>> For me, Boolean gives additional chance to compiler to improve the code.
> 
> Whereas for compiler it gives nothing.  Not in those cases.
> 
>> If the compiler can not improve the code, it can treat it as int simply.
>> So theoretically, at least, Boolean should not be worse than int.
> 
> Except for pointless code churn and pandering to irrational beliefs, that is...
> Please, RTFISO9899 and learn the semantics of _Bool; it's not that complicated.
> Start with 6.2.5[2,6] and 6.3.1.2, then look through 6.8.4 and 6.8.5 to
> figure out the semantics of conditions in if/while/for.  Note also 6.5.8,
> 6.5.9, 6.5.13 and 6.5.14 and observe that type of (x > 5 && y < 1) is *NOT* 
> _Bool; it's int.
> 
> If you can show any improvement or loss in code generation in this case
> (static inline int converted to static inline bool), I would really like to
> see the details.  As in .config/file/function/gcc version/target architecture.
> Optimizer bugs happen, but they should be reported when found, and I would
> expect _Bool handling to be _less_ exercised than that of normal logical
> expressions, so loss is probably more likely.  And yes, it also should be
> reported.
> 

Sorry for replying late, and excuse me, I did not read the details more.
During these days I have no enough time on it (working, buying house,
and catching a cold, but lucky enough that my father's health is OK).

I shall try to read the details and analyze it within next weekend (I
guess I cannot finish within this weekend, sorry again for I really
have no time during these days).

But all together, for me, I guess our discussion cannot 'prevent' using a
bool return value instead of an int return value for pure bool functions in
our kernel. :-)


Thanks.
-- 
Chen Gang (陈刚)

Managing Natural Environments is the Duty of Human Beings.

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: cmsg newgroup alt.sex.fetish.bool (was Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions)
  2016-09-04  1:01               ` Al Viro
  2016-09-07 15:58                 ` Chen Gang
@ 2016-09-11 12:27                 ` Chen Gang
  1 sibling, 0 replies; 19+ messages in thread
From: Chen Gang @ 2016-09-11 12:27 UTC (permalink / raw)
  To: Al Viro
  Cc: Vineet Gupta, Arnd Bergmann, akpm, minchan, vbabka, gi-oh.kim,
	iamjoonsoo.kim, hillf.zj, mgorman, mhocko, rientjes,
	linux-kernel, rth, ink, mattst88, linux, catalin.marinas,
	will.deacon, hskinnemoen, egtvedt, realmz6, ysato, rkuo,
	tony.luck, fenghua.yu, geert, james.hogan, ralf, dhowells,
	deller, benh, paulus, mpe, schwidefsky, heiko.carstens, dalias,
	David S. Miller



On 9/4/16 09:01, Al Viro wrote:
> On Sun, Sep 04, 2016 at 06:36:56AM +0800, Chen Gang wrote:
> 
>> And for all: shall I provide the proof for another archs?
>>
>> For me, Boolean gives additional chance to compiler to improve the code.
> 
> Whereas for compiler it gives nothing.  Not in those cases.
> 
>> If the compiler can not improve the code, it can treat it as int simply.
>> So theoretically, at least, Boolean should not be worse than int.
> 
> Except for pointless code churn and pandering to irrational beliefs, that is...

For me, it is not quite suitable to get conclusion during discussing.

> Please, RTFISO9899 and learn the semantics of _Bool; it's not that complicated.
> Start with 6.2.5[2,6] and 6.3.1.2, then look through 6.8.4 and 6.8.5 to
> figure out the semantics of conditions in if/while/for.  Note also 6.5.8,
> 6.5.9, 6.5.13 and 6.5.14 and observe that type of (x > 5 && y < 1) is *NOT* 
> _Bool; it's int.
>

Yes, what you said above is true to me. But (x > 5 && y < 1) will
return 1 for true and 0 for false, although its type is still int. So
the compiler can simply treat it as a Boolean type internally.

For my original saying, I assume 2 things (excuse me, I did not mention
originally):

 - I assume what I said is for pure Boolean functions, in our case, all
   functions intend to return 'Boolean' value (0 or 1) in most of archs,
   although they use int type as return value.

 - What I said is for compiler's optimization at middle language level
   and at instruction level (internal processing), not for language
   definition (interface for outside -- for C developers).

For me, one way is: in middle language level, bool can be treated as int
or long to be processed firstly, then in instruction level, the compiler
performs additional optimization and qualification for bool.

 - So the compiler has additional chance for optimizing in instruction
   level.

 - Since for pure Boolean functions, it is already sure that the related int
   value must be 0 or 1, and the compiler should be smart enough to know
   that, so the output code needs no additional qualification.

 - So, for me, theoretically, bool is equal or better than int for pure
   bool functions, unless the compiler has performance bugs.

> If you can show any improvement or loss in code generation in this case
> (static inline int converted to static inline bool), I would really like to
> see the details.  As in .config/file/function/gcc version/target architecture.
> Optimizer bugs happen, but they should be reported when found, and I would
> expect _Bool handling to be _less_ exercised than that of normal logical
> expressions, so loss is probably more likely.  And yes, it also should be
> reported.
> 

At least on the x86_64 arch, as far as I know, I can find cases in which
bool is better than int, but I cannot find cases in which it is worse than
int.

Thanks.
-- 
Chen Gang (陈刚)

Managing Natural Environments is the Duty of Human Beings.

^ permalink raw reply	[flat|nested] 19+ messages in thread

end of thread, other threads:[~2016-09-11 12:26 UTC | newest]

Thread overview: 19+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-08-28  5:39 [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions chengang
2016-08-28  7:02 ` kbuild test robot
2016-08-28  7:10 ` kbuild test robot
2016-08-28  7:10 ` kbuild test robot
2016-08-28 14:54   ` Chen Gang
2016-08-29  8:45 ` Michal Hocko
2016-08-29 13:03 ` Arnd Bergmann
2016-08-29 13:46   ` Peter Zijlstra
2016-08-29 13:54     ` Geert Uytterhoeven
2016-08-29 16:48   ` Vineet Gupta
2016-08-29 21:49     ` Chen Gang
2016-09-01 20:43       ` cmsg newgroup alt.sex.fetish.bool (was Re: [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions) Al Viro
2016-09-02 23:33         ` Chen Gang
2016-09-03  0:07           ` Vineet Gupta
2016-09-03 22:36             ` Chen Gang
2016-09-04  1:01               ` Al Viro
2016-09-07 15:58                 ` Chen Gang
2016-09-11 12:27                 ` Chen Gang
2016-08-29 22:01   ` [PATCH] arch: all: include: asm: bitops: Use bool instead of int for all bit test functions Chen Gang

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).