All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/5] rtth: Radix Tree Test Harness fixes
@ 2011-12-17  7:51 Hugh Dickins
  2011-12-17  7:53 ` [PATCH 1/5] rtth: fix Segmentation fault Hugh Dickins
                   ` (5 more replies)
  0 siblings, 6 replies; 7+ messages in thread
From: Hugh Dickins @ 2011-12-17  7:51 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Randy Dunlap, Naveen Yadav, linux-kernel

Several months ago I promised Randy that I would update the
Radix Tree Test Harness rtth, to reflect recent kernel changes.
That fell off my radar for a while, but I've another little
change coming up (taking the radix_tree_path off the stack),
and really needed a working rtth to validate that change.
Here's a set of fixes to rtth; then when I've written that up
(hopefully tomorrow), I'll send a kernel patch and a matching
rtth patch.

Hugh

 Makefile                  |    2 
 linux.c                   |    5 -
 linux/bitops.h            |  122 ++++++++++++++++++++++++++++----
 linux/bitops/__ffs.h      |   43 -----------
 linux/bitops/ffs.h        |   41 ----------
 linux/bitops/ffz.h        |   12 ---
 linux/bitops/find.h       |   13 ---
 linux/bitops/fls.h        |   41 ----------
 linux/bitops/fls64.h      |   14 ---
 linux/bitops/hweight.h    |   11 --
 linux/bitops/le.h         |   53 -------------
 linux/bitops/non-atomic.h |  111 -----------------------------
 linux/notifier.h          |    4 +
 linux/radix-tree.h        |   58 +++++++++++++--
 linux/rcupdate.h          |   15 ---
 linux/types.h             |    9 +-
 main.c                    |    7 +
 radix-tree.c              |  137 ++++++++++++++++++++++++++++++------
 rcupdate.c                |   86 ----------------------
 regression1.c             |   24 ++++--
 test.c                    |    2 
 test.h                    |   26 ++++--
 22 files changed, 329 insertions(+), 507 deletions(-)

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH 1/5] rtth: fix Segmentation fault
  2011-12-17  7:51 [PATCH 0/5] rtth: Radix Tree Test Harness fixes Hugh Dickins
@ 2011-12-17  7:53 ` Hugh Dickins
  2011-12-17  7:55 ` [PATCH 2/5] rtth: fix 32-bit build Hugh Dickins
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Hugh Dickins @ 2011-12-17  7:53 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Randy Dunlap, Naveen Yadav, linux-kernel

rtth radix_tree source was updated but never tested after the update??
Currently gives Segmentation fault after big_gang_check() because we
forget to remove the low bitflag from the node address in rnode:
for which we need indirect_to_ptr() not radix_tree_deref_slot().

Signed-off-by: Hugh Dickins <hughd@google.com>
---

 test.c |    2 +-
 test.h |    5 +++++
 2 files changed, 6 insertions(+), 1 deletion(-)

--- rtth0/test.c	2010-11-10 16:35:29.000000000 -0800
+++ rtth1/test.c	2011-12-16 18:44:02.475897094 -0800
@@ -184,7 +184,7 @@ void verify_tag_consistency(struct radix
 {
 	if (!root->height)
 		return;
-	verify_node(radix_tree_deref_slot((void **)&root->rnode),
+	verify_node(indirect_to_ptr(root->rnode),
 			tag, root->height, !!root_tag_get(root, tag));
 }
 
--- rtth0/test.h	2010-08-25 13:30:45.000000000 -0700
+++ rtth1/test.h	2011-12-16 18:44:02.475897094 -0800
@@ -18,6 +18,11 @@ struct radix_tree_node {
         unsigned long   tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
 
+static inline void *indirect_to_ptr(void *ptr)
+{
+	return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
+}
+
 unsigned long radix_tree_maxindex(unsigned int height);
 int root_tag_get(struct radix_tree_root *root, unsigned int tag);
 /* Upto here */

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH 2/5] rtth: fix 32-bit build
  2011-12-17  7:51 [PATCH 0/5] rtth: Radix Tree Test Harness fixes Hugh Dickins
  2011-12-17  7:53 ` [PATCH 1/5] rtth: fix Segmentation fault Hugh Dickins
@ 2011-12-17  7:55 ` Hugh Dickins
  2011-12-17  7:56 ` [PATCH 3/5] rtth: copy current radix_tree source Hugh Dickins
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Hugh Dickins @ 2011-12-17  7:55 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Randy Dunlap, Naveen Yadav, linux-kernel

rtth has BITS_PER_LONG 64 hard-coded, and so quickly crashes with
Segmentation fault when built 32-bit: fix to depend on sizeof(long).

But then what should we do about the #if BITS_PER_LONG == 64
in linux/bitops/__ffs.h, and the "right shift count >= width of type"
warning we still get if it's a runtime check?  Oh, delete all of that,
the only thing we need is non-atomic.h, which might as well be copied
directly to bitops.h.

Signed-off-by: Hugh Dickins <hughd@google.com>
---

 linux/bitops.h            |  122 +++++++++++++++++++++++++++++++-----
 linux/bitops/__ffs.h      |   43 ------------
 linux/bitops/ffs.h        |   41 ------------
 linux/bitops/ffz.h        |   12 ---
 linux/bitops/find.h       |   13 ---
 linux/bitops/fls.h        |   41 ------------
 linux/bitops/fls64.h      |   14 ----
 linux/bitops/hweight.h    |   11 ---
 linux/bitops/le.h         |   53 ---------------
 linux/bitops/non-atomic.h |  111 --------------------------------
 linux/types.h             |    2 
 11 files changed, 108 insertions(+), 355 deletions(-)

--- rtth1/linux/bitops/__ffs.h	2006-04-22 01:40:19.000000000 -0700
+++ rtth2/linux/bitops/__ffs.h	1969-12-31 16:00:00.000000000 -0800
@@ -1,43 +0,0 @@
-#ifndef _ASM_GENERIC_BITOPS___FFS_H_
-#define _ASM_GENERIC_BITOPS___FFS_H_
-
-#include <asm/types.h>
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static inline unsigned long __ffs(unsigned long word)
-{
-	int num = 0;
-
-#if BITS_PER_LONG == 64
-	if ((word & 0xffffffff) == 0) {
-		num += 32;
-		word >>= 32;
-	}
-#endif
-	if ((word & 0xffff) == 0) {
-		num += 16;
-		word >>= 16;
-	}
-	if ((word & 0xff) == 0) {
-		num += 8;
-		word >>= 8;
-	}
-	if ((word & 0xf) == 0) {
-		num += 4;
-		word >>= 4;
-	}
-	if ((word & 0x3) == 0) {
-		num += 2;
-		word >>= 2;
-	}
-	if ((word & 0x1) == 0)
-		num += 1;
-	return num;
-}
-
-#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */
--- rtth1/linux/bitops/ffs.h	2006-04-22 01:40:19.000000000 -0700
+++ rtth2/linux/bitops/ffs.h	1969-12-31 16:00:00.000000000 -0800
@@ -1,41 +0,0 @@
-#ifndef _ASM_GENERIC_BITOPS_FFS_H_
-#define _ASM_GENERIC_BITOPS_FFS_H_
-
-/**
- * ffs - find first bit set
- * @x: the word to search
- *
- * This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-static inline int ffs(int x)
-{
-	int r = 1;
-
-	if (!x)
-		return 0;
-	if (!(x & 0xffff)) {
-		x >>= 16;
-		r += 16;
-	}
-	if (!(x & 0xff)) {
-		x >>= 8;
-		r += 8;
-	}
-	if (!(x & 0xf)) {
-		x >>= 4;
-		r += 4;
-	}
-	if (!(x & 3)) {
-		x >>= 2;
-		r += 2;
-	}
-	if (!(x & 1)) {
-		x >>= 1;
-		r += 1;
-	}
-	return r;
-}
-
-#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */
--- rtth1/linux/bitops/ffz.h	2006-04-22 01:40:19.000000000 -0700
+++ rtth2/linux/bitops/ffz.h	1969-12-31 16:00:00.000000000 -0800
@@ -1,12 +0,0 @@
-#ifndef _ASM_GENERIC_BITOPS_FFZ_H_
-#define _ASM_GENERIC_BITOPS_FFZ_H_
-
-/*
- * ffz - find first zero in word.
- * @word: The word to search
- *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-#define ffz(x)  __ffs(~(x))
-
-#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */
--- rtth1/linux/bitops/find.h	2006-04-22 01:40:19.000000000 -0700
+++ rtth2/linux/bitops/find.h	1969-12-31 16:00:00.000000000 -0800
@@ -1,13 +0,0 @@
-#ifndef _ASM_GENERIC_BITOPS_FIND_H_
-#define _ASM_GENERIC_BITOPS_FIND_H_
-
-extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
-		size, unsigned long offset);
-
-extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
-		long size, unsigned long offset);
-
-#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
-#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
-
-#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
--- rtth1/linux/bitops/fls.h	2006-04-22 01:40:19.000000000 -0700
+++ rtth2/linux/bitops/fls.h	1969-12-31 16:00:00.000000000 -0800
@@ -1,41 +0,0 @@
-#ifndef _ASM_GENERIC_BITOPS_FLS_H_
-#define _ASM_GENERIC_BITOPS_FLS_H_
-
-/**
- * fls - find last (most-significant) bit set
- * @x: the word to search
- *
- * This is defined the same way as ffs.
- * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
- */
-
-static inline int fls(int x)
-{
-	int r = 32;
-
-	if (!x)
-		return 0;
-	if (!(x & 0xffff0000u)) {
-		x <<= 16;
-		r -= 16;
-	}
-	if (!(x & 0xff000000u)) {
-		x <<= 8;
-		r -= 8;
-	}
-	if (!(x & 0xf0000000u)) {
-		x <<= 4;
-		r -= 4;
-	}
-	if (!(x & 0xc0000000u)) {
-		x <<= 2;
-		r -= 2;
-	}
-	if (!(x & 0x80000000u)) {
-		x <<= 1;
-		r -= 1;
-	}
-	return r;
-}
-
-#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
--- rtth1/linux/bitops/fls64.h	2006-04-22 01:40:19.000000000 -0700
+++ rtth2/linux/bitops/fls64.h	1969-12-31 16:00:00.000000000 -0800
@@ -1,14 +0,0 @@
-#ifndef _ASM_GENERIC_BITOPS_FLS64_H_
-#define _ASM_GENERIC_BITOPS_FLS64_H_
-
-#include <asm/types.h>
-
-static inline int fls64(__u64 x)
-{
-	__u32 h = x >> 32;
-	if (h)
-		return fls(h) + 32;
-	return fls(x);
-}
-
-#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
--- rtth1/linux/bitops/hweight.h	2006-04-22 01:40:19.000000000 -0700
+++ rtth2/linux/bitops/hweight.h	1969-12-31 16:00:00.000000000 -0800
@@ -1,11 +0,0 @@
-#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
-#define _ASM_GENERIC_BITOPS_HWEIGHT_H_
-
-#include <asm/types.h>
-
-extern unsigned int hweight32(unsigned int w);
-extern unsigned int hweight16(unsigned int w);
-extern unsigned int hweight8(unsigned int w);
-extern unsigned long hweight64(__u64 w);
-
-#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
--- rtth1/linux/bitops/le.h	2006-04-22 01:40:19.000000000 -0700
+++ rtth2/linux/bitops/le.h	1969-12-31 16:00:00.000000000 -0800
@@ -1,53 +0,0 @@
-#ifndef _ASM_GENERIC_BITOPS_LE_H_
-#define _ASM_GENERIC_BITOPS_LE_H_
-
-#include <asm/types.h>
-#include <asm/byteorder.h>
-
-#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
-#define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
-
-#if defined(__LITTLE_ENDIAN)
-
-#define generic_test_le_bit(nr, addr) test_bit(nr, addr)
-#define generic___set_le_bit(nr, addr) __set_bit(nr, addr)
-#define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr)
-
-#define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr)
-#define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr)
-
-#define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr)
-#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr)
-
-#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset)
-
-#elif defined(__BIG_ENDIAN)
-
-#define generic_test_le_bit(nr, addr) \
-	test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-#define generic___set_le_bit(nr, addr) \
-	__set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-#define generic___clear_le_bit(nr, addr) \
-	__clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-
-#define generic_test_and_set_le_bit(nr, addr) \
-	test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-#define generic_test_and_clear_le_bit(nr, addr) \
-	test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-
-#define generic___test_and_set_le_bit(nr, addr) \
-	__test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-#define generic___test_and_clear_le_bit(nr, addr) \
-	__test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-
-extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
-		unsigned long size, unsigned long offset);
-
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-
-#define generic_find_first_zero_le_bit(addr, size) \
-        generic_find_next_zero_le_bit((addr), (size), 0)
-
-#endif /* _ASM_GENERIC_BITOPS_LE_H_ */
--- rtth1/linux/bitops/non-atomic.h	2006-04-22 01:40:19.000000000 -0700
+++ rtth2/linux/bitops/non-atomic.h	1969-12-31 16:00:00.000000000 -0800
@@ -1,111 +0,0 @@
-#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
-#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
-
-#include <asm/types.h>
-
-#define BITOP_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
-#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
-
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __set_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
-	*p  |= mask;
-}
-
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
-	*p &= ~mask;
-}
-
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __change_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
-	*p ^= mask;
-}
-
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail.  You must protect multiple accesses with a lock.
- */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-	unsigned long old = *p;
-
-	*p = old | mask;
-	return (old & mask) != 0;
-}
-
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail.  You must protect multiple accesses with a lock.
- */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-	unsigned long old = *p;
-
-	*p = old & ~mask;
-	return (old & mask) != 0;
-}
-
-/* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
-					    volatile unsigned long *addr)
-{
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-	unsigned long old = *p;
-
-	*p = old ^ mask;
-	return (old & mask) != 0;
-}
-
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static inline int test_bit(int nr, const volatile unsigned long *addr)
-{
-	return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
-}
-
-#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
--- rtth1/linux/bitops.h	2006-04-22 01:39:48.000000000 -0700
+++ rtth2/linux/bitops.h	2011-12-16 18:44:03.515896619 -0800
@@ -1,19 +1,111 @@
-#ifndef _ASM_GENERIC_BITOPS_H_
-#define _ASM_GENERIC_BITOPS_H_
+#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
 
-/*
- * For the benefit of those who are trying to port Linux to another
- * architecture, here are some C-language equivalents.  You should
- * recode these in the native assembly language, if at all possible.
- * 
- * C language equivalents written by Theodore Ts'o, 9/26/92
+#include <asm/types.h>
+
+#define BITOP_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
+#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
  */
+static inline void __set_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BITOP_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+	*p  |= mask;
+}
+
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BITOP_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+	*p &= ~mask;
+}
 
-#include <linux/bitops/non-atomic.h>
-#include <linux/bitops/__ffs.h>
-#include <linux/bitops/ffz.h>
-#include <linux/bitops/fls.h>
-#include <linux/bitops/fls64.h>
-#include <linux/bitops/find.h>
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __change_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BITOP_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+	*p ^= mask;
+}
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BITOP_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+	unsigned long old = *p;
+
+	*p = old | mask;
+	return (old & mask) != 0;
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BITOP_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+	unsigned long old = *p;
+
+	*p = old & ~mask;
+	return (old & mask) != 0;
+}
+
+/* WARNING: non atomic and it can be reordered! */
+static inline int __test_and_change_bit(int nr,
+					    volatile unsigned long *addr)
+{
+	unsigned long mask = BITOP_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+	unsigned long old = *p;
+
+	*p = old ^ mask;
+	return (old & mask) != 0;
+}
+
+/**
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline int test_bit(int nr, const volatile unsigned long *addr)
+{
+	return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
 
-#endif /* _ASM_GENERIC_BITOPS_H */
+#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
--- rtth1/linux/types.h	2010-08-25 13:30:45.000000000 -0700
+++ rtth2/linux/types.h	2011-12-16 18:44:03.515896619 -0800
@@ -4,7 +4,7 @@
 #define __nocast
 #define __read_mostly
 
-#define BITS_PER_LONG 64
+#define BITS_PER_LONG (sizeof(long) * 8)
 
 typedef unsigned __nocast gfp_t;
 

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH 3/5] rtth: copy current radix_tree source
  2011-12-17  7:51 [PATCH 0/5] rtth: Radix Tree Test Harness fixes Hugh Dickins
  2011-12-17  7:53 ` [PATCH 1/5] rtth: fix Segmentation fault Hugh Dickins
  2011-12-17  7:55 ` [PATCH 2/5] rtth: fix 32-bit build Hugh Dickins
@ 2011-12-17  7:56 ` Hugh Dickins
  2011-12-17  7:57 ` [PATCH 4/5] rtth: maintain nr_allocated atomically Hugh Dickins
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Hugh Dickins @ 2011-12-17  7:56 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Randy Dunlap, Naveen Yadav, linux-kernel

Update rtth's radix-tree.c and radix-tree.h with versions from 3.2 kernel
(omitting radix_tree_indirect_to_ptr() which will vanish soon after).
As before, just a very few differences for the build here, with more
hacks in types.h and now notifier.h to minimize those.

Similarly update regression1.c's find_get_pages() loop to match
that in mm/filemap.c (but we shouldn't see any exceptional entries),
and test.h's definitions and radix_tree_node (mostly just whitespace).

Signed-off-by: Hugh Dickins <hughd@google.com>
---

 linux/notifier.h   |    4 +
 linux/radix-tree.h |   58 ++++++++++++++++--
 linux/types.h      |    7 +-
 radix-tree.c       |  137 ++++++++++++++++++++++++++++++++++++-------
 regression1.c      |   20 ++++--
 test.h             |   21 +++---
 6 files changed, 204 insertions(+), 43 deletions(-)

--- rtth2/linux/notifier.h	1969-12-31 16:00:00.000000000 -0800
+++ rtth3/linux/notifier.h	2011-12-16 18:44:04.551896997 -0800
@@ -0,0 +1,4 @@
+#ifndef _NOTIFIER_H
+#define _NOTIFIER_H
+
+#endif
--- rtth2/linux/radix-tree.h	2010-11-10 16:35:29.000000000 -0800
+++ rtth3/linux/radix-tree.h	2011-12-16 18:44:04.551896997 -0800
@@ -24,7 +24,6 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/rcupdate.h>
-#include <linux/gfp.h>
 
 /*
  * An indirect pointer (root->rnode pointing to a radix_tree_node, rather
@@ -40,7 +39,15 @@
  * when it is shrunk, before we rcu free the node. See shrink code for
  * details.
  */
-#define RADIX_TREE_INDIRECT_PTR	1
+#define RADIX_TREE_INDIRECT_PTR		1
+/*
+ * A common use of the radix tree is to store pointers to struct pages;
+ * but shmem/tmpfs needs also to store swap entries in the same tree:
+ * those are marked as exceptional entries to distinguish them.
+ * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
+ */
+#define RADIX_TREE_EXCEPTIONAL_ENTRY	2
+#define RADIX_TREE_EXCEPTIONAL_SHIFT	2
 
 static inline int radix_tree_is_indirect_ptr(void *ptr)
 {
@@ -55,7 +62,7 @@ static inline int radix_tree_is_indirect
 struct radix_tree_root {
 	unsigned int		height;
 	gfp_t			gfp_mask;
-	struct radix_tree_node	*rnode;
+	struct radix_tree_node	__rcu *rnode;
 };
 
 #define RADIX_TREE_INIT(mask)	{					\
@@ -144,6 +151,24 @@ static inline void *radix_tree_deref_slo
 }
 
 /**
+ * radix_tree_deref_slot_protected	- dereference a slot without RCU lock but with tree lock held
+ * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
+ * Returns:	item that was stored in that slot with any direct pointer flag
+ *		removed.
+ *
+ * Similar to radix_tree_deref_slot but only used during migration when a pages
+ * mapping is being moved. The caller does not hold the RCU read lock but it
+ * must hold the tree lock to prevent parallel updates.
+ */
+#if 0
+static inline void *radix_tree_deref_slot_protected(void **pslot,
+							spinlock_t *treelock)
+{
+	return rcu_dereference_protected(*pslot, lockdep_is_held(treelock));
+}
+#endif
+
+/**
  * radix_tree_deref_retry	- check radix_tree_deref_slot
  * @arg:	pointer returned by radix_tree_deref_slot
  * Returns:	0 if retry is not required, otherwise retry is required
@@ -156,6 +181,28 @@ static inline int radix_tree_deref_retry
 }
 
 /**
+ * radix_tree_exceptional_entry	- radix_tree_deref_slot gave exceptional entry?
+ * @arg:	value returned by radix_tree_deref_slot
+ * Returns:	0 if well-aligned pointer, non-0 if exceptional entry.
+ */
+static inline int radix_tree_exceptional_entry(void *arg)
+{
+	/* Not unlikely because radix_tree_exception often tested first */
+	return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
+}
+
+/**
+ * radix_tree_exception	- radix_tree_deref_slot returned either exception?
+ * @arg:	value returned by radix_tree_deref_slot
+ * Returns:	0 if well-aligned pointer, non-0 if either kind of exception.
+ */
+static inline int radix_tree_exception(void *arg)
+{
+	return unlikely((unsigned long)arg &
+		(RADIX_TREE_INDIRECT_PTR | RADIX_TREE_EXCEPTIONAL_ENTRY));
+}
+
+/**
  * radix_tree_replace_slot	- replace item in a slot
  * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
  * @item:	new item to store in the slot.
@@ -176,8 +223,8 @@ void *radix_tree_delete(struct radix_tre
 unsigned int
 radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
 			unsigned long first_index, unsigned int max_items);
-unsigned int
-radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
+unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
+			void ***results, unsigned long *indices,
 			unsigned long first_index, unsigned int max_items);
 unsigned long radix_tree_next_hole(struct radix_tree_root *root,
 				unsigned long index, unsigned long max_scan);
@@ -204,6 +251,7 @@ unsigned long radix_tree_range_tag_if_ta
 		unsigned long nr_to_tag,
 		unsigned int fromtag, unsigned int totag);
 int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
+unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
 
 static inline void radix_tree_preload_end(void)
 {
--- rtth2/linux/types.h	2011-12-16 18:44:03.515896619 -0800
+++ rtth3/linux/types.h	2011-12-16 18:44:04.551896997 -0800
@@ -1,11 +1,14 @@
 #ifndef _TYPES_H
 #define _TYPES_H
 
-#define __nocast
+#define __rcu
 #define __read_mostly
 
 #define BITS_PER_LONG (sizeof(long) * 8)
 
-typedef unsigned __nocast gfp_t;
+#define uninitialized_var(x) x = x
+
+typedef unsigned gfp_t;
+#include <linux/gfp.h>
 
 #endif
--- rtth2/radix-tree.c	2011-01-24 22:15:32.000000000 -0800
+++ rtth3/radix-tree.c	2011-12-16 18:44:04.551896997 -0800
@@ -26,7 +26,7 @@
 #include <linux/radix-tree.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
-/* #include <linux/notifier.h> */
+#include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/string.h>
 #include <linux/bitops.h>
@@ -49,7 +49,7 @@ struct radix_tree_node {
 	unsigned int	height;		/* Height from the bottom */
 	unsigned int	count;
 	struct rcu_head	rcu_head;
-	void		*slots[RADIX_TREE_MAP_SIZE];
+	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
 	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
 
@@ -576,7 +576,6 @@ int radix_tree_tag_get(struct radix_tree
 {
 	unsigned int height, shift;
 	struct radix_tree_node *node;
-	int saw_unset_tag = 0;
 
 	/* check the root's tag bit */
 	if (!root_tag_get(root, tag))
@@ -603,15 +602,10 @@ int radix_tree_tag_get(struct radix_tree
 			return 0;
 
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
-
-		/*
-		 * This is just a debug check.  Later, we can bale as soon as
-		 * we see an unset tag.
-		 */
 		if (!tag_get(node, tag, offset))
-			saw_unset_tag = 1;
+			return 0;
 		if (height == 1)
-			return !!tag_get(node, tag, offset);
+			return 1;
 		node = rcu_dereference_raw(node->slots[offset]);
 		shift -= RADIX_TREE_MAP_SHIFT;
 		height--;
@@ -823,8 +817,8 @@ unsigned long radix_tree_prev_hole(struc
 EXPORT_SYMBOL(radix_tree_prev_hole);
 
 static unsigned int
-__lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
-	unsigned int max_items, unsigned long *next_index)
+__lookup(struct radix_tree_node *slot, void ***results, unsigned long *indices,
+	unsigned long index, unsigned int max_items, unsigned long *next_index)
 {
 	unsigned int nr_found = 0;
 	unsigned int shift, height;
@@ -857,12 +851,16 @@ __lookup(struct radix_tree_node *slot, v
 
 	/* Bottom level: grab some items */
 	for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) {
-		index++;
 		if (slot->slots[i]) {
-			results[nr_found++] = &(slot->slots[i]);
-			if (nr_found == max_items)
+			results[nr_found] = &(slot->slots[i]);
+			if (indices)
+				indices[nr_found] = index;
+			if (++nr_found == max_items) {
+				index++;
 				goto out;
+			}
 		}
+		index++;
 	}
 out:
 	*next_index = index;
@@ -918,8 +916,8 @@ radix_tree_gang_lookup(struct radix_tree
 
 		if (cur_index > max_index)
 			break;
-		slots_found = __lookup(node, (void ***)results + ret, cur_index,
-					max_items - ret, &next_index);
+		slots_found = __lookup(node, (void ***)results + ret, NULL,
+				cur_index, max_items - ret, &next_index);
 		nr_found = 0;
 		for (i = 0; i < slots_found; i++) {
 			struct radix_tree_node *slot;
@@ -944,6 +942,7 @@ EXPORT_SYMBOL(radix_tree_gang_lookup);
  *	radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
  *	@root:		radix tree root
  *	@results:	where the results of the lookup are placed
+ *	@indices:	where their indices should be placed (but usually NULL)
  *	@first_index:	start the lookup from this key
  *	@max_items:	place up to this many items at *results
  *
@@ -958,7 +957,8 @@ EXPORT_SYMBOL(radix_tree_gang_lookup);
  *	protection, radix_tree_deref_slot may fail requiring a retry.
  */
 unsigned int
-radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
+radix_tree_gang_lookup_slot(struct radix_tree_root *root,
+			void ***results, unsigned long *indices,
 			unsigned long first_index, unsigned int max_items)
 {
 	unsigned long max_index;
@@ -974,6 +974,8 @@ radix_tree_gang_lookup_slot(struct radix
 		if (first_index > 0)
 			return 0;
 		results[0] = (void **)&root->rnode;
+		if (indices)
+			indices[0] = 0;
 		return 1;
 	}
 	node = indirect_to_ptr(node);
@@ -987,8 +989,9 @@ radix_tree_gang_lookup_slot(struct radix
 
 		if (cur_index > max_index)
 			break;
-		slots_found = __lookup(node, results + ret, cur_index,
-					max_items - ret, &next_index);
+		slots_found = __lookup(node, results + ret,
+				indices ? indices + ret : NULL,
+				cur_index, max_items - ret, &next_index);
 		ret += slots_found;
 		if (next_index == 0)
 			break;
@@ -1194,6 +1197,98 @@ radix_tree_gang_lookup_tag_slot(struct r
 }
 EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
 
+#if defined(CONFIG_SHMEM) && defined(CONFIG_SWAP)
+#include <linux/sched.h> /* for cond_resched() */
+
+/*
+ * This linear search is at present only useful to shmem_unuse_inode().
+ */
+static unsigned long __locate(struct radix_tree_node *slot, void *item,
+			      unsigned long index, unsigned long *found_index)
+{
+	unsigned int shift, height;
+	unsigned long i;
+
+	height = slot->height;
+	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
+
+	for ( ; height > 1; height--) {
+		i = (index >> shift) & RADIX_TREE_MAP_MASK;
+		for (;;) {
+			if (slot->slots[i] != NULL)
+				break;
+			index &= ~((1UL << shift) - 1);
+			index += 1UL << shift;
+			if (index == 0)
+				goto out;	/* 32-bit wraparound */
+			i++;
+			if (i == RADIX_TREE_MAP_SIZE)
+				goto out;
+		}
+
+		shift -= RADIX_TREE_MAP_SHIFT;
+		slot = rcu_dereference_raw(slot->slots[i]);
+		if (slot == NULL)
+			goto out;
+	}
+
+	/* Bottom level: check items */
+	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
+		if (slot->slots[i] == item) {
+			*found_index = index + i;
+			index = 0;
+			goto out;
+		}
+	}
+	index += RADIX_TREE_MAP_SIZE;
+out:
+	return index;
+}
+
+/**
+ *	radix_tree_locate_item - search through radix tree for item
+ *	@root:		radix tree root
+ *	@item:		item to be found
+ *
+ *	Returns index where item was found, or -1 if not found.
+ *	Caller must hold no lock (since this time-consuming function needs
+ *	to be preemptible), and must check afterwards if item is still there.
+ */
+unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
+{
+	struct radix_tree_node *node;
+	unsigned long max_index;
+	unsigned long cur_index = 0;
+	unsigned long found_index = -1;
+
+	do {
+		rcu_read_lock();
+		node = rcu_dereference_raw(root->rnode);
+		if (!radix_tree_is_indirect_ptr(node)) {
+			rcu_read_unlock();
+			if (node == item)
+				found_index = 0;
+			break;
+		}
+
+		node = indirect_to_ptr(node);
+		max_index = radix_tree_maxindex(node->height);
+		if (cur_index > max_index)
+			break;
+
+		cur_index = __locate(node, item, cur_index, &found_index);
+		rcu_read_unlock();
+		cond_resched();
+	} while (cur_index != 0 && cur_index <= max_index);
+
+	return found_index;
+}
+#else
+unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
+{
+	return -1;
+}
+#endif /* CONFIG_SHMEM && CONFIG_SWAP */
 
 /**
  *	radix_tree_shrink    -    shrink height of a radix tree to minimal
@@ -1418,5 +1513,5 @@ void __init radix_tree_init(void)
 			SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
 			radix_tree_node_ctor);
 	radix_tree_init_maxindex();
-	/* hotcpu_notifier(radix_tree_callback, 0); */
+	hotcpu_notifier(radix_tree_callback, 0);
 }
--- rtth2/regression1.c	2010-11-10 16:35:48.000000000 -0800
+++ rtth3/regression1.c	2011-12-16 18:44:04.551896997 -0800
@@ -86,7 +86,7 @@ static unsigned find_get_pages(unsigned
 	rcu_read_lock();
 restart:
 	nr_found = radix_tree_gang_lookup_slot(&mt_tree,
-				(void ***)pages, start, nr_pages);
+				(void ***)pages, NULL, start, nr_pages);
 	ret = 0;
 	for (i = 0; i < nr_found; i++) {
 		struct page *page;
@@ -95,10 +95,20 @@ repeat:
 		if (unlikely(!page))
 			continue;
 
-		if (radix_tree_deref_retry(page)) {
-			if (ret)
-				start = pages[ret-1]->index;
-			goto restart;
+		if (radix_tree_exception(page)) {
+			if (radix_tree_deref_retry(page)) {
+				/*
+				 * Transient condition which can only trigger
+				 * when entry at index 0 moves out of or back
+				 * to root: none yet gotten, safe to restart.
+				 */
+				assert((start | i) == 0);
+				goto restart;
+			}
+			/*
+			 * No exceptional entries are inserted in this test.
+			 */
+			assert(0);
 		}
 
 		pthread_mutex_lock(&page->lock);
--- rtth2/test.h	2011-12-16 18:44:02.475897094 -0800
+++ rtth3/test.h	2011-12-16 18:44:04.551896997 -0800
@@ -4,18 +4,19 @@
 #include <linux/rcupdate.h>
 
 /* hack: keep this in synch with radix-tree.c */
-#define RADIX_TREE_MAP_SHIFT 3
-#define RADIX_TREE_MAP_SIZE     (1UL << RADIX_TREE_MAP_SHIFT)
-#define RADIX_TREE_MAP_MASK     (RADIX_TREE_MAP_SIZE-1)
-#define RADIX_TREE_TAG_LONGS    \
-        ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
+#define RADIX_TREE_MAP_SHIFT	3	/* For more stressful testing */
+#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
+#define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)
+
+#define RADIX_TREE_TAG_LONGS	\
+	((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
 
 struct radix_tree_node {
-        unsigned int    height;         /* Height from the bottom */
-        unsigned int    count;
-        struct rcu_head rcu_head;
-        void            *slots[RADIX_TREE_MAP_SIZE];
-        unsigned long   tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
+	unsigned int	height;		/* Height from the bottom */
+	unsigned int	count;
+	struct rcu_head	rcu_head;
+	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
+	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
 
 static inline void *indirect_to_ptr(void *ptr)

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH 4/5] rtth: maintain nr_allocated atomically
  2011-12-17  7:51 [PATCH 0/5] rtth: Radix Tree Test Harness fixes Hugh Dickins
                   ` (2 preceding siblings ...)
  2011-12-17  7:56 ` [PATCH 3/5] rtth: copy current radix_tree source Hugh Dickins
@ 2011-12-17  7:57 ` Hugh Dickins
  2011-12-17  7:59 ` [PATCH 5/5] rtth: advance to userspace-rcu-0.6.7 Hugh Dickins
  2011-12-19  6:43 ` [PATCH 6/5] rtth: copy latest radix_tree source Hugh Dickins
  5 siblings, 0 replies; 7+ messages in thread
From: Hugh Dickins @ 2011-12-17  7:57 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Randy Dunlap, Naveen Yadav, linux-kernel

Someone has been worrying about radix_tree_node leaks, and inserted a
lot of nr_allocated printfs; but it often came out negative because it
was not manipulated atomically.  We're already using userspace-rcu, so
use its uatomic implementation on nr_allocated (but the count still
doesn't go down to zero at the end).

Signed-off-by: Hugh Dickins <hughd@google.com>
---

 linux.c |    5 +++--
 main.c  |    4 +++-
 2 files changed, 6 insertions(+), 3 deletions(-)

--- rtth3/linux.c	2010-08-25 13:30:45.000000000 -0700
+++ rtth4/linux.c	2011-12-16 18:44:05.587897075 -0800
@@ -6,6 +6,7 @@
 
 #include <linux/mempool.h>
 #include <linux/slab.h>
+#include <urcu/uatomic_arch.h>
 
 int nr_allocated;
 
@@ -35,14 +36,14 @@ void *kmem_cache_alloc(struct kmem_cache
 	void *ret = malloc(cachep->size);
 	if (cachep->ctor)
 		cachep->ctor(ret);
-	nr_allocated++;
+	uatomic_inc(&nr_allocated);
 	return ret;
 }
 
 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
 	assert(objp);
-	nr_allocated--;
+	uatomic_dec(&nr_allocated);
 	memset(objp, 0, cachep->size);
 	free(objp);
 }
--- rtth3/main.c	2011-01-24 22:12:10.000000000 -0800
+++ rtth4/main.c	2011-12-16 18:44:05.587897075 -0800
@@ -261,8 +261,10 @@ int main()
 
 	regression1_test();
 	regression2_test();
-
 	single_thread_tests();
 
+	sleep(1);
+	printf("after sleep(1): %d allocated\n", nr_allocated);
+
 	exit(0);
 }

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH 5/5] rtth: advance to userspace-rcu-0.6.7
  2011-12-17  7:51 [PATCH 0/5] rtth: Radix Tree Test Harness fixes Hugh Dickins
                   ` (3 preceding siblings ...)
  2011-12-17  7:57 ` [PATCH 4/5] rtth: maintain nr_allocated atomically Hugh Dickins
@ 2011-12-17  7:59 ` Hugh Dickins
  2011-12-19  6:43 ` [PATCH 6/5] rtth: copy latest radix_tree source Hugh Dickins
  5 siblings, 0 replies; 7+ messages in thread
From: Hugh Dickins @ 2011-12-17  7:59 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Randy Dunlap, Naveen Yadav, linux-kernel

Up to here I've been building rtth with userspace-rcu-0.5.4,
since 0.6 introduces a conflicting declaration of struct rcu_head:
rtth has its own little implementation of call_rcu(), with the
comment /* switch to urcu implementation when it is merged. */

userspace-rcu-0.6 (available since June) implements call_rcu() where
0.5 did not, so remove rtth's rcupdate.c and conflicting declarations,
converting to rely on userspace-rcu-0.6.7 for call_rcu().

And now the nr_allocated count of radix_tree_nodes does go down to zero.

Signed-off-by: Hugh Dickins <hughd@google.com>
---

 Makefile         |    2 -
 linux.c          |    2 -
 linux/rcupdate.h |   15 -------
 main.c           |    3 +
 rcupdate.c       |   86 ---------------------------------------------
 regression1.c    |    4 +-
 6 files changed, 6 insertions(+), 106 deletions(-)

--- rtth4/Makefile	2011-01-24 22:12:10.000000000 -0800
+++ rtth5/Makefile	2011-12-16 18:44:06.623896843 -0800
@@ -2,7 +2,7 @@
 CFLAGS += -I. -g -Wall -D_LGPL_SOURCE
 LDFLAGS += -lpthread -lurcu
 TARGETS = main
-OFILES = main.o rcupdate.o radix-tree.o linux.o test.o tag_check.o regression1.o regression2.o
+OFILES = main.o radix-tree.o linux.o test.o tag_check.o regression1.o regression2.o
 
 targets: $(TARGETS)
 
--- rtth4/linux/rcupdate.h	2010-11-10 16:35:29.000000000 -0800
+++ rtth5/linux/rcupdate.h	2011-12-16 18:44:06.623896843 -0800
@@ -3,21 +3,6 @@
 
 #include <urcu.h>
 
-void rcupdate_init(void);
-void rcupdate_thread_init(void);
-void rcupdate_thread_exit(void);
-
-/**
- * struct rcu_head - callback structure for use with RCU
- * @next: next update requests in a list
- * @func: actual update function to call after the grace period.
- */
-struct rcu_head {
-        struct rcu_head *next;
-        void (*func)(struct rcu_head *head);
-};
-
 #define rcu_dereference_raw(p) rcu_dereference(p)
-void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *head));
 
 #endif
--- rtth4/linux.c	2011-12-16 18:44:05.587897075 -0800
+++ rtth5/linux.c	2011-12-16 18:44:05.587897075 -0800
@@ -6,7 +6,7 @@
 
 #include <linux/mempool.h>
 #include <linux/slab.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 
 int nr_allocated;
 
--- rtth4/main.c	2011-12-16 18:44:05.587897075 -0800
+++ rtth5/main.c	2011-12-16 18:44:06.623896843 -0800
@@ -256,7 +256,7 @@ static void single_thread_tests(void)
 
 int main()
 {
-	rcupdate_init();
+	rcu_register_thread();
 	radix_tree_init();
 
 	regression1_test();
@@ -265,6 +265,7 @@ int main()
 
 	sleep(1);
 	printf("after sleep(1): %d allocated\n", nr_allocated);
+	rcu_unregister_thread();
 
 	exit(0);
 }
--- rtth4/rcupdate.c	2010-11-10 16:35:48.000000000 -0800
+++ rtth5/rcupdate.c	1969-12-31 16:00:00.000000000 -0800
@@ -1,86 +0,0 @@
-#include <linux/rcupdate.h>
-#include <pthread.h>
-#include <stdio.h>
-#include <assert.h>
-
-static pthread_mutex_t rculock = PTHREAD_MUTEX_INITIALIZER;
-static struct rcu_head *rcuhead_global = NULL;
-static __thread int nr_rcuhead = 0;
-static __thread struct rcu_head *rcuhead = NULL;
-static __thread struct rcu_head *rcutail = NULL;
-
-static pthread_cond_t rcu_worker_cond = PTHREAD_COND_INITIALIZER;
-
-/* switch to urcu implementation when it is merged. */
-void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *head))
-{
-	head->func = func;
-	head->next = rcuhead;
-	rcuhead = head;
-	if (!rcutail)
-		rcutail = head;
-	nr_rcuhead++;
-	if (nr_rcuhead >= 1000) {
-		int signal = 0;
-
-		pthread_mutex_lock(&rculock);
-		if (!rcuhead_global)
-			signal = 1;
-		rcutail->next = rcuhead_global;
-		rcuhead_global = head;
-		pthread_mutex_unlock(&rculock);
-
-		nr_rcuhead = 0;
-		rcuhead = NULL;
-		rcutail = NULL;
-
-		if (signal) {
-			pthread_cond_signal(&rcu_worker_cond);
-		}
-	}
-}
-
-static void *rcu_worker(void *arg)
-{
-	struct rcu_head *r;
-
-	rcupdate_thread_init();
-
-	while (1) {
-		pthread_mutex_lock(&rculock);
-		while (!rcuhead_global) {
-			pthread_cond_wait(&rcu_worker_cond, &rculock);
-		}
-		r = rcuhead_global;
-		rcuhead_global = NULL;
-
-		pthread_mutex_unlock(&rculock);
-
-		synchronize_rcu();
-
-		while (r) {
-			struct rcu_head *tmp = r->next;
-			r->func(r);
-			r = tmp;
-		}
-	}
-
-	rcupdate_thread_exit();
-
-	return NULL;
-}
-
-static pthread_t worker_thread;
-void rcupdate_init(void)
-{
-	pthread_create(&worker_thread, NULL, rcu_worker, NULL);
-}
-
-void rcupdate_thread_init(void)
-{
-	rcu_register_thread();
-}
-void rcupdate_thread_exit(void)
-{
-	rcu_unregister_thread();
-}
--- rtth4/regression1.c	2011-12-16 18:44:04.551896997 -0800
+++ rtth5/regression1.c	2011-12-16 18:44:06.627896873 -0800
@@ -135,7 +135,7 @@ static pthread_barrier_t worker_barrier;
 
 static void *regression1_fn(void *arg)
 {
-	rcupdate_thread_init();
+	rcu_register_thread();
 
 	if (pthread_barrier_wait(&worker_barrier) ==
 			PTHREAD_BARRIER_SERIAL_THREAD) {
@@ -180,7 +180,7 @@ static void *regression1_fn(void *arg)
 		}
 	}
 
-	rcupdate_thread_exit();
+	rcu_unregister_thread();
 
 	return NULL;
 }

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH 6/5] rtth: copy latest radix_tree source
  2011-12-17  7:51 [PATCH 0/5] rtth: Radix Tree Test Harness fixes Hugh Dickins
                   ` (4 preceding siblings ...)
  2011-12-17  7:59 ` [PATCH 5/5] rtth: advance to userspace-rcu-0.6.7 Hugh Dickins
@ 2011-12-19  6:43 ` Hugh Dickins
  5 siblings, 0 replies; 7+ messages in thread
From: Hugh Dickins @ 2011-12-19  6:43 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Randy Dunlap, Naveen Yadav, linux-kernel

Update rtth's radix-tree.c and test.h from the kernel version using parent
pointer in struct radix_tree_node instead of radix_tree_path array on stack.

Signed-off-by: Hugh Dickins <hughd@google.com>
---

 radix-tree.c |  148 +++++++++++++++++++++++--------------------------
 test.h       |    5 +
 2 files changed, 74 insertions(+), 79 deletions(-)

--- rtth5/radix-tree.c	2011-12-16 18:44:04.551896997 -0800
+++ rtth6/radix-tree.c	2011-12-16 18:44:07.659897014 -0800
@@ -48,16 +48,14 @@
 struct radix_tree_node {
 	unsigned int	height;		/* Height from the bottom */
 	unsigned int	count;
-	struct rcu_head	rcu_head;
+	union {
+		struct radix_tree_node *parent;	/* Used when ascending tree */
+		struct rcu_head	rcu_head;	/* Used when freeing node */
+	};
 	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
 	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
 
-struct radix_tree_path {
-	struct radix_tree_node *node;
-	int offset;
-};
-
 #define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
 #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
 					  RADIX_TREE_MAP_SHIFT))
@@ -256,6 +254,7 @@ unsigned long radix_tree_maxindex(unsign
 static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
 {
 	struct radix_tree_node *node;
+	struct radix_tree_node *slot;
 	unsigned int height;
 	int tag;
 
@@ -274,18 +273,23 @@ static int radix_tree_extend(struct radi
 		if (!(node = radix_tree_node_alloc(root)))
 			return -ENOMEM;
 
-		/* Increase the height.  */
-		node->slots[0] = indirect_to_ptr(root->rnode);
-
 		/* Propagate the aggregated tag info into the new root */
 		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
 			if (root_tag_get(root, tag))
 				tag_set(node, tag, 0);
 		}
 
+		/* Increase the height.  */
 		newheight = root->height+1;
 		node->height = newheight;
 		node->count = 1;
+		node->parent = NULL;
+		slot = root->rnode;
+		if (newheight > 1) {
+			slot = indirect_to_ptr(slot);
+			slot->parent = node;
+		}
+		node->slots[0] = slot;
 		node = ptr_to_indirect(node);
 		rcu_assign_pointer(root->rnode, node);
 		root->height = newheight;
@@ -331,6 +335,7 @@ int radix_tree_insert(struct radix_tree_
 			if (!(slot = radix_tree_node_alloc(root)))
 				return -ENOMEM;
 			slot->height = height;
+			slot->parent = node;
 			if (node) {
 				rcu_assign_pointer(node->slots[offset], slot);
 				node->count++;
@@ -504,47 +509,41 @@ EXPORT_SYMBOL(radix_tree_tag_set);
 void *radix_tree_tag_clear(struct radix_tree_root *root,
 			unsigned long index, unsigned int tag)
 {
-	/*
-	 * The radix tree path needs to be one longer than the maximum path
-	 * since the "list" is null terminated.
-	 */
-	struct radix_tree_path path[RADIX_TREE_MAX_PATH + 1], *pathp = path;
+	struct radix_tree_node *node = NULL;
 	struct radix_tree_node *slot = NULL;
 	unsigned int height, shift;
+	int uninitialized_var(offset);
 
 	height = root->height;
 	if (index > radix_tree_maxindex(height))
 		goto out;
 
-	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
-	pathp->node = NULL;
+	shift = height * RADIX_TREE_MAP_SHIFT;
 	slot = indirect_to_ptr(root->rnode);
 
-	while (height > 0) {
-		int offset;
-
+	while (shift) {
 		if (slot == NULL)
 			goto out;
 
+		shift -= RADIX_TREE_MAP_SHIFT;
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
-		pathp[1].offset = offset;
-		pathp[1].node = slot;
+		node = slot;
 		slot = slot->slots[offset];
-		pathp++;
-		shift -= RADIX_TREE_MAP_SHIFT;
-		height--;
 	}
 
 	if (slot == NULL)
 		goto out;
 
-	while (pathp->node) {
-		if (!tag_get(pathp->node, tag, pathp->offset))
+	while (node) {
+		if (!tag_get(node, tag, offset))
 			goto out;
-		tag_clear(pathp->node, tag, pathp->offset);
-		if (any_tag_set(pathp->node, tag))
+		tag_clear(node, tag, offset);
+		if (any_tag_set(node, tag))
 			goto out;
-		pathp--;
+
+		index >>= RADIX_TREE_MAP_SHIFT;
+		offset = index & RADIX_TREE_MAP_MASK;
+		node = node->parent;
 	}
 
 	/* clear the root's tag bit */
@@ -646,8 +645,7 @@ unsigned long radix_tree_range_tag_if_ta
 		unsigned int iftag, unsigned int settag)
 {
 	unsigned int height = root->height;
-	struct radix_tree_path path[height];
-	struct radix_tree_path *pathp = path;
+	struct radix_tree_node *node = NULL;
 	struct radix_tree_node *slot;
 	unsigned int shift;
 	unsigned long tagged = 0;
@@ -671,14 +669,8 @@ unsigned long radix_tree_range_tag_if_ta
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 	slot = indirect_to_ptr(root->rnode);
 
-	/*
-	 * we fill the path from (root->height - 2) to 0, leaving the index at
-	 * (root->height - 1) as a terminator. Zero the node in the terminator
-	 * so that we can use this to end walk loops back up the path.
-	 */
-	path[height - 1].node = NULL;
-
 	for (;;) {
+		unsigned long upindex;
 		int offset;
 
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
@@ -686,12 +678,10 @@ unsigned long radix_tree_range_tag_if_ta
 			goto next;
 		if (!tag_get(slot, iftag, offset))
 			goto next;
-		if (height > 1) {
+		if (shift) {
 			/* Go down one level */
-			height--;
 			shift -= RADIX_TREE_MAP_SHIFT;
-			path[height - 1].node = slot;
-			path[height - 1].offset = offset;
+			node = slot;
 			slot = slot->slots[offset];
 			continue;
 		}
@@ -701,15 +691,21 @@ unsigned long radix_tree_range_tag_if_ta
 		tag_set(slot, settag, offset);
 
 		/* walk back up the path tagging interior nodes */
-		pathp = &path[0];
-		while (pathp->node) {
+		upindex = index;
+		while (node) {
+			upindex >>= RADIX_TREE_MAP_SHIFT;
+			offset = upindex & RADIX_TREE_MAP_MASK;
+
 			/* stop if we find a node with the tag already set */
-			if (tag_get(pathp->node, settag, pathp->offset))
+			if (tag_get(node, settag, offset))
 				break;
-			tag_set(pathp->node, settag, pathp->offset);
-			pathp++;
+			tag_set(node, settag, offset);
+			node = node->parent;
 		}
 
+		/* optimization: no need to walk up from this node again */
+		node = NULL;
+
 next:
 		/* Go to next item at level determined by 'shift' */
 		index = ((index >> shift) + 1) << shift;
@@ -724,8 +720,7 @@ next:
 			 * last_index is guaranteed to be in the tree, what
 			 * we do below cannot wander astray.
 			 */
-			slot = path[height - 1].node;
-			height++;
+			slot = slot->parent;
 			shift += RADIX_TREE_MAP_SHIFT;
 		}
 	}
@@ -1299,7 +1294,7 @@ static inline void radix_tree_shrink(str
 	/* try to shrink tree height */
 	while (root->height > 0) {
 		struct radix_tree_node *to_free = root->rnode;
-		void *newptr;
+		struct radix_tree_node *slot;
 
 		BUG_ON(!radix_tree_is_indirect_ptr(to_free));
 		to_free = indirect_to_ptr(to_free);
@@ -1320,10 +1315,12 @@ static inline void radix_tree_shrink(str
 		 * (to_free->slots[0]), it will be safe to dereference the new
 		 * one (root->rnode) as far as dependent read barriers go.
 		 */
-		newptr = to_free->slots[0];
-		if (root->height > 1)
-			newptr = ptr_to_indirect(newptr);
-		root->rnode = newptr;
+		slot = to_free->slots[0];
+		if (root->height > 1) {
+			slot->parent = NULL;
+			slot = ptr_to_indirect(slot);
+		}
+		root->rnode = slot;
 		root->height--;
 
 		/*
@@ -1363,16 +1360,12 @@ static inline void radix_tree_shrink(str
  */
 void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 {
-	/*
-	 * The radix tree path needs to be one longer than the maximum path
-	 * since the "list" is null terminated.
-	 */
-	struct radix_tree_path path[RADIX_TREE_MAX_PATH + 1], *pathp = path;
+	struct radix_tree_node *node = NULL;
 	struct radix_tree_node *slot = NULL;
 	struct radix_tree_node *to_free;
 	unsigned int height, shift;
 	int tag;
-	int offset;
+	int uninitialized_var(offset);
 
 	height = root->height;
 	if (index > radix_tree_maxindex(height))
@@ -1385,39 +1378,35 @@ void *radix_tree_delete(struct radix_tre
 		goto out;
 	}
 	slot = indirect_to_ptr(slot);
-
-	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
-	pathp->node = NULL;
+	shift = height * RADIX_TREE_MAP_SHIFT;
 
 	do {
 		if (slot == NULL)
 			goto out;
 
-		pathp++;
+		shift -= RADIX_TREE_MAP_SHIFT;
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
-		pathp->offset = offset;
-		pathp->node = slot;
+		node = slot;
 		slot = slot->slots[offset];
-		shift -= RADIX_TREE_MAP_SHIFT;
-		height--;
-	} while (height > 0);
+	} while (shift);
 
 	if (slot == NULL)
 		goto out;
 
 	/*
-	 * Clear all tags associated with the just-deleted item
+	 * Clear all tags associated with the item to be deleted.
+	 * This way of doing it would be inefficient, but seldom is any set.
 	 */
 	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
-		if (tag_get(pathp->node, tag, pathp->offset))
+		if (tag_get(node, tag, offset))
 			radix_tree_tag_clear(root, index, tag);
 	}
 
 	to_free = NULL;
 	/* Now free the nodes we do not need anymore */
-	while (pathp->node) {
-		pathp->node->slots[pathp->offset] = NULL;
-		pathp->node->count--;
+	while (node) {
+		node->slots[offset] = NULL;
+		node->count--;
 		/*
 		 * Queue the node for deferred freeing after the
 		 * last reference to it disappears (set NULL, above).
@@ -1425,17 +1414,20 @@ void *radix_tree_delete(struct radix_tre
 		if (to_free)
 			radix_tree_node_free(to_free);
 
-		if (pathp->node->count) {
-			if (pathp->node == indirect_to_ptr(root->rnode))
+		if (node->count) {
+			if (node == indirect_to_ptr(root->rnode))
 				radix_tree_shrink(root);
 			goto out;
 		}
 
 		/* Node with zero slots in use so free it */
-		to_free = pathp->node;
-		pathp--;
+		to_free = node;
 
+		index >>= RADIX_TREE_MAP_SHIFT;
+		offset = index & RADIX_TREE_MAP_MASK;
+		node = node->parent;
 	}
+
 	root_tag_clear_all(root);
 	root->height = 0;
 	root->rnode = NULL;
--- rtth5/test.h	2011-12-16 18:44:04.551896997 -0800
+++ rtth6/test.h	2011-12-16 18:44:07.659897014 -0800
@@ -14,7 +14,10 @@
 struct radix_tree_node {
 	unsigned int	height;		/* Height from the bottom */
 	unsigned int	count;
-	struct rcu_head	rcu_head;
+	union {
+		struct radix_tree_node *parent;	/* Used when ascending tree */
+		struct rcu_head	rcu_head;	/* Used when freeing node */
+	};
 	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
 	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2011-12-19  6:43 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-12-17  7:51 [PATCH 0/5] rtth: Radix Tree Test Harness fixes Hugh Dickins
2011-12-17  7:53 ` [PATCH 1/5] rtth: fix Segmentation fault Hugh Dickins
2011-12-17  7:55 ` [PATCH 2/5] rtth: fix 32-bit build Hugh Dickins
2011-12-17  7:56 ` [PATCH 3/5] rtth: copy current radix_tree source Hugh Dickins
2011-12-17  7:57 ` [PATCH 4/5] rtth: maintain nr_allocated atomically Hugh Dickins
2011-12-17  7:59 ` [PATCH 5/5] rtth: advance to userspace-rcu-0.6.7 Hugh Dickins
2011-12-19  6:43 ` [PATCH 6/5] rtth: copy latest radix_tree source Hugh Dickins

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.