From: Matthew Wilcox <willy@infradead.org>
To: unlisted-recipients:; (no To-header on input)
Cc: Matthew Wilcox <mawilcox@microsoft.com>,
	Ross Zwisler <ross.zwisler@linux.intel.com>,
	Jens Axboe <axboe@kernel.dk>, Rehas Sachdeva <aquannie@gmail.com>,
	linux-mm@kvack.org, linux-fsdevel@vger.kernel.org,
	linux-f2fs-devel@lists.sourceforge.net,
	linux-nilfs@vger.kernel.org, linux-btrfs@vger.kernel.org,
	linux-xfs@vger.kernel.org, linux-usb@vger.kernel.org,
	linux-kernel@vger.kernel.org
Subject: [PATCH v4 09/73] xarray: Add xa_load
Date: Tue,  5 Dec 2017 16:40:55 -0800
Message-ID: <20171206004159.3755-10-willy@infradead.org>
In-Reply-To: <20171206004159.3755-1-willy@infradead.org>

From: Matthew Wilcox <mawilcox@microsoft.com>

This first function in the XArray API brings with it a lot of support
infrastructure.  The advanced API is based around the xa_state, which is
a more capable version of the radix_tree_iter.

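Purely as an illustration (this sketch is not part of the patch; it simply
mirrors the xa_load() implementation added below), a lookup with the
advanced API looks roughly like this:

  void *example_load(struct xarray *xa, unsigned long index)
  {
          XA_STATE(xas, xa, index);       /* operation state lives on the stack */
          void *entry;

          rcu_read_lock();
          do {
                  entry = xas_load(&xas);         /* walk to xa_index */
          } while (xas_retry(&xas, entry));       /* raced with a store: restart */
          rcu_read_unlock();

          return entry;
  }
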
As the test-suite demonstrates, it is possible to use the xarray and
radix tree APIs on the same data structure.

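As a rough sketch of that (again not part of the patch; it condenses
check_xa_load() from the new test), an entry inserted through the radix
tree API is visible through xa_load():

  RADIX_TREE(array, GFP_KERNEL);

  radix_tree_insert(&array, 5, xa_mk_value(5));
  assert(xa_to_value(xa_load(&array, 5)) == 5);   /* value stored via the old API */
  assert(xa_load(&array, 6) == NULL);             /* empty index returns NULL */
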
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
 include/linux/xarray.h                      | 235 ++++++++++++++++++++++++++++
 lib/radix-tree.c                            |  43 -----
 lib/xarray.c                                | 160 +++++++++++++++++++
 tools/testing/radix-tree/.gitignore         |   1 +
 tools/testing/radix-tree/Makefile           |   7 +-
 tools/testing/radix-tree/linux/radix-tree.h |   1 -
 tools/testing/radix-tree/linux/rcupdate.h   |   1 +
 tools/testing/radix-tree/linux/xarray.h     |   1 +
 tools/testing/radix-tree/xarray-test.c      |  56 +++++++
 9 files changed, 459 insertions(+), 46 deletions(-)
 create mode 100644 tools/testing/radix-tree/xarray-test.c

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 1aff0069458b..af52ba75e6a3 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -21,6 +21,8 @@
 #include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
 
@@ -67,6 +69,8 @@ static inline void xa_init(struct xarray *xa)
 	__xa_init(xa, 0);
 }
 
+void *xa_load(struct xarray *, unsigned long index);
+
 #define BITS_PER_XA_VALUE	(BITS_PER_LONG - 1)
 
 /**
@@ -158,6 +162,46 @@ struct xa_node {
 	unsigned long	tags[XA_MAX_TAGS][XA_TAG_LONGS];
 };
 
+#ifdef XA_DEBUG
+void xa_dump(const struct xarray *);
+void xa_dump_node(const struct xa_node *);
+#define XA_BUG_ON(node, x) do { \
+		if ((x) && (node)) \
+			xa_dump_node(node); \
+		BUG_ON(x); \
+	} while (0)
+#else
+#define XA_BUG_ON(node, x)	do { } while (0)
+#endif
+
+/* Private */
+static inline void *xa_head(struct xarray *xa)
+{
+	return rcu_dereference_check(xa->xa_head, xa_lock_held(xa));
+}
+
+/* Private */
+static inline void *xa_head_locked(struct xarray *xa)
+{
+	return rcu_dereference_protected(xa->xa_head, xa_lock_held(xa));
+}
+
+/* Private */
+static inline void *xa_entry(struct xarray *xa,
+				const struct xa_node *node, unsigned int offset)
+{
+	XA_BUG_ON(node, offset >= XA_CHUNK_SIZE);
+	return rcu_dereference_check(node->slots[offset], xa_lock_held(xa));
+}
+
+/* Private */
+static inline void *xa_entry_locked(struct xarray *xa,
+				const struct xa_node *node, unsigned int offset)
+{
+	XA_BUG_ON(node, offset >= XA_CHUNK_SIZE);
+	return rcu_dereference_protected(node->slots[offset], xa_lock_held(xa));
+}
+
 /*
  * Internal entries have the bottom two bits set to the value 10b.  Most
  * internal entries are pointers to the next node in the tree.  Since the
@@ -189,6 +233,12 @@ static inline bool xa_is_internal(void *entry)
 	return ((unsigned long)entry & 3) == 2;
 }
 
+/* Private */
+static inline struct xa_node *xa_to_node(void *entry)
+{
+	return (struct xa_node *)((unsigned long)entry & ~3UL);
+}
+
 /* Private */
 static inline bool xa_is_node(void *entry)
 {
@@ -222,4 +272,189 @@ static inline bool xa_is_sibling(void *entry)
 
 #define XA_RETRY_ENTRY		xa_mk_internal(256)
 
+/**
+ * xa_is_retry() - Is the entry a retry entry?
+ * @entry: Entry retrieved from the XArray
+ *
+ * Return: %true if the entry is a retry entry.
+ */
+static inline bool xa_is_retry(void *entry)
+{
+	return unlikely(entry == XA_RETRY_ENTRY);
+}
+
+/**
+ * typedef xa_update_node_t - A callback function from the XArray.
+ * @node: The node which is being processed
+ *
+ * This function is called every time the XArray updates the count of
+ * present and value entries in a node.  It allows advanced users to
+ * maintain the private_list in the node.
+ */
+typedef void (*xa_update_node_t)(struct xa_node *node);
+
+/*
+ * The xa_state is opaque to its users.  It contains the various pieces
+ * of state involved in the current operation on the XArray.  It should be
+ * declared on the stack and passed between the various internal routines.
+ * The various elements in it should not be accessed directly, but only
+ * through the provided accessor functions.  The below documentation is for
+ * the benefit of those working on the code, not for users of the XArray.
+ *
+ * @xa_node usually points to the xa_node containing the slot we're operating
+ * on (and @xa_offset is the offset in the slots array).  If there is a
+ * single entry in the array at index 0, there are no allocated xa_nodes to
+ * point to, and so we store %NULL in @xa_node.  @xa_node is set to
+ * the value %XAS_RESTART if the xa_state is not walked to the correct
+ * position in the tree of nodes for this operation.  If an error occurs
+ * during an operation, it is set to an %XAS_ERROR value.  If we run off the
+ * end of the allocated nodes, it is set to %XAS_BOUNDS.
+ */
+struct xa_state {
+	struct xarray *xa;
+	unsigned long xa_index;
+	unsigned char xa_shift;
+	unsigned char xa_sibs;
+	unsigned char xa_offset;
+	unsigned char xa_pad;		/* Helps gcc generate better code */
+	struct xa_node *xa_node;
+	struct xa_node *xa_alloc;
+	xa_update_node_t xa_update;
+};
+
+/*
+ * We encode errnos in the xas->xa_node.  If an error has happened, we need to
+ * drop the lock to fix it, and once we've done so the xa_state is invalid.
+ */
+#define XAS_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 1) | 1))
+#define XAS_RESTART	 XAS_ERROR(0)
+#define XAS_BOUNDS	 ((struct xa_node *)2UL)
+
+#define __XA_STATE(array, index)  {			\
+	.xa = array,					\
+	.xa_index = index,				\
+	.xa_shift = 0,					\
+	.xa_sibs = 0,					\
+	.xa_offset = 0,					\
+	.xa_pad = 0,					\
+	.xa_node = XAS_RESTART,				\
+	.xa_alloc = NULL,				\
+	.xa_update = NULL				\
+}
+
+/**
+ * XA_STATE() - Declare an XArray operation state.
+ * @name: Name of this operation state (usually xas).
+ * @index: Initial index of interest.
+ *
+ * Declare and initialise an xa_state on the stack.
+ */
+#define XA_STATE(name, array, index)			\
+	struct xa_state name = __XA_STATE(array, index)
+
+#define xas_tagged(xas, tag)	xa_tagged((xas)->xa, (tag))
+#define xas_trylock(xas)	xa_trylock((xas)->xa)
+#define xas_lock(xas)		xa_lock((xas)->xa)
+#define xas_unlock(xas)		xa_unlock((xas)->xa)
+#define xas_lock_bh(xas)	xa_lock_bh((xas)->xa)
+#define xas_unlock_bh(xas)	xa_unlock_bh((xas)->xa)
+#define xas_lock_irq(xas)	xa_lock_irq((xas)->xa)
+#define xas_unlock_irq(xas)	xa_unlock_irq((xas)->xa)
+#define xas_lock_irqsave(xas, flags) \
+				xa_lock_irqsave((xas)->xa, flags)
+#define xas_unlock_irqrestore(xas, flags) \
+				xa_unlock_irqrestore((xas)->xa, flags)
+
+/**
+ * xas_error() - Return an errno stored in the xa_state.
+ * @xas: XArray operation state.
+ *
+ * Return: 0 if no error has been noted.  A negative errno if one has.
+ */
+static inline int xas_error(const struct xa_state *xas)
+{
+	unsigned long v = (unsigned long)xas->xa_node;
+	return (v & 1) ? -(v >> 1) : 0;
+}
+
+/**
+ * xas_set_err() - Note an error in the xa_state.
+ * @xas: XArray operation state.
+ * @err: Negative error number.
+ *
+ * You can call this function with @err set to 0 to take the xa_state
+ * out of the error state.  The next operation will walk it to the correct
+ * location.
+ */
+static inline void xas_set_err(struct xa_state *xas, long err)
+{
+	xas->xa_node = XAS_ERROR(-err);
+}
+
+/**
+ * xas_invalid() - Is the xas in a retry or error state?
+ * @xas: XArray operation state.
+ *
+ * Return: %true if the xas cannot be used for operations.
+ */
+static inline bool xas_invalid(const struct xa_state *xas)
+{
+	return (unsigned long)xas->xa_node & 3;
+}
+
+/**
+ * xas_valid() - Is the xas a valid cursor into the array?
+ * @xas: XArray operation state.
+ *
+ * Return: %true if the xas can be used for operations.
+ */
+static inline bool xas_valid(const struct xa_state *xas)
+{
+	return !xas_invalid(xas);
+}
+
+/**
+ * xas_retry() - Handle a retry entry.
+ * @xas: XArray operation state.
+ * @entry: Entry from xarray.
+ *
+ * An RCU-protected read may see a retry entry as a side-effect of a
+ * simultaneous modification.  This function sets up the @xas to retry
+ * the walk from the head of the array.
+ *
+ * Return: true if the operation needs to be retried.
+ */
+static inline bool xas_retry(struct xa_state *xas, void *entry)
+{
+	if (!xa_is_retry(entry))
+		return false;
+	xas->xa_node = XAS_RESTART;
+	return true;
+}
+
+void *xas_load(struct xa_state *);
+
+/**
+ * xas_reload() - Refetch an entry from the xarray.
+ * @xas: XArray operation state.
+ *
+ * Use this function to check that a previously loaded entry still has
+ * the same value.  This is useful for the lockless pagecache lookup where
+ * we walk the array with only the RCU lock to protect us, lock the page,
+ * then check that the page hasn't moved since we looked it up.
+ *
+ * The caller guarantees that @xas is still valid.  If it may be in an
+ * error or restart state, call xas_load() instead.
+ *
+ * Return: The entry at this location in the xarray.
+ */
+static inline void *xas_reload(struct xa_state *xas)
+{
+	struct xa_node *node = xas->xa_node;
+
+	if (node)
+		return xa_entry(xas->xa, node, xas->xa_offset);
+	return xa_head(xas->xa);
+}
+
 #endif /* _LINUX_XARRAY_H */
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 930eb7d298d7..a919c60b10a4 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -255,49 +255,6 @@ static unsigned long next_index(unsigned long index,
 }
 
 #ifndef __KERNEL__
-static void dump_node(struct radix_tree_node *node, unsigned long index)
-{
-	unsigned long i;
-
-	pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d exceptional %d\n",
-		node, node->offset, index, index | node_maxindex(node),
-		node->parent,
-		node->tags[0][0], node->tags[1][0], node->tags[2][0],
-		node->shift, node->count, node->exceptional);
-
-	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
-		unsigned long first = index | (i << node->shift);
-		unsigned long last = first | ((1UL << node->shift) - 1);
-		void *entry = node->slots[i];
-		if (!entry)
-			continue;
-		if (entry == RADIX_TREE_RETRY) {
-			pr_debug("radix retry offset %ld indices %lu-%lu parent %p\n",
-					i, first, last, node);
-		} else if (!radix_tree_is_internal_node(entry)) {
-			pr_debug("radix entry %p offset %ld indices %lu-%lu parent %p\n",
-					entry, i, first, last, node);
-		} else if (xa_is_sibling(entry)) {
-			pr_debug("radix sblng %p offset %ld indices %lu-%lu parent %p val %p\n",
-					entry, i, first, last, node,
-					node->slots[xa_to_sibling(entry)]);
-		} else {
-			dump_node(entry_to_node(entry), first);
-		}
-	}
-}
-
-/* For debug */
-static void radix_tree_dump(struct radix_tree_root *root)
-{
-	pr_debug("radix root: %p xa_head %p tags %x\n",
-			root, root->xa_head,
-			root->xa_flags >> ROOT_TAG_SHIFT);
-	if (!radix_tree_is_internal_node(root->xa_head))
-		return;
-	dump_node(entry_to_node(root->xa_head), 0);
-}
-
 static void dump_ida_node(void *entry, unsigned long index)
 {
 	unsigned long i;
diff --git a/lib/xarray.c b/lib/xarray.c
index 67ddcb3e630c..2f77e4c5d0b8 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -31,6 +31,94 @@
  * @entry refers to something stored in a slot in the xarray
  */
 
+/* extracts the offset within this node from the index */
+static unsigned int get_offset(unsigned long index, struct xa_node *node)
+{
+	return (index >> node->shift) & XA_CHUNK_MASK;
+}
+
+static void *set_bounds(struct xa_state *xas)
+{
+	xas->xa_node = XAS_BOUNDS;
+	return NULL;
+}
+
+/*
+ * Starts a walk.  If the @xas is already valid, we assume that it's on
+ * the right path and just return where we've got to.  If we're in an
+ * error state, return NULL.  If the index is outside the current scope
+ * of the xarray, return NULL without changing @xas->xa_node.  Otherwise
+ * set @xas->xa_node to NULL and return the current head of the array.
+ */
+static void *xas_start(struct xa_state *xas)
+{
+	void *entry;
+
+	if (xas_valid(xas))
+		return xas_reload(xas);
+	if (xas_error(xas))
+		return NULL;
+
+	entry = xa_head(xas->xa);
+	if (!xa_is_node(entry)) {
+		if (xas->xa_index)
+			return set_bounds(xas);
+	} else {
+		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
+			return set_bounds(xas);
+	}
+
+	xas->xa_node = NULL;
+	return entry;
+}
+
+static void *xas_descend(struct xa_state *xas, struct xa_node *node)
+{
+	unsigned int offset = get_offset(xas->xa_index, node);
+	void *entry = xa_entry(xas->xa, node, offset);
+
+	if (xa_is_sibling(entry)) {
+		offset = xa_to_sibling(entry);
+		entry = xa_entry(xas->xa, node, offset);
+		/* Move xa_index to the first index of this entry */
+		xas->xa_index = (((xas->xa_index >> node->shift) &
+				  ~XA_CHUNK_MASK) | offset) << node->shift;
+	}
+
+	xas->xa_node = node;
+	xas->xa_offset = offset;
+	return entry;
+}
+
+/**
+ * xas_load() - Load an entry from the XArray (advanced).
+ * @xas: XArray operation state.
+ *
+ * Usually walks the @xas to the appropriate state to load the entry stored
+ * at xa_index.  However, it will do nothing and return NULL if @xas is
+ * holding an error.  If the xa_shift indicates we're operating on a
+ * multislot entry, it will terminate early and potentially return an
+ * internal entry.  xas_load() will never expand the tree (see xas_create()).
+ *
+ * The caller should hold the xa_lock or the RCU lock.
+ *
+ * Return: Usually an entry in the XArray, but see description for exceptions.
+ */
+void *xas_load(struct xa_state *xas)
+{
+	void *entry = xas_start(xas);
+
+	while (xa_is_node(entry)) {
+		struct xa_node *node = xa_to_node(entry);
+
+		if (xas->xa_shift > node->shift)
+			break;
+		entry = xas_descend(xas, node);
+	}
+	return entry;
+}
+EXPORT_SYMBOL_GPL(xas_load);
+
 /**
  * __xa_init() - Initialise an empty XArray.
  * @xa: XArray.
@@ -45,3 +133,75 @@ void __xa_init(struct xarray *xa, gfp_t flags)
 	xa->xa_head = NULL;
 }
 EXPORT_SYMBOL(__xa_init);
+
+/**
+ * xa_load() - Load an entry from an XArray.
+ * @xa: XArray.
+ * @index: index into array.
+ *
+ * Return: The entry at @index in @xa.
+ */
+void *xa_load(struct xarray *xa, unsigned long index)
+{
+	XA_STATE(xas, xa, index);
+	void *entry;
+
+	rcu_read_lock();
+	do {
+		entry = xas_load(&xas);
+	} while (xas_retry(&xas, entry));
+	rcu_read_unlock();
+
+	return entry;
+}
+EXPORT_SYMBOL(xa_load);
+
+#ifdef XA_DEBUG
+void xa_dump_entry(void *entry, unsigned long index)
+{
+	if (!entry)
+		return;
+
+	if (xa_is_value(entry))
+		printk("%lu: value %#lx\n", index, xa_to_value(entry));
+	else if (!xa_is_internal(entry))
+		printk("%lu: %p\n", index, entry);
+	else if (xa_is_node(entry)) {
+		unsigned long i;
+		struct xa_node *node = xa_to_node(entry);
+		printk("node %p %s %d parent %p shift %d count %d "
+			"exceptional %d tags %lx %lx %lx indices %lu-%lu\n",
+			node, node->parent ? "offset" : "max", node->offset,
+			node->parent, node->shift, node->count,
+			node->exceptional,
+			node->tags[0][0], node->tags[1][0], node->tags[2][0],
+			index, index |
+			(((unsigned long)XA_CHUNK_SIZE << node->shift) - 1));
+		for (i = 0; i < XA_CHUNK_SIZE; i++)
+			xa_dump_entry(node->slots[i],
+					index + (i << node->shift));
+	} else if (xa_is_retry(entry))
+		printk("%lu: retry (%ld)\n", index, xa_to_internal(entry));
+	else if (xa_is_sibling(entry))
+		printk("%lu: sibling (%ld)\n", index, xa_to_sibling(entry));
+	else
+		printk("%lu: UNKNOWN ENTRY (%p)\n", index, entry);
+}
+
+void xa_dump_node(const struct xa_node *node)
+{
+	printk("xadn: node %p %s %d parent %p shift %d count %d "
+		"exceptional %d array %p list %p %p tags %lx %lx %lx\n",
+		node, node->parent ? "offset" : "max", node->offset,
+		node->parent, node->shift, node->count,
+		node->exceptional, node->root, node->private_list.prev,
+		node->private_list.next,
+		node->tags[0][0], node->tags[1][0], node->tags[2][0]);
+}
+
+void xa_dump(const struct xarray *xa)
+{
+	printk("xarray: %p %x %p\n", xa, xa->xa_flags, xa->xa_head);
+	xa_dump_entry(xa->xa_head, 0);
+}
+#endif
diff --git a/tools/testing/radix-tree/.gitignore b/tools/testing/radix-tree/.gitignore
index 8d4df7a72a8e..833136896b91 100644
--- a/tools/testing/radix-tree/.gitignore
+++ b/tools/testing/radix-tree/.gitignore
@@ -5,3 +5,4 @@ main
 multiorder
 radix-tree.c
 xarray.c
+xarray-test
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
index 3868bc189199..749ef734a87c 100644
--- a/tools/testing/radix-tree/Makefile
+++ b/tools/testing/radix-tree/Makefile
@@ -3,10 +3,11 @@
 CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE -fsanitize=address
 LDFLAGS += -fsanitize=address
 LDLIBS+= -lpthread -lurcu
-TARGETS = main idr-test multiorder
+TARGETS = main idr-test multiorder xarray-test
 CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o
 OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
-	 tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o
+	 tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o \
+	 xarray-test.o
 
 ifndef SHIFT
 	SHIFT=3
@@ -23,6 +24,8 @@ main:	$(OFILES)
 
 idr-test: idr-test.o $(CORE_OFILES)
 
+xarray-test: xarray-test.o $(CORE_OFILES)
+
 multiorder: multiorder.o $(CORE_OFILES)
 
 clean:
diff --git a/tools/testing/radix-tree/linux/radix-tree.h b/tools/testing/radix-tree/linux/radix-tree.h
index 40c9671ee365..36fb716d5557 100644
--- a/tools/testing/radix-tree/linux/radix-tree.h
+++ b/tools/testing/radix-tree/linux/radix-tree.h
@@ -5,7 +5,6 @@
 #include "generated/map-shift.h"
 #include "linux/bug.h"
 #include "../../../../include/linux/radix-tree.h"
-#include <linux/xarray.h>
 
 extern int kmalloc_verbose;
 extern int test_verbose;
diff --git a/tools/testing/radix-tree/linux/rcupdate.h b/tools/testing/radix-tree/linux/rcupdate.h
index 73ed33658203..25010bf86c1d 100644
--- a/tools/testing/radix-tree/linux/rcupdate.h
+++ b/tools/testing/radix-tree/linux/rcupdate.h
@@ -6,5 +6,6 @@
 
 #define rcu_dereference_raw(p) rcu_dereference(p)
 #define rcu_dereference_protected(p, cond) rcu_dereference(p)
+#define rcu_dereference_check(p, cond) rcu_dereference(p)
 
 #endif
diff --git a/tools/testing/radix-tree/linux/xarray.h b/tools/testing/radix-tree/linux/xarray.h
index df3812cda376..3eaf9596c2a6 100644
--- a/tools/testing/radix-tree/linux/xarray.h
+++ b/tools/testing/radix-tree/linux/xarray.h
@@ -1,2 +1,3 @@
 #include "generated/map-shift.h"
+#define XA_DEBUG
 #include "../../../../include/linux/xarray.h"
diff --git a/tools/testing/radix-tree/xarray-test.c b/tools/testing/radix-tree/xarray-test.c
new file mode 100644
index 000000000000..3f8f19cb3739
--- /dev/null
+++ b/tools/testing/radix-tree/xarray-test.c
@@ -0,0 +1,56 @@
+/*
+ * xarray-test.c: Test the XArray API
+ * Copyright (c) 2017 Microsoft Corporation <mawilcox@microsoft.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#include <linux/bitmap.h>
+#include <linux/xarray.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+#include "test.h"
+
+void check_xa_load(struct xarray *xa)
+{
+	unsigned long i, j;
+
+	for (i = 0; i < 1024; i++) {
+		for (j = 0; j < 1024; j++) {
+			void *entry = xa_load(xa, j);
+			if (j < i)
+				assert(xa_to_value(entry) == j);
+			else
+				assert(!entry);
+		}
+		radix_tree_insert(xa, i, xa_mk_value(i));
+	}
+}
+
+void xarray_checks(void)
+{
+	RADIX_TREE(array, GFP_KERNEL);
+
+	check_xa_load(&array);
+
+	item_kill_tree(&array);
+}
+
+int __weak main(void)
+{
+	radix_tree_init();
+	xarray_checks();
+	radix_tree_cpu_dead(1);
+	rcu_barrier();
+	if (nr_allocated)
+		printf("nr_allocated = %d\n", nr_allocated);
+	return 0;
+}
-- 
2.15.0


WARNING: multiple messages have this Message-ID (diff)
From: Matthew Wilcox <willy@infradead.org>
Cc: Matthew Wilcox <mawilcox@microsoft.com>,
	Ross Zwisler <ross.zwisler@linux.intel.com>,
	Jens Axboe <axboe@kernel.dk>, Rehas Sachdeva <aquannie@gmail.com>,
	linux-mm@kvack.org, linux-fsdevel@vger.kernel.org,
	linux-f2fs-devel@lists.sourceforge.net,
	linux-nilfs@vger.kernel.org, linux-btrfs@vger.kernel.org,
	linux-xfs@vger.kernel.org, linux-usb@vger.kernel.org,
	linux-kernel@vger.kernel.org
Subject: [PATCH v4 09/73] xarray: Add xa_load
Date: Tue,  5 Dec 2017 16:40:55 -0800	[thread overview]
Message-ID: <20171206004159.3755-10-willy@infradead.org> (raw)
In-Reply-To: <20171206004159.3755-1-willy@infradead.org>

From: Matthew Wilcox <mawilcox@microsoft.com>

This first function in the XArray API brings with it a lot of support
infrastructure.  The advanced API is based around the xa_state which is
a more capable version of the radix_tree_iter.

As the test-suite demonstrates, it is possible to use the xarray and
radix tree APIs on the same data structure.

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
 include/linux/xarray.h                      | 235 ++++++++++++++++++++++++++++
 lib/radix-tree.c                            |  43 -----
 lib/xarray.c                                | 160 +++++++++++++++++++
 tools/testing/radix-tree/.gitignore         |   1 +
 tools/testing/radix-tree/Makefile           |   7 +-
 tools/testing/radix-tree/linux/radix-tree.h |   1 -
 tools/testing/radix-tree/linux/rcupdate.h   |   1 +
 tools/testing/radix-tree/linux/xarray.h     |   1 +
 tools/testing/radix-tree/xarray-test.c      |  56 +++++++
 9 files changed, 459 insertions(+), 46 deletions(-)
 create mode 100644 tools/testing/radix-tree/xarray-test.c

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 1aff0069458b..af52ba75e6a3 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -21,6 +21,8 @@
 #include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
 
@@ -67,6 +69,8 @@ static inline void xa_init(struct xarray *xa)
 	__xa_init(xa, 0);
 }
 
+void *xa_load(struct xarray *, unsigned long index);
+
 #define BITS_PER_XA_VALUE	(BITS_PER_LONG - 1)
 
 /**
@@ -158,6 +162,46 @@ struct xa_node {
 	unsigned long	tags[XA_MAX_TAGS][XA_TAG_LONGS];
 };
 
+#ifdef XA_DEBUG
+void xa_dump(const struct xarray *);
+void xa_dump_node(const struct xa_node *);
+#define XA_BUG_ON(node, x) do { \
+		if ((x) && (node)) \
+			xa_dump_node(node); \
+		BUG_ON(x); \
+	} while (0)
+#else
+#define XA_BUG_ON(node, x)	do { } while (0)
+#endif
+
+/* Private */
+static inline void *xa_head(struct xarray *xa)
+{
+	return rcu_dereference_check(xa->xa_head, xa_lock_held(xa));
+}
+
+/* Private */
+static inline void *xa_head_locked(struct xarray *xa)
+{
+	return rcu_dereference_protected(xa->xa_head, xa_lock_held(xa));
+}
+
+/* Private */
+static inline void *xa_entry(struct xarray *xa,
+				const struct xa_node *node, unsigned int offset)
+{
+	XA_BUG_ON(node, offset >= XA_CHUNK_SIZE);
+	return rcu_dereference_check(node->slots[offset], xa_lock_held(xa));
+}
+
+/* Private */
+static inline void *xa_entry_locked(struct xarray *xa,
+				const struct xa_node *node, unsigned int offset)
+{
+	XA_BUG_ON(node, offset >= XA_CHUNK_SIZE);
+	return rcu_dereference_protected(node->slots[offset], xa_lock_held(xa));
+}
+
 /*
  * Internal entries have the bottom two bits set to the value 10b.  Most
  * internal entries are pointers to the next node in the tree.  Since the
@@ -189,6 +233,12 @@ static inline bool xa_is_internal(void *entry)
 	return ((unsigned long)entry & 3) == 2;
 }
 
+/* Private */
+static inline struct xa_node *xa_to_node(void *entry)
+{
+	return (struct xa_node *)((unsigned long)entry & ~3UL);
+}
+
 /* Private */
 static inline bool xa_is_node(void *entry)
 {
@@ -222,4 +272,189 @@ static inline bool xa_is_sibling(void *entry)
 
 #define XA_RETRY_ENTRY		xa_mk_internal(256)
 
+/**
+ * xa_is_retry() - Is the entry a retry entry?
+ * @entry: Entry retrieved from the XArray
+ *
+ * Return: %true if the entry is a retry entry.
+ */
+static inline bool xa_is_retry(void *entry)
+{
+	return unlikely(entry == XA_RETRY_ENTRY);
+}
+
+/**
+ * typedef xa_update_node_t - A callback function from the XArray.
+ * @node: The node which is being processed
+ *
+ * This function is called every time the XArray updates the count of
+ * present and value entries in a node.  It allows advanced users to
+ * maintain the private_list in the node.
+ */
+typedef void (*xa_update_node_t)(struct xa_node *node);
+
+/*
+ * The xa_state is opaque to its users.  It contains various different pieces
+ * of state involved in the current operation on the XArray.  It should be
+ * declared on the stack and passed between the various internal routines.
+ * The various elements in it should not be accessed directly, but only
+ * through the provided accessor functions.  The below documentation is for
+ * the benefit of those working on the code, not for users of the XArray.
+ *
+ * @xa_node usually points to the xa_node containing the slot we're operating
+ * on (and @xa_offset is the offset in the slots array).  If there is a
+ * single entry in the array at index 0, there are no allocated xa_nodes to
+ * point to, and so we store %NULL in @xa_node.  @xa_node is set to
+ * the value %XAS_RESTART if the xa_state is not walked to the correct
+ * position in the tree of nodes for this operation.  If an error occurs
+ * during an operation, it is set to an %XAS_ERROR value.  If we run off the
+ * end of the allocated nodes, it is set to %XAS_BOUNDS.
+ */
+struct xa_state {
+	struct xarray *xa;
+	unsigned long xa_index;
+	unsigned char xa_shift;
+	unsigned char xa_sibs;
+	unsigned char xa_offset;
+	unsigned char xa_pad;		/* Helps gcc generate better code */
+	struct xa_node *xa_node;
+	struct xa_node *xa_alloc;
+	xa_update_node_t xa_update;
+};
+
+/*
+ * We encode errnos in the xas->xa_node.  If an error has happened, we need to
+ * drop the lock to fix it, and once we've done so the xa_state is invalid.
+ */
+#define XAS_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 1) | 1))
+#define XAS_RESTART	 XAS_ERROR(0)
+#define XAS_BOUNDS	 ((struct xa_node *)2UL)
+
+#define __XA_STATE(array, index)  {			\
+	.xa = array,					\
+	.xa_index = index,				\
+	.xa_shift = 0,					\
+	.xa_sibs = 0,					\
+	.xa_offset = 0,					\
+	.xa_pad = 0,					\
+	.xa_node = XAS_RESTART,				\
+	.xa_alloc = NULL,				\
+	.xa_update = NULL				\
+}
+
+/**
+ * XA_STATE() - Declare an XArray operation state.
+ * @name: Name of this operation state (usually xas).
+ * @index: Initial index of interest.
+ *
+ * Declare and initialise an xa_state on the stack.
+ */
+#define XA_STATE(name, array, index)			\
+	struct xa_state name = __XA_STATE(array, index)
+
+#define xas_tagged(xas, tag)	xa_tagged((xas)->xa, (tag))
+#define xas_trylock(xas)	xa_trylock((xas)->xa)
+#define xas_lock(xas)		xa_lock((xas)->xa)
+#define xas_unlock(xas)		xa_unlock((xas)->xa)
+#define xas_lock_bh(xas)	xa_lock_bh((xas)->xa)
+#define xas_unlock_bh(xas)	xa_unlock_bh((xas)->xa)
+#define xas_lock_irq(xas)	xa_lock_irq((xas)->xa)
+#define xas_unlock_irq(xas)	xa_unlock_irq((xas)->xa)
+#define xas_lock_irqsave(xas, flags) \
+				xa_lock_irqsave((xas)->xa, flags)
+#define xas_unlock_irqrestore(xas, flags) \
+				xa_unlock_irqrestore((xas)->xa, flags)
+
+/**
+ * xas_error() - Return an errno stored in the xa_state.
+ * @xas: XArray operation state.
+ *
+ * Return: 0 if no error has been noted.  A negative errno if one has.
+ */
+static inline int xas_error(const struct xa_state *xas)
+{
+	unsigned long v = (unsigned long)xas->xa_node;
+	return (v & 1) ? -(v >> 1) : 0;
+}
+
+/**
+ * xas_set_err() - Note an error in the xa_state.
+ * @xas: XArray operation state.
+ * @err: Negative error number.
+ *
+ * You can call this function with @err set to 0 to take the xa_state
+ * out of the error state.  The next operation will walk it to the correct
+ * location.
+ */
+static inline void xas_set_err(struct xa_state *xas, long err)
+{
+	xas->xa_node = XAS_ERROR(-err);
+}
+
+/**
+ * xas_invalid() - Is the xas in a retry or error state?
+ * @xas: XArray operation state.
+ *
+ * Return: %true if the xas cannot be used for operations.
+ */
+static inline bool xas_invalid(const struct xa_state *xas)
+{
+	return (unsigned long)xas->xa_node & 3;
+}
+
+/**
+ * xas_valid() - Is the xas a valid cursor into the array?
+ * @xas: XArray operation state.
+ *
+ * Return: %true if the xas can be used for operations.
+ */
+static inline bool xas_valid(const struct xa_state *xas)
+{
+	return !xas_invalid(xas);
+}
+
+/**
+ * xas_retry() - Handle a retry entry.
+ * @xas: XArray operation state.
+ * @entry: Entry from xarray.
+ *
+ * An RCU-protected read may see a retry entry as a side-effect of a
+ * simultaneous modification.  This function sets up the @xas to retry
+ * the walk from the head of the array.
+ *
+ * Return: true if the operation needs to be retried.
+ */
+static inline bool xas_retry(struct xa_state *xas, void *entry)
+{
+	if (!xa_is_retry(entry))
+		return false;
+	xas->xa_node = XAS_RESTART;
+	return true;
+}
+
+void *xas_load(struct xa_state *);
+
+/**
+ * xas_reload() - Refetch an entry from the xarray.
+ * @xas: XArray operation state.
+ *
+ * Use this function to check that a previously loaded entry still has
+ * the same value.  This is useful for the lockless pagecache lookup where
+ * we walk the array with only the RCU lock to protect us, lock the page,
+ * then check that the page hasn't moved since we looked it up.
+ *
+ * The caller guarantees that @xas is still valid.  If it may be in an
+ * error or restart state, call xas_load() instead.
+ *
+ * Return: The entry at this location in the xarray.
+ */
+static inline void *xas_reload(struct xa_state *xas)
+{
+	struct xa_node *node = xas->xa_node;
+
+	if (node)
+		return xa_entry(xas->xa, node, xas->xa_offset);
+	return xa_head(xas->xa);
+}
+
 #endif /* _LINUX_XARRAY_H */
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 930eb7d298d7..a919c60b10a4 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -255,49 +255,6 @@ static unsigned long next_index(unsigned long index,
 }
 
 #ifndef __KERNEL__
-static void dump_node(struct radix_tree_node *node, unsigned long index)
-{
-	unsigned long i;
-
-	pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d exceptional %d\n",
-		node, node->offset, index, index | node_maxindex(node),
-		node->parent,
-		node->tags[0][0], node->tags[1][0], node->tags[2][0],
-		node->shift, node->count, node->exceptional);
-
-	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
-		unsigned long first = index | (i << node->shift);
-		unsigned long last = first | ((1UL << node->shift) - 1);
-		void *entry = node->slots[i];
-		if (!entry)
-			continue;
-		if (entry == RADIX_TREE_RETRY) {
-			pr_debug("radix retry offset %ld indices %lu-%lu parent %p\n",
-					i, first, last, node);
-		} else if (!radix_tree_is_internal_node(entry)) {
-			pr_debug("radix entry %p offset %ld indices %lu-%lu parent %p\n",
-					entry, i, first, last, node);
-		} else if (xa_is_sibling(entry)) {
-			pr_debug("radix sblng %p offset %ld indices %lu-%lu parent %p val %p\n",
-					entry, i, first, last, node,
-					node->slots[xa_to_sibling(entry)]);
-		} else {
-			dump_node(entry_to_node(entry), first);
-		}
-	}
-}
-
-/* For debug */
-static void radix_tree_dump(struct radix_tree_root *root)
-{
-	pr_debug("radix root: %p xa_head %p tags %x\n",
-			root, root->xa_head,
-			root->xa_flags >> ROOT_TAG_SHIFT);
-	if (!radix_tree_is_internal_node(root->xa_head))
-		return;
-	dump_node(entry_to_node(root->xa_head), 0);
-}
-
 static void dump_ida_node(void *entry, unsigned long index)
 {
 	unsigned long i;
diff --git a/lib/xarray.c b/lib/xarray.c
index 67ddcb3e630c..2f77e4c5d0b8 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -31,6 +31,94 @@
  * @entry refers to something stored in a slot in the xarray
  */
 
+/* extracts the offset within this node from the index */
+static unsigned int get_offset(unsigned long index, struct xa_node *node)
+{
+	return (index >> node->shift) & XA_CHUNK_MASK;
+}
+
+static void *set_bounds(struct xa_state *xas)
+{
+	xas->xa_node = XAS_BOUNDS;
+	return NULL;
+}
+
+/*
+ * Starts a walk.  If the @xas is already valid, we assume that it's on
+ * the right path and just return where we've got to.  If we're in an
+ * error state, return NULL.  If the index is outside the current scope
+ * of the xarray, return NULL without changing @xas->xa_node.  Otherwise
+ * set @xas->xa_node to NULL and return the current head of the array.
+ */
+static void *xas_start(struct xa_state *xas)
+{
+	void *entry;
+
+	if (xas_valid(xas))
+		return xas_reload(xas);
+	if (xas_error(xas))
+		return NULL;
+
+	entry = xa_head(xas->xa);
+	if (!xa_is_node(entry)) {
+		if (xas->xa_index)
+			return set_bounds(xas);
+	} else {
+		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
+			return set_bounds(xas);
+	}
+
+	xas->xa_node = NULL;
+	return entry;
+}
+
+static void *xas_descend(struct xa_state *xas, struct xa_node *node)
+{
+	unsigned int offset = get_offset(xas->xa_index, node);
+	void *entry = xa_entry(xas->xa, node, offset);
+
+	if (xa_is_sibling(entry)) {
+		offset = xa_to_sibling(entry);
+		entry = xa_entry(xas->xa, node, offset);
+		/* Move xa_index to the first index of this entry */
+		xas->xa_index = (((xas->xa_index >> node->shift) &
+				  ~XA_CHUNK_MASK) | offset) << node->shift;
+	}
+
+	xas->xa_node = node;
+	xas->xa_offset = offset;
+	return entry;
+}
+
+/**
+ * xas_load() - Load an entry from the XArray (advanced).
+ * @xas: XArray operation state.
+ *
+ * Usually walks the @xas to the appropriate state to load the entry stored
+ * at xa_index.  However, it will do nothing and return NULL  if @xas is
+ * holding an error.  If the xa_shift indicates we're operating on a
+ * multislot entry, it will terminate early and potentially return an
+ * internal entry.  xas_load() will never expand the tree (see xas_create()).
+ *
+ * The caller should hold the xa_lock or the RCU lock.
+ *
+ * Return: Usually an entry in the XArray, but see description for exceptions.
+ */
+void *xas_load(struct xa_state *xas)
+{
+	void *entry = xas_start(xas);
+
+	while (xa_is_node(entry)) {
+		struct xa_node *node = xa_to_node(entry);
+
+		if (xas->xa_shift > node->shift)
+			break;
+		entry = xas_descend(xas, node);
+	}
+	return entry;
+}
+EXPORT_SYMBOL_GPL(xas_load);
+
 /**
  * __xa_init() - Initialise an empty XArray.
  * @xa: XArray.
@@ -45,3 +133,75 @@ void __xa_init(struct xarray *xa, gfp_t flags)
 	xa->xa_head = NULL;
 }
 EXPORT_SYMBOL(__xa_init);
+
+/**
+ * xa_load() - Load an entry from an XArray.
+ * @xa: XArray.
+ * @index: index into array.
+ *
+ * Return: The entry at @index in @xa.
+ */
+void *xa_load(struct xarray *xa, unsigned long index)
+{
+	XA_STATE(xas, xa, index);
+	void *entry;
+
+	rcu_read_lock();
+	do {
+		entry = xas_load(&xas);
+	} while (xas_retry(&xas, entry));
+	rcu_read_unlock();
+
+	return entry;
+}
+EXPORT_SYMBOL(xa_load);
+
+#ifdef XA_DEBUG
+void xa_dump_entry(void *entry, unsigned long index)
+{
+	if (!entry)
+		return;
+
+	if (xa_is_value(entry))
+		printk("%lu: value %#lx\n", index, xa_to_value(entry));
+	else if (!xa_is_internal(entry))
+		printk("%lu: %p\n", index, entry);
+	else if (xa_is_node(entry)) {
+		unsigned long i;
+		struct xa_node *node = xa_to_node(entry);
+		printk("node %p %s %d parent %p shift %d count %d "
+			"exceptional %d tags %lx %lx %lx indices %lu-%lu\n",
+			node, node->parent ? "offset" : "max", node->offset,
+			node->parent, node->shift, node->count,
+			node->exceptional,
+			node->tags[0][0], node->tags[1][0], node->tags[2][0],
+			index, index |
+			(((unsigned long)XA_CHUNK_SIZE << node->shift) - 1));
+		for (i = 0; i < XA_CHUNK_SIZE; i++)
+			xa_dump_entry(node->slots[i],
+					index + (i << node->shift));
+	} else if (xa_is_retry(entry))
+		printk("%lu: retry (%ld)\n", index, xa_to_internal(entry));
+	else if (xa_is_sibling(entry))
+		printk("%lu: sibling (%ld)\n", index, xa_to_sibling(entry));
+	else
+		printk("%lu: UNKNOWN ENTRY (%p)\n", index, entry);
+}
+
+void xa_dump_node(const struct xa_node *node)
+{
+	printk("xadn: node %p %s %d parent %p shift %d count %d "
+		"exceptional %d array %p list %p %p tags %lx %lx %lx\n",
+		node, node->parent ? "offset" : "max", node->offset,
+		node->parent, node->shift, node->count,
+		node->exceptional, node->root, node->private_list.prev,
+		node->private_list.next,
+		node->tags[0][0], node->tags[1][0], node->tags[2][0]);
+}
+
+void xa_dump(const struct xarray *xa)
+{
+	printk("xarray: %p %x %p\n", xa, xa->xa_flags, xa->xa_head);
+	xa_dump_entry(xa->xa_head, 0);
+}
+#endif
diff --git a/tools/testing/radix-tree/.gitignore b/tools/testing/radix-tree/.gitignore
index 8d4df7a72a8e..833136896b91 100644
--- a/tools/testing/radix-tree/.gitignore
+++ b/tools/testing/radix-tree/.gitignore
@@ -5,3 +5,4 @@ main
 multiorder
 radix-tree.c
 xarray.c
+xarray-test
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
index 3868bc189199..749ef734a87c 100644
--- a/tools/testing/radix-tree/Makefile
+++ b/tools/testing/radix-tree/Makefile
@@ -3,10 +3,11 @@
 CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE -fsanitize=address
 LDFLAGS += -fsanitize=address
 LDLIBS+= -lpthread -lurcu
-TARGETS = main idr-test multiorder
+TARGETS = main idr-test multiorder xarray-test
 CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o
 OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
-	 tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o
+	 tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o \
+	 xarray-test.o
 
 ifndef SHIFT
 	SHIFT=3
@@ -23,6 +24,8 @@ main:	$(OFILES)
 
 idr-test: idr-test.o $(CORE_OFILES)
 
+xarray-test: idr-test.o $(CORE_OFILES)
+
 multiorder: multiorder.o $(CORE_OFILES)
 
 clean:
diff --git a/tools/testing/radix-tree/linux/radix-tree.h b/tools/testing/radix-tree/linux/radix-tree.h
index 40c9671ee365..36fb716d5557 100644
--- a/tools/testing/radix-tree/linux/radix-tree.h
+++ b/tools/testing/radix-tree/linux/radix-tree.h
@@ -5,7 +5,6 @@
 #include "generated/map-shift.h"
 #include "linux/bug.h"
 #include "../../../../include/linux/radix-tree.h"
-#include <linux/xarray.h>
 
 extern int kmalloc_verbose;
 extern int test_verbose;
diff --git a/tools/testing/radix-tree/linux/rcupdate.h b/tools/testing/radix-tree/linux/rcupdate.h
index 73ed33658203..25010bf86c1d 100644
--- a/tools/testing/radix-tree/linux/rcupdate.h
+++ b/tools/testing/radix-tree/linux/rcupdate.h
@@ -6,5 +6,6 @@
 
 #define rcu_dereference_raw(p) rcu_dereference(p)
 #define rcu_dereference_protected(p, cond) rcu_dereference(p)
+#define rcu_dereference_check(p, cond) rcu_dereference(p)
 
 #endif
diff --git a/tools/testing/radix-tree/linux/xarray.h b/tools/testing/radix-tree/linux/xarray.h
index df3812cda376..3eaf9596c2a6 100644
--- a/tools/testing/radix-tree/linux/xarray.h
+++ b/tools/testing/radix-tree/linux/xarray.h
@@ -1,2 +1,3 @@
 #include "generated/map-shift.h"
+#define XA_DEBUG
 #include "../../../../include/linux/xarray.h"
diff --git a/tools/testing/radix-tree/xarray-test.c b/tools/testing/radix-tree/xarray-test.c
new file mode 100644
index 000000000000..3f8f19cb3739
--- /dev/null
+++ b/tools/testing/radix-tree/xarray-test.c
@@ -0,0 +1,56 @@
+/*
+ * xarray-test.c: Test the XArray API
+ * Copyright (c) 2017 Microsoft Corporation <mawilcox@microsoft.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#include <linux/bitmap.h>
+#include <linux/xarray.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+#include "test.h"
+
+void check_xa_load(struct xarray *xa)
+{
+	unsigned long i, j;
+
+	for (i = 0; i < 1024; i++) {
+		for (j = 0; j < 1024; j++) {
+			void *entry = xa_load(xa, j);
+			if (j < i)
+				assert(xa_to_value(entry) == j);
+			else
+				assert(!entry);
+		}
+		radix_tree_insert(xa, i, xa_mk_value(i));
+	}
+}
+
+void xarray_checks(void)
+{
+	RADIX_TREE(array, GFP_KERNEL);
+
+	check_xa_load(&array);
+
+	item_kill_tree(&array);
+}
+
+int __weak main(void)
+{
+	radix_tree_init();
+	xarray_checks();
+	radix_tree_cpu_dead(1);
+	rcu_barrier();
+	if (nr_allocated)
+		printf("nr_allocated = %d\n", nr_allocated);
+	return 0;
+}
-- 
2.15.0

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

WARNING: multiple messages have this Message-ID (diff)
From: Matthew Wilcox <willy@infradead.org>
Cc: Matthew Wilcox <mawilcox@microsoft.com>,
	Ross Zwisler <ross.zwisler@linux.intel.com>,
	Jens Axboe <axboe@kernel.dk>, Rehas Sachdeva <aquannie@gmail.com>,
	linux-mm@kvack.org, linux-fsdevel@vger.kernel.org,
	linux-f2fs-devel@lists.sourceforge.net,
	linux-nilfs@vger.kernel.org, linux-btrfs@vger.kernel.org,
	linux-xfs@vger.kernel.org, linux-usb@vger.kernel.org,
	linux-kernel@vger.kernel.org
Subject: [PATCH v4 09/73] xarray: Add xa_load
Date: Tue,  5 Dec 2017 16:40:55 -0800	[thread overview]
Message-ID: <20171206004159.3755-10-willy@infradead.org> (raw)
In-Reply-To: <20171206004159.3755-1-willy@infradead.org>

From: Matthew Wilcox <mawilcox@microsoft.com>

This first function in the XArray API brings with it a lot of support
infrastructure.  The advanced API is based around the xa_state which is
a more capable version of the radix_tree_iter.

As the test-suite demonstrates, it is possible to use the xarray and
radix tree APIs on the same data structure.

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
 include/linux/xarray.h                      | 235 ++++++++++++++++++++++++++++
 lib/radix-tree.c                            |  43 -----
 lib/xarray.c                                | 160 +++++++++++++++++++
 tools/testing/radix-tree/.gitignore         |   1 +
 tools/testing/radix-tree/Makefile           |   7 +-
 tools/testing/radix-tree/linux/radix-tree.h |   1 -
 tools/testing/radix-tree/linux/rcupdate.h   |   1 +
 tools/testing/radix-tree/linux/xarray.h     |   1 +
 tools/testing/radix-tree/xarray-test.c      |  56 +++++++
 9 files changed, 459 insertions(+), 46 deletions(-)
 create mode 100644 tools/testing/radix-tree/xarray-test.c

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 1aff0069458b..af52ba75e6a3 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -21,6 +21,8 @@
 #include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
 
@@ -67,6 +69,8 @@ static inline void xa_init(struct xarray *xa)
 	__xa_init(xa, 0);
 }
 
+void *xa_load(struct xarray *, unsigned long index);
+
 #define BITS_PER_XA_VALUE	(BITS_PER_LONG - 1)
 
 /**
@@ -158,6 +162,46 @@ struct xa_node {
 	unsigned long	tags[XA_MAX_TAGS][XA_TAG_LONGS];
 };
 
+#ifdef XA_DEBUG
+void xa_dump(const struct xarray *);
+void xa_dump_node(const struct xa_node *);
+#define XA_BUG_ON(node, x) do { \
+		if ((x) && (node)) \
+			xa_dump_node(node); \
+		BUG_ON(x); \
+	} while (0)
+#else
+#define XA_BUG_ON(node, x)	do { } while (0)
+#endif
+
+/* Private */
+static inline void *xa_head(struct xarray *xa)
+{
+	return rcu_dereference_check(xa->xa_head, xa_lock_held(xa));
+}
+
+/* Private */
+static inline void *xa_head_locked(struct xarray *xa)
+{
+	return rcu_dereference_protected(xa->xa_head, xa_lock_held(xa));
+}
+
+/* Private */
+static inline void *xa_entry(struct xarray *xa,
+				const struct xa_node *node, unsigned int offset)
+{
+	XA_BUG_ON(node, offset >= XA_CHUNK_SIZE);
+	return rcu_dereference_check(node->slots[offset], xa_lock_held(xa));
+}
+
+/* Private */
+static inline void *xa_entry_locked(struct xarray *xa,
+				const struct xa_node *node, unsigned int offset)
+{
+	XA_BUG_ON(node, offset >= XA_CHUNK_SIZE);
+	return rcu_dereference_protected(node->slots[offset], xa_lock_held(xa));
+}
+
 /*
  * Internal entries have the bottom two bits set to the value 10b.  Most
  * internal entries are pointers to the next node in the tree.  Since the
@@ -189,6 +233,12 @@ static inline bool xa_is_internal(void *entry)
 	return ((unsigned long)entry & 3) == 2;
 }
 
+/* Private */
+static inline struct xa_node *xa_to_node(void *entry)
+{
+	return (struct xa_node *)((unsigned long)entry & ~3UL);
+}
+
 /* Private */
 static inline bool xa_is_node(void *entry)
 {
@@ -222,4 +272,189 @@ static inline bool xa_is_sibling(void *entry)
 
 #define XA_RETRY_ENTRY		xa_mk_internal(256)
 
+/**
+ * xa_is_retry() - Is the entry a retry entry?
+ * @entry: Entry retrieved from the XArray
+ *
+ * Return: %true if the entry is a retry entry.
+ */
+static inline bool xa_is_retry(void *entry)
+{
+	return unlikely(entry == XA_RETRY_ENTRY);
+}
+
+/**
+ * typedef xa_update_node_t - A callback function from the XArray.
+ * @node: The node which is being processed
+ *
+ * This function is called every time the XArray updates the count of
+ * present and value entries in a node.  It allows advanced users to
+ * maintain the private_list in the node.
+ */
+typedef void (*xa_update_node_t)(struct xa_node *node);
+
+/*
+ * The xa_state is opaque to its users.  It contains various different pieces
+ * of state involved in the current operation on the XArray.  It should be
+ * declared on the stack and passed between the various internal routines.
+ * The various elements in it should not be accessed directly, but only
+ * through the provided accessor functions.  The below documentation is for
+ * the benefit of those working on the code, not for users of the XArray.
+ *
+ * @xa_node usually points to the xa_node containing the slot we're operating
+ * on (and @xa_offset is the offset in the slots array).  If there is a
+ * single entry in the array at index 0, there are no allocated xa_nodes to
+ * point to, and so we store %NULL in @xa_node.  @xa_node is set to
+ * the value %XAS_RESTART if the xa_state is not walked to the correct
+ * position in the tree of nodes for this operation.  If an error occurs
+ * during an operation, it is set to an %XAS_ERROR value.  If we run off the
+ * end of the allocated nodes, it is set to %XAS_BOUNDS.
+ */
+struct xa_state {
+	struct xarray *xa;
+	unsigned long xa_index;
+	unsigned char xa_shift;
+	unsigned char xa_sibs;
+	unsigned char xa_offset;
+	unsigned char xa_pad;		/* Helps gcc generate better code */
+	struct xa_node *xa_node;
+	struct xa_node *xa_alloc;
+	xa_update_node_t xa_update;
+};
+
+/*
+ * We encode errnos in the xas->xa_node.  If an error has happened, we need to
+ * drop the lock to fix it, and once we've done so the xa_state is invalid.
+ */
+#define XAS_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 1) | 1))
+#define XAS_RESTART	 XAS_ERROR(0)
+#define XAS_BOUNDS	 ((struct xa_node *)2UL)
+
+#define __XA_STATE(array, index)  {			\
+	.xa = array,					\
+	.xa_index = index,				\
+	.xa_shift = 0,					\
+	.xa_sibs = 0,					\
+	.xa_offset = 0,					\
+	.xa_pad = 0,					\
+	.xa_node = XAS_RESTART,				\
+	.xa_alloc = NULL,				\
+	.xa_update = NULL				\
+}
+
+/**
+ * XA_STATE() - Declare an XArray operation state.
+ * @name: Name of this operation state (usually xas).
+ * @index: Initial index of interest.
+ *
+ * Declare and initialise an xa_state on the stack.
+ */
+#define XA_STATE(name, array, index)			\
+	struct xa_state name = __XA_STATE(array, index)
+
+#define xas_tagged(xas, tag)	xa_tagged((xas)->xa, (tag))
+#define xas_trylock(xas)	xa_trylock((xas)->xa)
+#define xas_lock(xas)		xa_lock((xas)->xa)
+#define xas_unlock(xas)		xa_unlock((xas)->xa)
+#define xas_lock_bh(xas)	xa_lock_bh((xas)->xa)
+#define xas_unlock_bh(xas)	xa_unlock_bh((xas)->xa)
+#define xas_lock_irq(xas)	xa_lock_irq((xas)->xa)
+#define xas_unlock_irq(xas)	xa_unlock_irq((xas)->xa)
+#define xas_lock_irqsave(xas, flags) \
+				xa_lock_irqsave((xas)->xa, flags)
+#define xas_unlock_irqrestore(xas, flags) \
+				xa_unlock_irqrestore((xas)->xa, flags)
+
+/**
+ * xas_error() - Return an errno stored in the xa_state.
+ * @xas: XArray operation state.
+ *
+ * Return: 0 if no error has been noted.  A negative errno if one has.
+ */
+static inline int xas_error(const struct xa_state *xas)
+{
+	unsigned long v = (unsigned long)xas->xa_node;
+	return (v & 1) ? -(v >> 1) : 0;
+}
+
+/**
+ * xas_set_err() - Note an error in the xa_state.
+ * @xas: XArray operation state.
+ * @err: Negative error number.
+ *
+ * You can call this function with @err set to 0 to take the xa_state
+ * out of the error state.  The next operation will walk it to the correct
+ * location.
+ */
+static inline void xas_set_err(struct xa_state *xas, long err)
+{
+	xas->xa_node = XAS_ERROR(-err);
+}
+
+/**
+ * xas_invalid() - Is the xas in a retry or error state?
+ * @xas: XArray operation state.
+ *
+ * Return: %true if the xas cannot be used for operations.
+ */
+static inline bool xas_invalid(const struct xa_state *xas)
+{
+	return (unsigned long)xas->xa_node & 3;
+}
+
+/**
+ * xas_valid() - Is the xas a valid cursor into the array?
+ * @xas: XArray operation state.
+ *
+ * Return: %true if the xas can be used for operations.
+ */
+static inline bool xas_valid(const struct xa_state *xas)
+{
+	return !xas_invalid(xas);
+}
+
+/**
+ * xas_retry() - Handle a retry entry.
+ * @xas: XArray operation state.
+ * @entry: Entry from xarray.
+ *
+ * An RCU-protected read may see a retry entry as a side-effect of a
+ * simultaneous modification.  This function sets up the @xas to retry
+ * the walk from the head of the array.
+ *
+ * Return: true if the operation needs to be retried.
+ */
+static inline bool xas_retry(struct xa_state *xas, void *entry)
+{
+	if (!xa_is_retry(entry))
+		return false;
+	xas->xa_node = XAS_RESTART;
+	return true;
+}
+
+void *xas_load(struct xa_state *);
+
+/**
+ * xas_reload() - Refetch an entry from the xarray.
+ * @xas: XArray operation state.
+ *
+ * Use this function to check that a previously loaded entry still has
+ * the same value.  This is useful for the lockless pagecache lookup where
+ * we walk the array with only the RCU lock to protect us, lock the page,
+ * then check that the page hasn't moved since we looked it up.
+ *
+ * The caller guarantees that @xas is still valid.  If it may be in an
+ * error or restart state, call xas_load() instead.
+ *
+ * Return: The entry at this location in the xarray.
+ */
+static inline void *xas_reload(struct xa_state *xas)
+{
+	struct xa_node *node = xas->xa_node;
+
+	if (node)
+		return xa_entry(xas->xa, node, xas->xa_offset);
+	return xa_head(xas->xa);
+}
+
 #endif /* _LINUX_XARRAY_H */
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 930eb7d298d7..a919c60b10a4 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -255,49 +255,6 @@ static unsigned long next_index(unsigned long index,
 }
 
 #ifndef __KERNEL__
-static void dump_node(struct radix_tree_node *node, unsigned long index)
-{
-	unsigned long i;
-
-	pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d exceptional %d\n",
-		node, node->offset, index, index | node_maxindex(node),
-		node->parent,
-		node->tags[0][0], node->tags[1][0], node->tags[2][0],
-		node->shift, node->count, node->exceptional);
-
-	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
-		unsigned long first = index | (i << node->shift);
-		unsigned long last = first | ((1UL << node->shift) - 1);
-		void *entry = node->slots[i];
-		if (!entry)
-			continue;
-		if (entry == RADIX_TREE_RETRY) {
-			pr_debug("radix retry offset %ld indices %lu-%lu parent %p\n",
-					i, first, last, node);
-		} else if (!radix_tree_is_internal_node(entry)) {
-			pr_debug("radix entry %p offset %ld indices %lu-%lu parent %p\n",
-					entry, i, first, last, node);
-		} else if (xa_is_sibling(entry)) {
-			pr_debug("radix sblng %p offset %ld indices %lu-%lu parent %p val %p\n",
-					entry, i, first, last, node,
-					node->slots[xa_to_sibling(entry)]);
-		} else {
-			dump_node(entry_to_node(entry), first);
-		}
-	}
-}
-
-/* For debug */
-static void radix_tree_dump(struct radix_tree_root *root)
-{
-	pr_debug("radix root: %p xa_head %p tags %x\n",
-			root, root->xa_head,
-			root->xa_flags >> ROOT_TAG_SHIFT);
-	if (!radix_tree_is_internal_node(root->xa_head))
-		return;
-	dump_node(entry_to_node(root->xa_head), 0);
-}
-
 static void dump_ida_node(void *entry, unsigned long index)
 {
 	unsigned long i;
diff --git a/lib/xarray.c b/lib/xarray.c
index 67ddcb3e630c..2f77e4c5d0b8 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -31,6 +31,94 @@
  * @entry refers to something stored in a slot in the xarray
  */
 
+/* extracts the offset within this node from the index */
+static unsigned int get_offset(unsigned long index, struct xa_node *node)
+{
+	return (index >> node->shift) & XA_CHUNK_MASK;
+}
+
+static void *set_bounds(struct xa_state *xas)
+{
+	xas->xa_node = XAS_BOUNDS;
+	return NULL;
+}
+
+/*
+ * Starts a walk.  If the @xas is already valid, we assume that it's on
+ * the right path and just return where we've got to.  If we're in an
+ * error state, return NULL.  If the index is outside the current scope
+ * of the xarray, return NULL without changing @xas->xa_node.  Otherwise
+ * set @xas->xa_node to NULL and return the current head of the array.
+ */
+static void *xas_start(struct xa_state *xas)
+{
+	void *entry;
+
+	if (xas_valid(xas))
+		return xas_reload(xas);
+	if (xas_error(xas))
+		return NULL;
+
+	entry = xa_head(xas->xa);
+	if (!xa_is_node(entry)) {
+		if (xas->xa_index)
+			return set_bounds(xas);
+	} else {
+		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
+			return set_bounds(xas);
+	}
+
+	xas->xa_node = NULL;
+	return entry;
+}
+
+static void *xas_descend(struct xa_state *xas, struct xa_node *node)
+{
+	unsigned int offset = get_offset(xas->xa_index, node);
+	void *entry = xa_entry(xas->xa, node, offset);
+
+	if (xa_is_sibling(entry)) {
+		offset = xa_to_sibling(entry);
+		entry = xa_entry(xas->xa, node, offset);
+		/* Move xa_index to the first index of this entry */
+		xas->xa_index = (((xas->xa_index >> node->shift) &
+				  ~XA_CHUNK_MASK) | offset) << node->shift;
+	}
+
+	xas->xa_node = node;
+	xas->xa_offset = offset;
+	return entry;
+}
+
+/**
+ * xas_load() - Load an entry from the XArray (advanced).
+ * @xas: XArray operation state.
+ *
+ * Usually walks the @xas to the appropriate state to load the entry stored
+ * at xa_index.  However, it will do nothing and return NULL  if @xas is
+ * holding an error.  If the xa_shift indicates we're operating on a
+ * multislot entry, it will terminate early and potentially return an
+ * internal entry.  xas_load() will never expand the tree (see xas_create()).
+ *
+ * The caller should hold the xa_lock or the RCU lock.
+ *
+ * Return: Usually an entry in the XArray, but see description for exceptions.
+ */
+void *xas_load(struct xa_state *xas)
+{
+	void *entry = xas_start(xas);
+
+	while (xa_is_node(entry)) {
+		struct xa_node *node = xa_to_node(entry);
+
+		if (xas->xa_shift > node->shift)
+			break;
+		entry = xas_descend(xas, node);
+	}
+	return entry;
+}
+EXPORT_SYMBOL_GPL(xas_load);
+
 /**
  * __xa_init() - Initialise an empty XArray.
  * @xa: XArray.
@@ -45,3 +133,75 @@ void __xa_init(struct xarray *xa, gfp_t flags)
 	xa->xa_head = NULL;
 }
 EXPORT_SYMBOL(__xa_init);
+
+/**
+ * xa_load() - Load an entry from an XArray.
+ * @xa: XArray.
+ * @index: index into array.
+ *
+ * Return: The entry at @index in @xa.
+ */
+void *xa_load(struct xarray *xa, unsigned long index)
+{
+	XA_STATE(xas, xa, index);
+	void *entry;
+
+	rcu_read_lock();
+	do {
+		entry = xas_load(&xas);
+	} while (xas_retry(&xas, entry));
+	rcu_read_unlock();
+
+	return entry;
+}
+EXPORT_SYMBOL(xa_load);
+
+#ifdef XA_DEBUG
+void xa_dump_entry(void *entry, unsigned long index)
+{
+	if (!entry)
+		return;
+
+	if (xa_is_value(entry))
+		printk("%lu: value %#lx\n", index, xa_to_value(entry));
+	else if (!xa_is_internal(entry))
+		printk("%lu: %p\n", index, entry);
+	else if (xa_is_node(entry)) {
+		unsigned long i;
+		struct xa_node *node = xa_to_node(entry);
+		printk("node %p %s %d parent %p shift %d count %d "
+			"exceptional %d tags %lx %lx %lx indices %lu-%lu\n",
+			node, node->parent ? "offset" : "max", node->offset,
+			node->parent, node->shift, node->count,
+			node->exceptional,
+			node->tags[0][0], node->tags[1][0], node->tags[2][0],
+			index, index |
+			(((unsigned long)XA_CHUNK_SIZE << node->shift) - 1));
+		for (i = 0; i < XA_CHUNK_SIZE; i++)
+			xa_dump_entry(node->slots[i],
+					index + (i << node->shift));
+	} else if (xa_is_retry(entry))
+		printk("%lu: retry (%ld)\n", index, xa_to_internal(entry));
+	else if (xa_is_sibling(entry))
+		printk("%lu: sibling (%ld)\n", index, xa_to_sibling(entry));
+	else
+		printk("%lu: UNKNOWN ENTRY (%p)\n", index, entry);
+}
+
+void xa_dump_node(const struct xa_node *node)
+{
+	printk("xadn: node %p %s %d parent %p shift %d count %d "
+		"exceptional %d array %p list %p %p tags %lx %lx %lx\n",
+		node, node->parent ? "offset" : "max", node->offset,
+		node->parent, node->shift, node->count,
+		node->exceptional, node->root, node->private_list.prev,
+		node->private_list.next,
+		node->tags[0][0], node->tags[1][0], node->tags[2][0]);
+}
+
+void xa_dump(const struct xarray *xa)
+{
+	printk("xarray: %p %x %p\n", xa, xa->xa_flags, xa->xa_head);
+	xa_dump_entry(xa->xa_head, 0);
+}
+#endif
diff --git a/tools/testing/radix-tree/.gitignore b/tools/testing/radix-tree/.gitignore
index 8d4df7a72a8e..833136896b91 100644
--- a/tools/testing/radix-tree/.gitignore
+++ b/tools/testing/radix-tree/.gitignore
@@ -5,3 +5,4 @@ main
 multiorder
 radix-tree.c
 xarray.c
+xarray-test
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
index 3868bc189199..749ef734a87c 100644
--- a/tools/testing/radix-tree/Makefile
+++ b/tools/testing/radix-tree/Makefile
@@ -3,10 +3,11 @@
 CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE -fsanitize=address
 LDFLAGS += -fsanitize=address
 LDLIBS+= -lpthread -lurcu
-TARGETS = main idr-test multiorder
+TARGETS = main idr-test multiorder xarray-test
 CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o
 OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
-	 tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o
+	 tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o \
+	 xarray-test.o
 
 ifndef SHIFT
 	SHIFT=3
@@ -23,6 +24,8 @@ main:	$(OFILES)
 
 idr-test: idr-test.o $(CORE_OFILES)
 
+xarray-test: xarray-test.o $(CORE_OFILES)
+
 multiorder: multiorder.o $(CORE_OFILES)
 
 clean:
diff --git a/tools/testing/radix-tree/linux/radix-tree.h b/tools/testing/radix-tree/linux/radix-tree.h
index 40c9671ee365..36fb716d5557 100644
--- a/tools/testing/radix-tree/linux/radix-tree.h
+++ b/tools/testing/radix-tree/linux/radix-tree.h
@@ -5,7 +5,6 @@
 #include "generated/map-shift.h"
 #include "linux/bug.h"
 #include "../../../../include/linux/radix-tree.h"
-#include <linux/xarray.h>
 
 extern int kmalloc_verbose;
 extern int test_verbose;
diff --git a/tools/testing/radix-tree/linux/rcupdate.h b/tools/testing/radix-tree/linux/rcupdate.h
index 73ed33658203..25010bf86c1d 100644
--- a/tools/testing/radix-tree/linux/rcupdate.h
+++ b/tools/testing/radix-tree/linux/rcupdate.h
@@ -6,5 +6,6 @@
 
 #define rcu_dereference_raw(p) rcu_dereference(p)
 #define rcu_dereference_protected(p, cond) rcu_dereference(p)
+#define rcu_dereference_check(p, cond) rcu_dereference(p)
 
 #endif
diff --git a/tools/testing/radix-tree/linux/xarray.h b/tools/testing/radix-tree/linux/xarray.h
index df3812cda376..3eaf9596c2a6 100644
--- a/tools/testing/radix-tree/linux/xarray.h
+++ b/tools/testing/radix-tree/linux/xarray.h
@@ -1,2 +1,3 @@
 #include "generated/map-shift.h"
+#define XA_DEBUG
 #include "../../../../include/linux/xarray.h"
diff --git a/tools/testing/radix-tree/xarray-test.c b/tools/testing/radix-tree/xarray-test.c
new file mode 100644
index 000000000000..3f8f19cb3739
--- /dev/null
+++ b/tools/testing/radix-tree/xarray-test.c
@@ -0,0 +1,56 @@
+/*
+ * xarray-test.c: Test the XArray API
+ * Copyright (c) 2017 Microsoft Corporation <mawilcox@microsoft.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#include <linux/bitmap.h>
+#include <linux/xarray.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+#include "test.h"
+
+void check_xa_load(struct xarray *xa)
+{
+	unsigned long i, j;
+
+	for (i = 0; i < 1024; i++) {
+		for (j = 0; j < 1024; j++) {
+			void *entry = xa_load(xa, j);
+			if (j < i)
+				assert(xa_to_value(entry) == j);
+			else
+				assert(!entry);
+		}
+		radix_tree_insert(xa, i, xa_mk_value(i));
+	}
+}
+
+void xarray_checks(void)
+{
+	RADIX_TREE(array, GFP_KERNEL);
+
+	check_xa_load(&array);
+
+	item_kill_tree(&array);
+}
+
+int __weak main(void)
+{
+	radix_tree_init();
+	xarray_checks();
+	radix_tree_cpu_dead(1);
+	rcu_barrier();
+	if (nr_allocated)
+		printf("nr_allocated = %d\n", nr_allocated);
+	return 0;
+}
-- 
2.15.0

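As a quick illustration of xa_load() (not part of the patch; it only uses interfaces that exist at this point in the series), a check along these lines could sit next to check_xa_load() above.  The _demo suffix is made up and the helper is purely illustrative.

void check_xa_load_demo(struct xarray *xa)
{
	/* A single entry at index 0 lives in the head slot; no nodes are
	 * allocated yet, so every other index is out of bounds for
	 * xas_start() and loads as NULL.
	 */
	radix_tree_insert(xa, 0, xa_mk_value(0));
	assert(xa_to_value(xa_load(xa, 0)) == 0);
	assert(xa_load(xa, 1) == NULL);
	assert(xa_load(xa, 1UL << 20) == NULL);

	/* A second index forces nodes to be allocated, so xas_load()
	 * now has to descend through them to find each entry.
	 */
	radix_tree_insert(xa, 70, xa_mk_value(70));
	assert(xa_to_value(xa_load(xa, 70)) == 70);
	assert(xa_load(xa, 71) == NULL);
}

It would be called from xarray_checks() on its own freshly declared RADIX_TREE, before item_kill_tree().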

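For completeness, here is a sketch of using the advanced API directly instead of going through xa_load(): the xa_state survives the first walk, so xas_reload() can cheaply re-check the slot after the caller has taken its own lock.  obj_lock()/obj_unlock() are hypothetical stand-ins for whatever per-object locking the caller already has, and the sketch assumes the stored objects are freed under RCU so taking that lock inside rcu_read_lock() is safe.

static void *demo_find_lock(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	for (;;) {
		entry = xas_load(&xas);
		if (xas_retry(&xas, entry))
			continue;	/* a writer asked us to restart */
		if (!entry)
			break;		/* nothing stored at this index */
		obj_lock(entry);
		if (xas_reload(&xas) == entry)
			break;		/* still current; return it locked */
		obj_unlock(entry);	/* the slot changed under us */
	}
	rcu_read_unlock();
	return entry;			/* NULL, or the entry with obj_lock held */
}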
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 48/73] shmem: Convert shmem_free_swap " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 49/73] shmem: Convert shmem_partial_swap_usage " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 50/73] shmem: Comment fixups Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 51/73] btrfs: Convert page cache to XArray Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 52/73] fs: Convert buffer " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 53/73] fs: Convert writeback " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 54/73] nilfs2: Convert " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 55/73] f2fs: " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 56/73] lustre: " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 57/73] dax: Convert dax_unlock_mapping_entry " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 58/73] dax: Convert lock_slot " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 59/73] dax: More XArray conversion Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 60/73] dax: Convert __dax_invalidate_mapping_entry to XArray Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 61/73] dax: Convert dax_writeback_one " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 62/73] dax: Convert dax_insert_pfn_mkwrite " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 63/73] dax: Convert dax_insert_mapping_entry " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 64/73] dax: Convert grab_mapping_entry " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 65/73] dax: Fix sparse warning Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 66/73] page cache: Finish XArray conversion Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 67/73] vmalloc: Convert to XArray Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 68/73] brd: " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 69/73] xfs: Convert m_perag_tree " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 70/73] xfs: Convert pag_ici_root " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 71/73] xfs: Convert xfs dquot " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 72/73] xfs: Convert mru cache " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  1:36   ` Dave Chinner
2017-12-06  1:36     ` Dave Chinner
2017-12-06  1:36     ` Dave Chinner
2017-12-06  2:02     ` Matthew Wilcox
2017-12-06  2:02       ` Matthew Wilcox
2017-12-06  3:14       ` Dave Chinner
2017-12-06  3:14         ` Dave Chinner
2017-12-06  4:45         ` Matthew Wilcox
2017-12-06  4:45           ` Matthew Wilcox
2017-12-06  4:45           ` Matthew Wilcox
2017-12-06  4:52           ` Matthew Wilcox
2017-12-06  4:52             ` Matthew Wilcox
2017-12-06  8:44           ` Dave Chinner
2017-12-06  8:44             ` Dave Chinner
2017-12-06  8:44             ` Dave Chinner
2017-12-06 14:06             ` Matthew Wilcox
2017-12-06 14:06               ` Matthew Wilcox
2017-12-06 14:06               ` Matthew Wilcox
2017-12-07  0:38               ` Dave Chinner
2017-12-07  0:38                 ` Dave Chinner
2017-12-08 23:01                 ` Matthew Wilcox
2017-12-08 23:01                   ` Matthew Wilcox
2017-12-10 23:57                   ` Dave Chinner
2017-12-10 23:57                     ` Dave Chinner
2017-12-10 23:57                     ` Dave Chinner
2017-12-11  4:23                     ` Matthew Wilcox
2017-12-11  4:23                       ` [v4,72/73] " Matthew Wilcox
2017-12-11  4:23                       ` [PATCH v4 72/73] " Matthew Wilcox
2017-12-11 21:55                       ` Dave Chinner
2017-12-11 21:55                         ` [v4,72/73] " Dave Chinner
2017-12-11 21:55                         ` [PATCH v4 72/73] " Dave Chinner
2017-12-07 16:06               ` Theodore Ts'o
2017-12-07 16:06                 ` Theodore Ts'o
2017-12-07 22:22                 ` Dave Chinner
2017-12-07 22:22                   ` Dave Chinner
2017-12-08  4:45                   ` Byungchul Park
2017-12-08  4:45                     ` Byungchul Park
2017-12-08  4:45                     ` Byungchul Park
2017-12-08  7:25                     ` Dave Chinner
2017-12-08  7:25                       ` Dave Chinner
2017-12-08  7:25                       ` Dave Chinner
2017-12-08  9:27                       ` Byungchul Park
2017-12-08  9:27                         ` Byungchul Park
2017-12-08  9:27                         ` Byungchul Park
2017-12-08 17:35                         ` Alan Stern
2017-12-08 17:35                           ` Alan Stern
2017-12-08 17:35                           ` Alan Stern
2017-12-08 17:35                           ` Alan Stern
2017-12-08 17:35                           ` Alan Stern
2017-12-08 22:36                           ` Dave Chinner
2017-12-08 22:36                             ` Dave Chinner
2017-12-08 22:36                             ` Dave Chinner
2017-12-09 17:00                             ` Joe Perches
2017-12-09 17:00                               ` Joe Perches
2017-12-09 17:00                               ` Joe Perches
2017-12-11 21:43                               ` Dave Chinner
2017-12-11 21:43                                 ` Dave Chinner
2017-12-11 22:12                                 ` Joe Perches
2017-12-11 22:12                                   ` Joe Perches
2017-12-11 22:12                                   ` Joe Perches
2017-12-11 22:43                                   ` Matthew Wilcox
2017-12-11 22:43                                     ` Matthew Wilcox
2017-12-11 23:46                                     ` Joe Perches
2017-12-11 23:46                                       ` Joe Perches
2017-12-11 23:46                                       ` Joe Perches
2017-12-12 15:51                                       ` Alan Stern
2017-12-12 15:51                                         ` Alan Stern
2017-12-12 15:51                                         ` Alan Stern
2017-12-12 15:51                                         ` Alan Stern
2017-12-12 15:51                                         ` Alan Stern
2017-12-14 18:23                                     ` Joe Perches
2017-12-14 18:23                                       ` Joe Perches
2017-12-14 18:23                                       ` [v4,72/73] " Joe Perches
2017-12-14 18:23                                       ` [PATCH v4 72/73] " Joe Perches
2017-12-17  1:26                                     ` [RFC patch] checkpatch: Add a test for long function definitions (>200 lines) Joe Perches
2017-12-17 21:46                                       ` Linus Torvalds
2017-12-17 22:22                                         ` Joe Perches
2017-12-17 22:33                                         ` Luc Van Oostenryck
2017-12-11 23:38                                   ` [PATCH v4 72/73] xfs: Convert mru cache to XArray Dave Chinner
2017-12-11 23:38                                     ` Dave Chinner
2017-12-21 12:05                                   ` Knut Omang
2017-12-21 12:05                                     ` Knut Omang
2017-12-07 22:38                 ` Lockdep is less useful than it was Matthew Wilcox
2017-12-07 22:38                   ` Matthew Wilcox
2017-12-07 22:39                   ` Matthew Wilcox
2017-12-07 22:39                     ` Matthew Wilcox
2017-12-08  0:14                   ` Dave Chinner
2017-12-08  0:14                     ` Dave Chinner
2017-12-08 15:27                   ` Theodore Ts'o
2017-12-08 15:27                     ` Theodore Ts'o
2017-12-08 18:14                     ` Matthew Wilcox
2017-12-08 18:14                       ` Matthew Wilcox
2017-12-08 22:47                       ` Dave Chinner
2017-12-08 22:47                         ` Dave Chinner
2017-12-06  0:41 ` [PATCH v4 73/73] usb: Convert xhci-mem to XArray Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  1:45 ` [PATCH v4 00/73] XArray version 4 Dave Chinner
2017-12-06  1:45   ` Dave Chinner
2017-12-06  1:45   ` Dave Chinner
2017-12-06  1:51   ` Dave Chinner
2017-12-06  1:51     ` Dave Chinner
2017-12-06  1:51     ` Dave Chinner
2017-12-06  1:51     ` Dave Chinner
2017-12-06  1:53     ` Matthew Wilcox
2017-12-06  1:53       ` Matthew Wilcox
2017-12-06  1:53       ` Matthew Wilcox
2017-12-06  1:53       ` Matthew Wilcox
2017-12-06  2:17       ` Dave Chinner
2017-12-06  2:17         ` Dave Chinner
2017-12-06  2:17         ` Dave Chinner
2017-12-06  2:17         ` Dave Chinner
2017-12-06  2:17         ` Dave Chinner
2017-12-06  2:27         ` Matthew Wilcox
2017-12-06  2:27           ` Matthew Wilcox
2017-12-06  2:27           ` Matthew Wilcox
2017-12-06  2:27           ` Matthew Wilcox
2017-12-06  2:05   ` Matthew Wilcox
2017-12-06  2:05     ` Matthew Wilcox
2017-12-06  2:38     ` Dave Chinner
2017-12-06  2:38       ` Dave Chinner
2017-12-06 23:58 ` Ross Zwisler
2017-12-06 23:58   ` Ross Zwisler
2017-12-06 23:58   ` Ross Zwisler
2017-12-07  0:13   ` Matthew Wilcox
2017-12-07  0:13     ` Matthew Wilcox
2017-12-07  0:13     ` Matthew Wilcox
