From: Matthew Wilcox <willy@infradead.org>
To: unlisted-recipients:; (no To-header on input)
Cc: Matthew Wilcox <mawilcox@microsoft.com>,
	Ross Zwisler <ross.zwisler@linux.intel.com>,
	Jens Axboe <axboe@kernel.dk>, Rehas Sachdeva <aquannie@gmail.com>,
	linux-mm@kvack.org, linux-fsdevel@vger.kernel.org,
	linux-f2fs-devel@lists.sourceforge.net,
	linux-nilfs@vger.kernel.org, linux-btrfs@vger.kernel.org,
	linux-xfs@vger.kernel.org, linux-usb@vger.kernel.org,
	linux-kernel@vger.kernel.org
Subject: [PATCH v4 05/73] xarray: Change definition of sibling entries
Date: Tue,  5 Dec 2017 16:40:51 -0800
Message-ID: <20171206004159.3755-6-willy@infradead.org>
In-Reply-To: <20171206004159.3755-1-willy@infradead.org>

From: Matthew Wilcox <mawilcox@microsoft.com>

Instead of storing a pointer to the slot containing the canonical entry,
store the offset of that slot within the node.  This produces slightly
more efficient code (about 300 bytes smaller) and simplifies the
implementation.
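
To make the change concrete, here is a minimal user-space sketch of the
new encoding.  It is not part of the patch; it simply mirrors the
xa_mk_internal() and xa_to_internal() helpers added below.  A sibling
entry at offset 5 is now the small tagged value ((5 << 2) | 2) rather
than a pointer into the node's slots[] array:

	#include <stdio.h>

	/* Mirrors the helpers added to include/linux/xarray.h below. */
	static void *xa_mk_internal(unsigned long v)
	{
		return (void *)((v << 2) | 2);
	}

	static unsigned long xa_to_internal(void *entry)
	{
		return (unsigned long)entry >> 2;
	}

	int main(void)
	{
		void *sibling = xa_mk_internal(5);

		/* Prints "entry 0x16 decodes to offset 5". */
		printf("entry %p decodes to offset %lu\n",
			sibling, xa_to_internal(sibling));
		return 0;
	}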

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
 include/linux/xarray.h | 82 ++++++++++++++++++++++++++++++++++++++++++++++++++
 lib/radix-tree.c       | 65 +++++++++++----------------------------
 2 files changed, 100 insertions(+), 47 deletions(-)
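
A note on the XA_CHUNK_* constants introduced below: with the default
XA_CHUNK_SHIFT of 6, each node holds 64 slots and the tree is walked
purely with shifts and masks.  A minimal sketch of the offset
calculation, assuming the constants from the patch (radix_tree_descend()
below does the equivalent with parent->shift and RADIX_TREE_MAP_MASK):

	#define XA_CHUNK_SHIFT	6
	#define XA_CHUNK_SIZE	(1UL << XA_CHUNK_SHIFT)	/* 64 slots per node */
	#define XA_CHUNK_MASK	(XA_CHUNK_SIZE - 1)	/* 0x3f */

	/* Slot offset of @index within a node at a given @shift. */
	static unsigned int chunk_offset(unsigned long index, unsigned int shift)
	{
		return (index >> shift) & XA_CHUNK_MASK;
	}

For example, index 70000 in a node at shift 6 lands in slot
(70000 >> 6) & 63 == 5.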

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index e55f5cfd14ed..2c45d87a3476 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -58,6 +58,8 @@ static inline bool xa_is_value(void *entry)
 	return (unsigned long)entry & 1;
 }
 
+/* Everything below here is the Advanced API.  Proceed with caution. */
+
 #define xa_trylock(xa)		spin_trylock(&(xa)->xa_lock)
 #define xa_lock(xa)		spin_lock(&(xa)->xa_lock)
 #define xa_unlock(xa)		spin_unlock(&(xa)->xa_lock)
@@ -71,4 +73,84 @@ static inline bool xa_is_value(void *entry)
 				spin_unlock_irqrestore(&(xa)->xa_lock, flags)
 #define xa_lock_held(xa)	lockdep_is_held(&(xa)->xa_lock)
 
+/*
+ * The xarray is constructed out of a set of 'chunks' of pointers.  Choosing
+ * the best chunk size requires some tradeoffs.  A power of two recommends
+ * itself so that we can walk the tree based purely on shifts and masks.
+ * Generally, the larger the better; as the number of slots per level of the
+ * tree increases, the less tall the tree needs to be.  But that needs to be
+ * balanced against the memory consumption of each node.  On a 64-bit system,
+ * xa_node is currently 576 bytes, and we get 7 of them per 4kB page.  If we
+ * doubled the number of slots per node, we'd get only 3 nodes per 4kB page.
+ */
+#ifndef XA_CHUNK_SHIFT
+#define XA_CHUNK_SHIFT		(CONFIG_BASE_SMALL ? 4 : 6)
+#endif
+#define XA_CHUNK_SIZE		(1UL << XA_CHUNK_SHIFT)
+#define XA_CHUNK_MASK		(XA_CHUNK_SIZE - 1)
+
+/*
+ * Internal entries have the bottom two bits set to the value 10b.  Most
+ * internal entries are pointers to the next node in the tree.  Since the
+ * kernel unmaps page 0 to trap NULL pointer dereferences, we can use values
+ * 0-1023 for special purposes.  Values 0-62 are used for sibling
+ * entries.  Value 256 is used for the retry entry.
+ */
+
+/* Private */
+static inline void *xa_mk_internal(unsigned long v)
+{
+	return (void *)((v << 2) | 2);
+}
+
+/* Private */
+static inline unsigned long xa_to_internal(void *entry)
+{
+	return (unsigned long)entry >> 2;
+}
+
+/**
+ * xa_is_internal() - Is the entry an internal entry?
+ * @entry: Entry retrieved from the XArray
+ *
+ * Return: %true if the entry is an internal entry.
+ */
+static inline bool xa_is_internal(void *entry)
+{
+	return ((unsigned long)entry & 3) == 2;
+}
+
+/* Private */
+static inline bool xa_is_node(void *entry)
+{
+	return xa_is_internal(entry) && (unsigned long)entry > 4096;
+}
+
+/* Private */
+static inline void *xa_mk_sibling(unsigned int offset)
+{
+	return xa_mk_internal(offset);
+}
+
+/* Private */
+static inline unsigned long xa_to_sibling(void *entry)
+{
+	return xa_to_internal(entry);
+}
+
+/**
+ * xa_is_sibling() - Is the entry a sibling entry?
+ * @entry: Entry retrieved from the XArray
+ *
+ * Return: %true if the entry is a sibling entry.
+ */
+static inline bool xa_is_sibling(void *entry)
+{
+	return IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) &&
+		xa_is_internal(entry) &&
+		(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
+}
+
+#define XA_RETRY_ENTRY		xa_mk_internal(256)
+
 #endif /* _LINUX_XARRAY_H */
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index cda7a730e591..0a7a21dd9398 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -37,6 +37,7 @@
 #include <linux/rcupdate.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/xarray.h>
 
 
 /* Number of nodes in fully populated tree of given height */
@@ -97,24 +98,7 @@ static inline void *node_to_entry(void *ptr)
 	return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
 }
 
-#define RADIX_TREE_RETRY	node_to_entry(NULL)
-
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-/* Sibling slots point directly to another slot in the same node */
-static inline
-bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
-{
-	void __rcu **ptr = node;
-	return (parent->slots <= ptr) &&
-			(ptr < parent->slots + RADIX_TREE_MAP_SIZE);
-}
-#else
-static inline
-bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
-{
-	return false;
-}
-#endif
+#define RADIX_TREE_RETRY	XA_RETRY_ENTRY
 
 static inline unsigned long
 get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
@@ -128,16 +112,10 @@ static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
 	unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
 	void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);
 
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-	if (radix_tree_is_internal_node(entry)) {
-		if (is_sibling_entry(parent, entry)) {
-			void __rcu **sibentry;
-			sibentry = (void __rcu **) entry_to_node(entry);
-			offset = get_slot_offset(parent, sibentry);
-			entry = rcu_dereference_raw(*sibentry);
-		}
+	if (xa_is_sibling(entry)) {
+		offset = xa_to_sibling(entry);
+		entry = rcu_dereference_raw(parent->slots[offset]);
 	}
-#endif
 
 	*nodep = (void *)entry;
 	return offset;
@@ -299,10 +277,10 @@ static void dump_node(struct radix_tree_node *node, unsigned long index)
 		} else if (!radix_tree_is_internal_node(entry)) {
 			pr_debug("radix entry %p offset %ld indices %lu-%lu parent %p\n",
 					entry, i, first, last, node);
-		} else if (is_sibling_entry(node, entry)) {
+		} else if (xa_is_sibling(entry)) {
 			pr_debug("radix sblng %p offset %ld indices %lu-%lu parent %p val %p\n",
 					entry, i, first, last, node,
-					*(void **)entry_to_node(entry));
+					node->slots[xa_to_sibling(entry)]);
 		} else {
 			dump_node(entry_to_node(entry), first);
 		}
@@ -872,8 +850,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
 
 	for (;;) {
 		void *entry = rcu_dereference_raw(child->slots[offset]);
-		if (radix_tree_is_internal_node(entry) &&
-					!is_sibling_entry(child, entry)) {
+		if (xa_is_node(entry)) {
 			child = entry_to_node(entry);
 			offset = 0;
 			continue;
@@ -895,7 +872,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
 static inline int insert_entries(struct radix_tree_node *node,
 		void __rcu **slot, void *item, unsigned order, bool replace)
 {
-	struct radix_tree_node *child;
+	void *sibling;
 	unsigned i, n, tag, offset, tags = 0;
 
 	if (node) {
@@ -913,7 +890,7 @@ static inline int insert_entries(struct radix_tree_node *node,
 		offset = offset & ~(n - 1);
 		slot = &node->slots[offset];
 	}
-	child = node_to_entry(slot);
+	sibling = xa_mk_sibling(offset);
 
 	for (i = 0; i < n; i++) {
 		if (slot[i]) {
@@ -930,7 +907,7 @@ static inline int insert_entries(struct radix_tree_node *node,
 	for (i = 0; i < n; i++) {
 		struct radix_tree_node *old = rcu_dereference_raw(slot[i]);
 		if (i) {
-			rcu_assign_pointer(slot[i], child);
+			rcu_assign_pointer(slot[i], sibling);
 			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
 				if (tags & (1 << tag))
 					tag_clear(node, tag, offset + i);
@@ -940,9 +917,7 @@ static inline int insert_entries(struct radix_tree_node *node,
 				if (tags & (1 << tag))
 					tag_set(node, tag, offset);
 		}
-		if (radix_tree_is_internal_node(old) &&
-					!is_sibling_entry(node, old) &&
-					(old != RADIX_TREE_RETRY))
+		if (xa_is_node(old))
 			radix_tree_free_nodes(old);
 		if (xa_is_value(old))
 			node->exceptional--;
@@ -1101,10 +1076,10 @@ static inline void replace_sibling_entries(struct radix_tree_node *node,
 				void __rcu **slot, int count, int exceptional)
 {
 #ifdef CONFIG_RADIX_TREE_MULTIORDER
-	void *ptr = node_to_entry(slot);
-	unsigned offset = get_slot_offset(node, slot) + 1;
+	unsigned offset = get_slot_offset(node, slot);
+	void *ptr = xa_mk_sibling(offset);
 
-	while (offset < RADIX_TREE_MAP_SIZE) {
+	while (++offset < RADIX_TREE_MAP_SIZE) {
 		if (rcu_dereference_raw(node->slots[offset]) != ptr)
 			break;
 		if (count < 0) {
@@ -1112,7 +1087,6 @@ static inline void replace_sibling_entries(struct radix_tree_node *node,
 			node->count--;
 		}
 		node->exceptional += exceptional;
-		offset++;
 	}
 #endif
 }
@@ -1311,8 +1285,7 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
 			tags |= 1 << tag;
 
 	for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) {
-		if (!is_sibling_entry(parent,
-				rcu_dereference_raw(parent->slots[end])))
+		if (!xa_is_sibling(rcu_dereference_raw(parent->slots[end])))
 			break;
 		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
 			if (tags & (1 << tag))
@@ -1608,11 +1581,9 @@ static void set_iter_tags(struct radix_tree_iter *iter,
 static void __rcu **skip_siblings(struct radix_tree_node **nodep,
 			void __rcu **slot, struct radix_tree_iter *iter)
 {
-	void *sib = node_to_entry(slot - 1);
-
 	while (iter->index < iter->next_index) {
 		*nodep = rcu_dereference_raw(*slot);
-		if (*nodep && *nodep != sib)
+		if (*nodep && !xa_is_sibling(*nodep))
 			return slot;
 		slot++;
 		iter->index = __radix_tree_iter_add(iter, 1);
@@ -1763,7 +1734,7 @@ void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
 				while (++offset	< RADIX_TREE_MAP_SIZE) {
 					void *slot = rcu_dereference_raw(
 							node->slots[offset]);
-					if (is_sibling_entry(node, slot))
+					if (xa_is_sibling(slot))
 						continue;
 					if (slot)
 						break;
-- 
2.15.0
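
A closing illustration of how the internal-entry value space partitions,
per the comment added to xarray.h above: sibling entries occupy values
0-62 (raw values 2-250), the retry entry is value 256 (raw value 1026),
and anything above 4096 is a pointer to the next node.  This is a
user-space approximation of xa_is_node() and xa_is_sibling(), ignoring
the IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) check:

	#include <stdbool.h>

	#define XA_CHUNK_SIZE	64UL

	static bool xa_is_internal(void *entry)
	{
		return ((unsigned long)entry & 3) == 2;
	}

	static bool xa_is_node(void *entry)
	{
		return xa_is_internal(entry) && (unsigned long)entry > 4096;
	}

	static bool xa_is_sibling(void *entry)
	{
		/* Sibling entries are internal values 0 to XA_CHUNK_SIZE - 2. */
		return xa_is_internal(entry) &&
			(unsigned long)entry < (((XA_CHUNK_SIZE - 1) << 2) | 2);
	}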


2017-12-06  0:41 ` [PATCH v4 30/73] mm: Convert workingset " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 31/73] mm: Convert truncate " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 32/73] mm: Convert add_to_swap_cache " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 33/73] mm: Convert delete_from_swap_cache " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 34/73] mm: Convert cgroup writeback " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 35/73] mm: Convert __do_page_cache_readahead " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 36/73] mm: Convert page migration " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 37/73] mm: Convert huge_memory " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 38/73] mm: Convert collapse_shmem " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 39/73] mm: Convert khugepaged_scan_shmem " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 40/73] pagevec: Use xa_tag_t Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 41/73] shmem: Convert replace to XArray Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 42/73] shmem: Convert shmem_confirm_swap " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 43/73] shmem: Convert find_swap_entry " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 44/73] shmem: Convert shmem_tag_pins " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 45/73] shmem: Convert shmem_wait_for_pins " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 46/73] shmem: Convert shmem_add_to_page_cache " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 47/73] shmem: Convert shmem_alloc_hugepage " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 48/73] shmem: Convert shmem_free_swap " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 49/73] shmem: Convert shmem_partial_swap_usage " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 50/73] shmem: Comment fixups Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 51/73] btrfs: Convert page cache to XArray Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 52/73] fs: Convert buffer " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 53/73] fs: Convert writeback " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 54/73] nilfs2: Convert " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 55/73] f2fs: " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 56/73] lustre: " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 57/73] dax: Convert dax_unlock_mapping_entry " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 58/73] dax: Convert lock_slot " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 59/73] dax: More XArray conversion Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 60/73] dax: Convert __dax_invalidate_mapping_entry to XArray Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 61/73] dax: Convert dax_writeback_one " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 62/73] dax: Convert dax_insert_pfn_mkwrite " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 63/73] dax: Convert dax_insert_mapping_entry " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 64/73] dax: Convert grab_mapping_entry " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 65/73] dax: Fix sparse warning Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 66/73] page cache: Finish XArray conversion Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 67/73] vmalloc: Convert to XArray Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 68/73] brd: " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 69/73] xfs: Convert m_perag_tree " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 70/73] xfs: Convert pag_ici_root " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 71/73] xfs: Convert xfs dquot " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 72/73] xfs: Convert mru cache " Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  1:36   ` Dave Chinner
2017-12-06  1:36     ` Dave Chinner
2017-12-06  1:36     ` Dave Chinner
2017-12-06  2:02     ` Matthew Wilcox
2017-12-06  2:02       ` Matthew Wilcox
2017-12-06  3:14       ` Dave Chinner
2017-12-06  3:14         ` Dave Chinner
2017-12-06  4:45         ` Matthew Wilcox
2017-12-06  4:45           ` Matthew Wilcox
2017-12-06  4:45           ` Matthew Wilcox
2017-12-06  4:52           ` Matthew Wilcox
2017-12-06  4:52             ` Matthew Wilcox
2017-12-06  8:44           ` Dave Chinner
2017-12-06  8:44             ` Dave Chinner
2017-12-06  8:44             ` Dave Chinner
2017-12-06 14:06             ` Matthew Wilcox
2017-12-06 14:06               ` Matthew Wilcox
2017-12-06 14:06               ` Matthew Wilcox
2017-12-07  0:38               ` Dave Chinner
2017-12-07  0:38                 ` Dave Chinner
2017-12-08 23:01                 ` Matthew Wilcox
2017-12-08 23:01                   ` Matthew Wilcox
2017-12-10 23:57                   ` Dave Chinner
2017-12-10 23:57                     ` Dave Chinner
2017-12-10 23:57                     ` Dave Chinner
2017-12-11  4:23                     ` Matthew Wilcox
2017-12-11  4:23                       ` [v4,72/73] " Matthew Wilcox
2017-12-11  4:23                       ` [PATCH v4 72/73] " Matthew Wilcox
2017-12-11 21:55                       ` Dave Chinner
2017-12-11 21:55                         ` [v4,72/73] " Dave Chinner
2017-12-11 21:55                         ` [PATCH v4 72/73] " Dave Chinner
2017-12-07 16:06               ` Theodore Ts'o
2017-12-07 16:06                 ` Theodore Ts'o
2017-12-07 22:22                 ` Dave Chinner
2017-12-07 22:22                   ` Dave Chinner
2017-12-08  4:45                   ` Byungchul Park
2017-12-08  4:45                     ` Byungchul Park
2017-12-08  4:45                     ` Byungchul Park
2017-12-08  7:25                     ` Dave Chinner
2017-12-08  7:25                       ` Dave Chinner
2017-12-08  7:25                       ` Dave Chinner
2017-12-08  9:27                       ` Byungchul Park
2017-12-08  9:27                         ` Byungchul Park
2017-12-08  9:27                         ` Byungchul Park
2017-12-08 17:35                         ` Alan Stern
2017-12-08 17:35                           ` Alan Stern
2017-12-08 17:35                           ` Alan Stern
2017-12-08 17:35                           ` Alan Stern
2017-12-08 17:35                           ` Alan Stern
2017-12-08 22:36                           ` Dave Chinner
2017-12-08 22:36                             ` Dave Chinner
2017-12-08 22:36                             ` Dave Chinner
2017-12-09 17:00                             ` Joe Perches
2017-12-09 17:00                               ` Joe Perches
2017-12-09 17:00                               ` Joe Perches
2017-12-11 21:43                               ` Dave Chinner
2017-12-11 21:43                                 ` Dave Chinner
2017-12-11 22:12                                 ` Joe Perches
2017-12-11 22:12                                   ` Joe Perches
2017-12-11 22:12                                   ` Joe Perches
2017-12-11 22:43                                   ` Matthew Wilcox
2017-12-11 22:43                                     ` Matthew Wilcox
2017-12-11 23:46                                     ` Joe Perches
2017-12-11 23:46                                       ` Joe Perches
2017-12-11 23:46                                       ` Joe Perches
2017-12-12 15:51                                       ` Alan Stern
2017-12-12 15:51                                         ` Alan Stern
2017-12-12 15:51                                         ` Alan Stern
2017-12-12 15:51                                         ` Alan Stern
2017-12-12 15:51                                         ` Alan Stern
2017-12-14 18:23                                     ` Joe Perches
2017-12-14 18:23                                       ` Joe Perches
2017-12-14 18:23                                       ` [v4,72/73] " Joe Perches
2017-12-14 18:23                                       ` [PATCH v4 72/73] " Joe Perches
2017-12-17  1:26                                     ` [RFC patch] checkpatch: Add a test for long function definitions (>200 lines) Joe Perches
2017-12-17 21:46                                       ` Linus Torvalds
2017-12-17 22:22                                         ` Joe Perches
2017-12-17 22:33                                         ` Luc Van Oostenryck
2017-12-11 23:38                                   ` [PATCH v4 72/73] xfs: Convert mru cache to XArray Dave Chinner
2017-12-11 23:38                                     ` Dave Chinner
2017-12-21 12:05                                   ` Knut Omang
2017-12-21 12:05                                     ` Knut Omang
2017-12-07 22:38                 ` Lockdep is less useful than it was Matthew Wilcox
2017-12-07 22:38                   ` Matthew Wilcox
2017-12-07 22:39                   ` Matthew Wilcox
2017-12-07 22:39                     ` Matthew Wilcox
2017-12-08  0:14                   ` Dave Chinner
2017-12-08  0:14                     ` Dave Chinner
2017-12-08 15:27                   ` Theodore Ts'o
2017-12-08 15:27                     ` Theodore Ts'o
2017-12-08 18:14                     ` Matthew Wilcox
2017-12-08 18:14                       ` Matthew Wilcox
2017-12-08 22:47                       ` Dave Chinner
2017-12-08 22:47                         ` Dave Chinner
2017-12-06  0:41 ` [PATCH v4 73/73] usb: Convert xhci-mem to XArray Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  0:41   ` Matthew Wilcox
2017-12-06  1:45 ` [PATCH v4 00/73] XArray version 4 Dave Chinner
2017-12-06  1:45   ` Dave Chinner
2017-12-06  1:45   ` Dave Chinner
2017-12-06  1:51   ` Dave Chinner
2017-12-06  1:51     ` Dave Chinner
2017-12-06  1:51     ` Dave Chinner
2017-12-06  1:51     ` Dave Chinner
2017-12-06  1:53     ` Matthew Wilcox
2017-12-06  1:53       ` Matthew Wilcox
2017-12-06  1:53       ` Matthew Wilcox
2017-12-06  1:53       ` Matthew Wilcox
2017-12-06  2:17       ` Dave Chinner
2017-12-06  2:17         ` Dave Chinner
2017-12-06  2:17         ` Dave Chinner
2017-12-06  2:17         ` Dave Chinner
2017-12-06  2:17         ` Dave Chinner
2017-12-06  2:27         ` Matthew Wilcox
2017-12-06  2:27           ` Matthew Wilcox
2017-12-06  2:27           ` Matthew Wilcox
2017-12-06  2:27           ` Matthew Wilcox
2017-12-06  2:05   ` Matthew Wilcox
2017-12-06  2:05     ` Matthew Wilcox
2017-12-06  2:38     ` Dave Chinner
2017-12-06  2:38       ` Dave Chinner
2017-12-06 23:58 ` Ross Zwisler
2017-12-06 23:58   ` Ross Zwisler
2017-12-06 23:58   ` Ross Zwisler
2017-12-07  0:13   ` Matthew Wilcox
2017-12-07  0:13     ` Matthew Wilcox
2017-12-07  0:13     ` Matthew Wilcox

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save this thread's mbox file, import it into your mail client,
  and reply-to-all from there (a command-line sketch of this method
  follows the list below).

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20171206004159.3755-6-willy@infradead.org \
    --to=willy@infradead.org \
    --cc=aquannie@gmail.com \
    --cc=axboe@kernel.dk \
    --cc=linux-btrfs@vger.kernel.org \
    --cc=linux-f2fs-devel@lists.sourceforge.net \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=linux-nilfs@vger.kernel.org \
    --cc=linux-usb@vger.kernel.org \
    --cc=linux-xfs@vger.kernel.org \
    --cc=mawilcox@microsoft.com \
    --cc=ross.zwisler@linux.intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link (a sketch of such a link
  also follows below).
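
  For example, here is one minimal way to use the mbox method from
  the command line. This is a sketch, not part of the archive's own
  instructions: "t.mbox" is a hypothetical filename, and mutt is
  only one of many clients that can open an mbox file:

    # Open the saved thread in mutt; in the index, select the
    # message and press 'g' (group-reply) to reply to all recipients.
    mutt -f t.mbox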
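
  As a sketch of the mailto: method, a link carrying the In-Reply-To
  header for this message could look like the following. The exact
  parameters of the archive's own link are an assumption; only the
  address and Message-ID below are taken from this page, with the
  angle brackets percent-encoded as %3C and %3E:

    mailto:willy@infradead.org?In-Reply-To=%3C20171206004159.3755-6-willy@infradead.org%3E
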
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes; see the
mirroring instructions for how to clone and mirror all data and code
used by this external index.